720 files changed, 29409 insertions, 13170 deletions
diff --git a/.clang-format b/.clang-format index 86c20ee744de..48405c54ef27 100644 --- a/.clang-format +++ b/.clang-format @@ -722,6 +722,13 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' + - 'vkms_config_for_each_connector' + - 'vkms_config_for_each_crtc' + - 'vkms_config_for_each_encoder' + - 'vkms_config_for_each_plane' + - 'vkms_config_connector_for_each_possible_encoder' + - 'vkms_config_encoder_for_each_possible_crtc' + - 'vkms_config_plane_for_each_possible_crtc' - 'while_for_each_ftrace_op' - 'workloads__for_each' - 'xa_for_each' diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon index cb207c79680d..4ca917ac6382 100644 --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon @@ -124,3 +124,27 @@ Contact: intel-xe@lists.freedesktop.org Description: RO. VRAM temperature in millidegree Celsius. Only supported for particular Intel Xe graphics platforms. + +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan1_input +Date: March 2025 +KernelVersion: 6.16 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 1 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. + +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan2_input +Date: March 2025 +KernelVersion: 6.16 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 2 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. + +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/fan3_input +Date: March 2025 +KernelVersion: 6.16 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 3 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst index 4bdc394e86af..125fd0397510 100644 --- a/Documentation/core-api/printk-formats.rst +++ b/Documentation/core-api/printk-formats.rst @@ -648,6 +648,38 @@ Examples:: %p4cc Y10 little-endian (0x20303159) %p4cc NV12 big-endian (0xb231564e) +Generic FourCC code +------------------- + +:: + %p4c[hnlb] gP00 (0x67503030) + +Print a generic FourCC code, as both ASCII characters and its numerical +value as hexadecimal. + +The generic FourCC code is always printed in the big-endian format, +the most significant byte first. This is the opposite of V4L/DRM FourCCs. + +The additional ``h``, ``n``, ``l``, and ``b`` specifiers define what +endianness is used to load the stored bytes. The data might be interpreted +using the host byte order, network byte order, little-endian, or big-endian. + +Passed by reference. 
+ +Examples for a little-endian machine, given &(u32)0x67503030:: + + %p4ch gP00 (0x67503030) + %p4cn 00Pg (0x30305067) + %p4cl gP00 (0x67503030) + %p4cb 00Pg (0x30305067) + +Examples for a big-endian machine, given &(u32)0x67503030:: + + %p4ch gP00 (0x67503030) + %p4cn 00Pg (0x30305067) + %p4cl 00Pg (0x30305067) + %p4cb gP00 (0x67503030) + Rust ---- diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml new file mode 100644 index 000000000000..f619aea82bdf --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/himax,hx8279.yaml @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/himax,hx8279.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Himax HX8279/HX8279-D based MIPI-DSI panels + +maintainers: + - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + +description: + The Himax HX8279 is a 1803 channel outputs source driver with MIPI + TCON, which generates the horizontal and vertical control timing to + the source and gate drivers. + This DriverIC is most suitable for 1200x1920, 1080x1920, 1200x1600, + and 600x1024 panels and outputs full RGB888 over two or four lanes, + single or dual, MIPI-DSI video interface. + +allOf: + - $ref: panel-common-dual.yaml# + +properties: + compatible: + items: + - enum: + - aoly,sl101pm1794fog-v15 + - startek,kd070fhfid078 + - const: himax,hx8279 + + reg: + maxItems: 1 + + iovcc-supply: + description: I/O voltage supply + + vdd-supply: + description: Panel power supply + +required: + - compatible + - reg + - backlight + - reset-gpios + - iovcc-supply + - vdd-supply + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "startek,kd070fhfid078", "himax,hx8279"; + reg = <0>; + backlight = <&backlight>; + enable-gpios = <&pio 25 GPIO_ACTIVE_HIGH>; + reset-gpios = <&pio 45 GPIO_ACTIVE_HIGH>; + iovcc-supply = <&vreg_lcm_vio>; + vdd-supply = <&vreg_lcm_vdd>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index b0de4fd6f3d4..5542c9229d54 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -226,6 +226,8 @@ properties: - netron-dy,e231732 # Newhaven Display International 480 x 272 TFT LCD panel - newhaven,nhd-4.3-480272ef-atxl + # NLT Technologies, Ltd. 15.6" WXGA (1366×768) LVDS TFT LCD panel + - nlt,nl13676bc25-03f # New Vision Display 7.0" 800 RGB x 480 TFT LCD panel - nvd,9128 # OKAYA Electric America, Inc. 
RS800480T-7X0GP 7" WVGA LCD panel @@ -246,6 +248,8 @@ properties: - osddisplays,osd070t1718-19ts # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel - osddisplays,osd101t2045-53ts + # POWERTIP PH128800T004-ZZA01 10.1" WXGA TFT LCD panel + - powertip,ph128800t004-zza01 # POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel - powertip,ph128800t006-zhc01 # POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel @@ -284,6 +288,8 @@ properties: - startek,kd070wvfpa # Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel - team-source-display,tst043015cmhx + # Tianma Micro-electronics P0700WXF1MBAA 7.0" WXGA (1280x800) LVDS TFT LCD panel + - tianma,p0700wxf1mbaa # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel - tianma,tm070jdhg30 # Tianma Micro-electronics TM070JDHG34-00 7.0" WXGA (1280x800) LVDS TFT LCD panel diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml index 684c2896d238..31f0c0f038e4 100644 --- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml +++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml @@ -19,6 +19,8 @@ properties: - const: samsung,atna33xc20 - items: - enum: + # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel + - samsung,atna40yk20 # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel - samsung,atna45af01 # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel diff --git a/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml b/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml new file mode 100644 index 000000000000..49dcd9b8f670 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/visionox,g2647fb105.yaml @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/visionox,g2647fb105.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Visionox G2647FB105 6.47" 1080x2340 MIPI-DSI Panel + +maintainers: + - Alexander Baransky <sanyapilot496@gmail.com> + +description: + The Visionox G2647FB105 is a 6.47 inch 1080x2340 MIPI-DSI CMD mode OLED panel. + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: visionox,g2647fb105 + + reg: + maxItems: 1 + + vdd3p3-supply: + description: 3.3V source voltage rail + + vddio-supply: + description: I/O source voltage rail + + vsn-supply: + description: Negative source voltage rail + + vsp-supply: + description: Positive source voltage rail + + reset-gpios: true + port: true + +required: + - compatible + - reg + - vdd3p3-supply + - vddio-supply + - vsn-supply + - vsp-supply + - reset-gpios + - port + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "visionox,g2647fb105"; + reg = <0>; + + vdd3p3-supply = <&vreg_l7c_3p0>; + vddio-supply = <&vreg_l13a_1p8>; + vsn-supply = <&vreg_ibb>; + vsp-supply = <&vreg_lab>; + + reset-gpios = <&pm6150l_gpios 9 GPIO_ACTIVE_LOW>; + + port { + panel_in: endpoint { + remote-endpoint = <&mdss_dsi0_out>; + }; + }; + }; + }; + +... 
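As a usage sketch for the generic FourCC specifier documented in the printk-formats.rst hunk above: the helper name and value below are illustrative only (chosen to match the documented little-endian examples), not part of the patch::

    #include <linux/printk.h>
    #include <linux/types.h>

    static void fourcc_demo(void)	/* hypothetical helper, for illustration */
    {
    	u32 fourcc = 0x67503030;	/* "gP00" in host byte order on little-endian */

    	/* Like %p4cc, the generic %p4c[hnlb] forms take the value by reference. */
    	pr_info("host order: %p4ch\n", &fourcc);	/* gP00 (0x67503030) */
    	pr_info("big endian: %p4cb\n", &fourcc);	/* 00Pg (0x30305067) on LE */
    }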
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml index 60dedf9b2be7..d99b23b88cc5 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml @@ -15,6 +15,7 @@ properties: enum: - rockchip,rk3288-dp - rockchip,rk3399-edp + - rockchip,rk3588-edp clocks: minItems: 2 @@ -31,16 +32,23 @@ properties: maxItems: 1 resets: - maxItems: 1 + minItems: 1 + maxItems: 2 reset-names: - const: dp + minItems: 1 + items: + - const: dp + - const: apb rockchip,grf: $ref: /schemas/types.yaml#/definitions/phandle description: This SoC makes use of GRF regs. + aux-bus: + $ref: /schemas/display/dp-aux-bus.yaml# + required: - compatible - clocks @@ -52,6 +60,19 @@ required: allOf: - $ref: /schemas/display/bridge/analogix,dp.yaml# + - if: + properties: + compatible: + contains: + enum: + - rockchip,rk3588-edp + then: + properties: + resets: + minItems: 2 + reset-names: + minItems: 2 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml index b339b7e708c6..8b5f58103dda 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml @@ -73,12 +73,6 @@ properties: port: $ref: /schemas/graph.yaml#/properties/port - assigned-clocks: - maxItems: 2 - - assigned-clock-rates: - maxItems: 2 - iommus: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/sitronix,st7571.yaml b/Documentation/devicetree/bindings/display/sitronix,st7571.yaml new file mode 100644 index 000000000000..4fea782fccd7 --- /dev/null +++ b/Documentation/devicetree/bindings/display/sitronix,st7571.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/sitronix,st7571.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sitronix ST7571 Display Controller + +maintainers: + - Marcus Folkesson <marcus.folkesson@gmail.com> + +description: + Sitronix ST7571 is a driver and controller for 4-level gray + scale and monochrome dot matrix LCD panels. + +allOf: + - $ref: panel/panel-common.yaml# + +properties: + compatible: + const: sitronix,st7571 + + reg: + maxItems: 1 + + sitronix,grayscale: + type: boolean + description: + Display supports 4-level grayscale. 
+ + reset-gpios: true + width-mm: true + height-mm: true + panel-timing: true + +required: + - compatible + - reg + - reset-gpios + - width-mm + - height-mm + - panel-timing + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + display@3f { + compatible = "sitronix,st7571"; + reg = <0x3f>; + reset-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; + width-mm = <37>; + height-mm = <27>; + + panel-timing { + hactive = <128>; + vactive = <96>; + hback-porch = <0>; + vback-porch = <0>; + clock-frequency = <0>; + hfront-porch = <0>; + hsync-len = <0>; + vfront-porch = <0>; + vsync-len = <0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index dc078ceeca9a..43c6d2d72456 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Broadcom V3D GPU maintainers: - - Eric Anholt <eric@anholt.net> + - Maíra Canal <mcanal@igalia.com> - Nicolas Saenz Julienne <nsaenzjulienne@suse.de> properties: @@ -22,20 +22,12 @@ properties: - brcm,7278-v3d reg: - items: - - description: hub register (required) - - description: core0 register (required) - - description: GCA cache controller register (if GCA controller present) - - description: bridge register (if no external reset controller) minItems: 2 + maxItems: 4 reg-names: - items: - - const: hub - - const: core0 - - enum: [ bridge, gca ] - - enum: [ bridge, gca ] minItems: 2 + maxItems: 4 interrupts: items: @@ -58,6 +50,76 @@ required: - reg-names - interrupts +allOf: + - if: + properties: + compatible: + contains: + const: brcm,2711-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + reg-names: + items: + - const: hub + - const: core0 + - if: + properties: + compatible: + contains: + const: brcm,2712-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + - description: SMS state manager register + reg-names: + items: + - const: hub + - const: core0 + - const: sms + - if: + properties: + compatible: + contains: + const: brcm,7268-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + - description: GCA cache controller register + - description: bridge register + reg-names: + items: + - const: hub + - const: core0 + - const: gca + - const: bridge + - if: + properties: + compatible: + contains: + const: brcm,7278-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + - description: bridge register + reg-names: + items: + - const: hub + - const: core0 + - const: bridge + additionalProperties: false examples: @@ -66,9 +128,9 @@ examples: compatible = "brcm,7268-v3d"; reg = <0xf1200000 0x4000>, <0xf1208000 0x4000>, - <0xf1204000 0x100>, - <0xf1204100 0x100>; - reg-names = "hub", "core0", "bridge", "gca"; + <0xf1204100 0x100>, + <0xf1204000 0x100>; + reg-names = "hub", "core0", "gca", "bridge"; interrupts = <0 78 4>, <0 77 4>; }; diff --git a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml index 256e252f8087..4450e2e73b3c 100644 --- a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml +++ b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml @@ -12,10 
+12,28 @@ maintainers: properties: compatible: - items: - - enum: - - ti,am62-gpu - - const: img,img-axe # IMG AXE GPU model/revision is fully discoverable + oneOf: + - items: + - enum: + - ti,am62-gpu + - const: img,img-axe-1-16m + # This deprecated element must be kept around to allow old kernels to + # work with newer dts. + - const: img,img-axe + - const: img,img-rogue + - items: + - enum: + - ti,j721s2-gpu + - const: img,img-bxs-4-64 + - const: img,img-rogue + + # This legacy combination of compatible strings was introduced early on + # before the more specific GPU identifiers were used. + - items: + - enum: + - ti,am62-gpu + - const: img,img-axe + deprecated: true reg: maxItems: 1 @@ -35,6 +53,18 @@ properties: maxItems: 1 power-domains: + minItems: 1 + maxItems: 2 + + power-domain-names: + items: + - const: a + - const: b + minItems: 1 + + dma-coherent: true + + resets: maxItems: 1 required: @@ -47,11 +77,49 @@ required: additionalProperties: false allOf: + # Constraints added alongside the new compatible strings that would otherwise + # create an ABI break. + - if: + properties: + compatible: + contains: + const: img,img-rogue + then: + required: + - power-domains + - power-domain-names + + - if: + properties: + compatible: + contains: + const: img,img-axe-1-16m + then: + properties: + power-domains: + maxItems: 1 + power-domain-names: + maxItems: 1 + + - if: + properties: + compatible: + contains: + const: img,img-bxs-4-64 + then: + properties: + power-domains: + minItems: 2 + power-domain-names: + minItems: 2 + - if: properties: compatible: contains: - const: ti,am62-gpu + enum: + - ti,am62-gpu + - ti,j721s2-gpu then: properties: clocks: @@ -64,10 +132,12 @@ examples: #include <dt-bindings/soc/ti,sci_pm_domain.h> gpu@fd00000 { - compatible = "ti,am62-gpu", "img,img-axe"; + compatible = "ti,am62-gpu", "img,img-axe-1-16m", "img,img-axe", + "img,img-rogue"; reg = <0x0fd00000 0x20000>; clocks = <&k3_clks 187 0>; clock-names = "core"; interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; power-domains = <&k3_pds 187 TI_SCI_PD_EXCLUSIVE>; + power-domain-names = "a"; }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 86f6a19b28ae..6136c8832a66 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -129,6 +129,8 @@ patternProperties: description: Andes Technology Corporation "^anvo,.*": description: Anvo-Systems Dresden GmbH + "^aoly,.*": + description: Shenzhen Aoly Technology Co., Ltd. "^aosong,.*": description: Guangzhou Aosong Electronic Co., Ltd. "^apm,.*": diff --git a/Documentation/gpu/automated_testing.rst b/Documentation/gpu/automated_testing.rst index 6d7c6086034d..62aa3ede02a5 100644 --- a/Documentation/gpu/automated_testing.rst +++ b/Documentation/gpu/automated_testing.rst @@ -115,6 +115,10 @@ created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines) 5. The various jobs will be run and when the pipeline is finished, all jobs should be green unless a regression has been found. +6. Warnings in the pipeline indicate that lockdep +(see Documentation/locking/lockdep-design.rst) issues have been detected +during the tests. 
+ How to update test expectations =============================== diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst index 971cdb4816fc..1f15a8ca1265 100644 --- a/Documentation/gpu/driver-uapi.rst +++ b/Documentation/gpu/driver-uapi.rst @@ -27,3 +27,8 @@ drm/xe uAPI =========== .. kernel-doc:: include/uapi/drm/xe_drm.h + +drm/asahi uAPI +================ + +.. kernel-doc:: include/uapi/drm/asahi_drm.h diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index b4ee25af1702..5139705089f2 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -233,6 +233,21 @@ Panel Self Refresh Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c :export: +HDMI Atomic State Helpers +========================= + +Overview +-------- + +.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c + :doc: hdmi helpers + +Functions Reference +------------------- + +.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c + :export: + HDCP Helper Functions Reference =============================== diff --git a/Documentation/gpu/nouveau.rst b/Documentation/gpu/nouveau.rst index 0f34131ccc27..b8c801e0068c 100644 --- a/Documentation/gpu/nouveau.rst +++ b/Documentation/gpu/nouveau.rst @@ -27,3 +27,6 @@ GSP Support .. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c :doc: GSP message queue element + +.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h + :doc: GSP message handling policy diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 256d0d1cb216..c57777a24e03 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -441,14 +441,15 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de> Level: Intermediate -Request memory regions in all drivers -------------------------------------- +Request memory regions in all fbdev drivers +-------------------------------------------- -Go through all drivers and add code to request the memory regions that the -driver uses. This requires adding calls to request_mem_region(), +Old/ancient fbdev drivers do not request their memory properly. +Go through these drivers and add code to request the memory regions +that the driver uses. This requires adding calls to request_mem_region(), pci_request_region() or similar functions. Use helpers for managed cleanup -where possible. - +where possible. Problematic areas include hardware that has exclusive ranges +like VGA. VGA16fb does not request the range as it is expected. Drivers are pretty bad at doing this and there used to be conflicts among DRM and fbdev drivers. Still, it's the correct thing to do. diff --git a/Documentation/gpu/vgaarbiter.rst b/Documentation/gpu/vgaarbiter.rst index bde3c0afb059..d1e953712cc2 100644 --- a/Documentation/gpu/vgaarbiter.rst +++ b/Documentation/gpu/vgaarbiter.rst @@ -11,9 +11,9 @@ Section 7, Legacy Devices. The Resource Access Control (RAC) module inside the X server [0] existed for the legacy VGA arbitration task (besides other bus management tasks) when more -than one legacy device co-exists on the same machine. But the problem happens +than one legacy device co-exist on the same machine. But the problem happens when these devices are trying to be accessed by different userspace clients -(e.g. two server in parallel). Their address assignments conflict. Moreover, +(e.g. two servers in parallel). Their address assignments conflict. 
Moreover, ideally, being a userspace application, it is not the role of the X server to control bus resources. Therefore an arbitration scheme outside of the X server is needed to control the sharing of these resources. This document introduces @@ -106,7 +106,7 @@ In-kernel interface libpciaccess ------------ -To use the vga arbiter char device it was implemented an API inside the +To use the vga arbiter char device, an API was implemented inside the libpciaccess library. One field was added to struct pci_device (each device on the system):: diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst index 92cfb25e64d3..b2369561f24e 100644 --- a/Documentation/gpu/xe/index.rst +++ b/Documentation/gpu/xe/index.rst @@ -25,3 +25,4 @@ DG2, etc is provided to prototype the driver. xe_debugging xe_devcoredump xe-drm-usage-stats.rst + xe_configfs diff --git a/Documentation/gpu/xe/xe_configfs.rst b/Documentation/gpu/xe/xe_configfs.rst new file mode 100644 index 000000000000..9b9d941eb20e --- /dev/null +++ b/Documentation/gpu/xe/xe_configfs.rst @@ -0,0 +1,10 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +.. _xe_configfs: + +============ +Xe Configfs +============ + +.. kernel-doc:: drivers/gpu/drm/xe/xe_configfs.c + :doc: Xe Configfs diff --git a/Documentation/gpu/xe/xe_pcode.rst b/Documentation/gpu/xe/xe_pcode.rst index d2e22cc45061..5937ef3599b0 100644 --- a/Documentation/gpu/xe/xe_pcode.rst +++ b/Documentation/gpu/xe/xe_pcode.rst @@ -12,3 +12,10 @@ Internal API .. kernel-doc:: drivers/gpu/drm/xe/xe_pcode.c :internal: + +================== +Boot Survivability +================== + +.. kernel-doc:: drivers/gpu/drm/xe/xe_survivability_mode.c + :doc: Xe Boot Survivability diff --git a/MAINTAINERS b/MAINTAINERS index 69511c3b2b76..38df6b159a3b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2306,6 +2306,7 @@ F: drivers/watchdog/apple_wdt.c F: include/dt-bindings/interrupt-controller/apple-aic.h F: include/dt-bindings/pinctrl/apple.h F: include/linux/soc/apple/* +F: include/uapi/drm/asahi_drm.h ARM/ARTPEC MACHINE SUPPORT M: Jesper Nilsson <jesper.nilsson@axis.com> @@ -7379,8 +7380,7 @@ M: Javier Martinez Canillas <javierm@redhat.com> L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git -F: drivers/gpu/drm/tiny/ofdrm.c -F: drivers/gpu/drm/tiny/simpledrm.c +F: drivers/gpu/drm/sysfb/ F: drivers/video/aperture.c F: drivers/video/nomodeset.c F: include/linux/aperture.h @@ -7691,6 +7691,12 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: Documentation/devicetree/bindings/display/sitronix,st7586.txt F: drivers/gpu/drm/tiny/st7586.c +DRM DRIVER FOR SITRONIX ST7571 PANELS +M: Marcus Folkesson <marcus.folkesson@gmail.com> +S: Maintained +F: Documentation/devicetree/bindings/display/sitronix,st7571.yaml +F: drivers/gpu/drm/tiny/st7571-i2c.c + DRM DRIVER FOR SITRONIX ST7701 PANELS M: Jagan Teki <jagan@amarulasolutions.com> S: Maintained @@ -7862,8 +7868,8 @@ F: drivers/gpu/drm/ci/xfails/meson* F: drivers/gpu/drm/meson/ DRM DRIVERS FOR ATMEL HLCDC -M: Sam Ravnborg <sam@ravnborg.org> -M: Boris Brezillon <bbrezillon@kernel.org> +M: Manikandan Muralidharan <manikandan.m@microchip.com> +M: Dharma Balasubiramani <dharma.b@microchip.com> L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -8191,7 +8197,8 @@ F: drivers/gpu/drm/ttm/ F: include/drm/ttm/ DRM AUTOMATED TESTING -M: Helen Koike <helen.koike@collabora.com> +M: Helen Koike <helen.fornazier@gmail.com> +M: Vignesh Raman 
<vignesh.raman@collabora.com> L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -11935,13 +11942,10 @@ F: drivers/gpio/gpio-tangier.c F: drivers/gpio/gpio-tangier.h INTEL GVT-g DRIVERS (Intel GPU Virtualization) -M: Zhenyu Wang <zhenyuw.linux@gmail.com> -M: Zhi Wang <zhi.wang.linux@gmail.com> -L: intel-gvt-dev@lists.freedesktop.org -L: intel-gfx@lists.freedesktop.org -S: Supported +R: Zhenyu Wang <zhenyuw.linux@gmail.com> +R: Zhi Wang <zhi.wang.linux@gmail.com> +S: Odd Fixes W: https://github.com/intel/gvt-linux/wiki -T: git https://github.com/intel/gvt-linux.git F: drivers/gpu/drm/i915/gvt/ INTEL HID EVENT DRIVER @@ -25678,6 +25682,7 @@ F: include/uapi/linux/virtio_gpio.h VIRTIO GPU DRIVER M: David Airlie <airlied@redhat.com> M: Gerd Hoffmann <kraxel@redhat.com> +M: Dmitry Osipenko <dmitry.osipenko@collabora.com> R: Gurchetan Singh <gurchetansingh@chromium.org> R: Chia-I Wu <olvaffe@gmail.com> L: dri-devel@lists.freedesktop.org diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO index 5119bccd1917..ad8ac6e315b6 100644 --- a/drivers/accel/amdxdna/TODO +++ b/drivers/accel/amdxdna/TODO @@ -1,3 +1,2 @@ -- Add import and export BO support - Add debugfs support - Add debug BO support diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index 00d215ac866e..e04549f64d69 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu static int aie2_populate_range(struct amdxdna_gem_obj *abo) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); - struct mm_struct *mm = abo->mem.notifier.mm; - struct hmm_range range = { 0 }; + struct amdxdna_umap *mapp; unsigned long timeout; + struct mm_struct *mm; + bool found; int ret; - XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx", - abo->mem.userptr, abo->mem.size); - range.notifier = &abo->mem.notifier; - range.start = abo->mem.userptr; - range.end = abo->mem.userptr + abo->mem.size; - range.hmm_pfns = abo->mem.pfns; - range.default_flags = HMM_PFN_REQ_FAULT; + timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); +again: + found = false; + down_write(&xdna->notifier_lock); + list_for_each_entry(mapp, &abo->mem.umap_list, node) { + if (mapp->invalid) { + found = true; + break; + } + } - if (!mmget_not_zero(mm)) + if (!found) { + abo->mem.map_invalid = false; + up_write(&xdna->notifier_lock); + return 0; + } + kref_get(&mapp->refcnt); + up_write(&xdna->notifier_lock); + + XDNA_DBG(xdna, "populate memory range %lx %lx", + mapp->vma->vm_start, mapp->vma->vm_end); + mm = mapp->notifier.mm; + if (!mmget_not_zero(mm)) { + amdxdna_umap_put(mapp); return -EFAULT; + } - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); -again: - range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier); + mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier); mmap_read_lock(mm); - ret = hmm_range_fault(&range); + ret = hmm_range_fault(&mapp->range); mmap_read_unlock(mm); if (ret) { if (time_after(jiffies, timeout)) { @@ -786,21 +801,27 @@ again: goto put_mm; } - if (ret == -EBUSY) + if (ret == -EBUSY) { + amdxdna_umap_put(mapp); goto again; + } goto put_mm; } - down_read(&xdna->notifier_lock); - if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) { - up_read(&xdna->notifier_lock); + down_write(&xdna->notifier_lock); + if (mmu_interval_read_retry(&mapp->notifier, 
mapp->range.notifier_seq)) { + up_write(&xdna->notifier_lock); + amdxdna_umap_put(mapp); goto again; } - abo->mem.map_invalid = false; - up_read(&xdna->notifier_lock); + mapp->invalid = false; + up_write(&xdna->notifier_lock); + amdxdna_umap_put(mapp); + goto again; put_mm: + amdxdna_umap_put(mapp); mmput(mm); return ret; } @@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, struct drm_gem_object *gobj = to_gobj(abo); long ret; - down_write(&xdna->notifier_lock); - abo->mem.map_invalid = true; - mmu_interval_set_seq(&abo->mem.notifier, cur_seq); - up_write(&xdna->notifier_lock); ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP, true, MAX_SCHEDULE_TIMEOUT); if (!ret || ret == -ERESTARTSYS) diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index bf4219e32cc1..82412eec9a4b 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset, if (!payload) return -EINVAL; - if (!slot_cf_has_space(offset, payload_len)) + if (!slot_has_space(*buf, offset, payload_len)) return -ENOSPC; buf->cu_idx = cu_idx; @@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) return -EINVAL; - if (!slot_dpu_has_space(offset, arg_sz)) + if (!slot_has_space(*buf, offset, arg_sz)) return -ENOSPC; buf->inst_buf_addr = sn->buffer; @@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, memcpy(buf->args, sn->prop_args, arg_sz); /* Accurate buf size to hint firmware to do necessary copy */ - *size += sizeof(*buf) + arg_sz; + *size = sizeof(*buf) + arg_sz; return 0; } diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h index 4e02e744b470..6df9065b13f6 100644 --- a/drivers/accel/amdxdna/aie2_msg_priv.h +++ b/drivers/accel/amdxdna/aie2_msg_priv.h @@ -319,18 +319,16 @@ struct async_event_msg_resp { } __packed; #define MAX_CHAIN_CMDBUF_SIZE SZ_4K -#define slot_cf_has_space(offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \ - offsetof(struct cmd_chain_slot_execbuf_cf, args[0])) +#define slot_has_space(slot, offset, payload_size) \ + (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \ + sizeof(typeof(slot))) + struct cmd_chain_slot_execbuf_cf { __u32 cu_idx; __u32 arg_cnt; __u32 args[] __counted_by(arg_cnt); }; -#define slot_dpu_has_space(offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \ - offsetof(struct cmd_chain_slot_dpu, args[0])) struct cmd_chain_slot_dpu { __u64 inst_buf_addr; __u32 inst_size; diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c index 606433d73236..26831ec69f89 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -9,7 +9,10 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/gpu_scheduler.h> +#include <linux/dma-buf.h> +#include <linux/dma-direct.h> #include <linux/iosys-map.h> +#include <linux/pagemap.h> #include <linux/vmalloc.h> #include "amdxdna_ctx.h" @@ -18,6 +21,8 @@ #define XDNA_MAX_CMD_BO_SIZE SZ_32K +MODULE_IMPORT_NS("DMA_BUF"); + static int amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap) { @@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap) return 0; } -static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) -{ - struct amdxdna_dev 
*xdna = to_xdna_dev(gobj->dev); - struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); - - XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); - if (abo->pinned) - amdxdna_gem_unpin(abo); - - if (abo->type == AMDXDNA_BO_DEV) { - mutex_lock(&abo->client->mm_lock); - drm_mm_remove_node(&abo->mm_node); - mutex_unlock(&abo->client->mm_lock); - - vunmap(abo->mem.kva); - drm_gem_object_put(to_gobj(abo->dev_heap)); - drm_gem_object_release(gobj); - mutex_destroy(&abo->lock); - kfree(abo); - return; - } - - if (abo->type == AMDXDNA_BO_DEV_HEAP) - drm_mm_takedown(&abo->mm); - - drm_gem_vunmap_unlocked(gobj, &map); - mutex_destroy(&abo->lock); - drm_gem_shmem_free(&abo->base); -} - -static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = { - .free = amdxdna_gem_obj_free, -}; - static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { - struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj, - mem.notifier); - struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier); + struct amdxdna_gem_obj *abo = mapp->abo; + struct amdxdna_dev *xdna; - XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d", - abo->mem.userptr, abo->mem.size, abo->type); + xdna = to_xdna_dev(to_gobj(abo)->dev); + XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d", + mapp->vma->vm_start, mapp->vma->vm_end, abo->type); if (!mmu_notifier_range_blockable(range)) return false; + down_write(&xdna->notifier_lock); + abo->mem.map_invalid = true; + mapp->invalid = true; + mmu_interval_set_seq(&mapp->notifier, cur_seq); + up_write(&xdna->notifier_lock); + xdna->dev_info->ops->hmm_invalidate(abo, cur_seq); + if (range->event == MMU_NOTIFY_UNMAP) { + down_write(&xdna->notifier_lock); + if (!mapp->unmapped) { + queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work); + mapp->unmapped = true; + } + up_write(&xdna->notifier_lock); + } + return true; } @@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = { .invalidate = amdxdna_hmm_invalidate, }; -static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo) +static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + struct amdxdna_umap *mapp; + + down_read(&xdna->notifier_lock); + list_for_each_entry(mapp, &abo->mem.umap_list, node) { + if (!vma || mapp->vma == vma) { + if (!mapp->unmapped) { + queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work); + mapp->unmapped = true; + } + if (vma) + break; + } + } + up_read(&xdna->notifier_lock); +} - if (!xdna->dev_info->ops->hmm_invalidate) - return; +static void amdxdna_umap_release(struct kref *ref) +{ + struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt); + struct vm_area_struct *vma = mapp->vma; + struct amdxdna_dev *xdna; + + mmu_interval_notifier_remove(&mapp->notifier); + if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping) + mapping_clear_unevictable(vma->vm_file->f_mapping); + + xdna = to_xdna_dev(to_gobj(mapp->abo)->dev); + down_write(&xdna->notifier_lock); + list_del(&mapp->node); + up_write(&xdna->notifier_lock); + + kvfree(mapp->range.hmm_pfns); + kfree(mapp); +} + +void amdxdna_umap_put(struct amdxdna_umap *mapp) +{ + kref_put(&mapp->refcnt, amdxdna_umap_release); +} + +static void 
amdxdna_hmm_unreg_work(struct work_struct *work) +{ + struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap, + hmm_unreg_work); - mmu_interval_notifier_remove(&abo->mem.notifier); - kvfree(abo->mem.pfns); - abo->mem.pfns = NULL; + amdxdna_umap_put(mapp); } -static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr, - size_t len) +static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + unsigned long len = vma->vm_end - vma->vm_start; + unsigned long addr = vma->vm_start; + struct amdxdna_umap *mapp; u32 nr_pages; int ret; if (!xdna->dev_info->ops->hmm_invalidate) return 0; - if (abo->mem.pfns) - return -EEXIST; + mapp = kzalloc(sizeof(*mapp), GFP_KERNEL); + if (!mapp) + return -ENOMEM; nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT; - abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns), - GFP_KERNEL); - if (!abo->mem.pfns) - return -ENOMEM; + mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns), + GFP_KERNEL); + if (!mapp->range.hmm_pfns) { + ret = -ENOMEM; + goto free_map; + } - ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier, + ret = mmu_interval_notifier_insert_locked(&mapp->notifier, current->mm, addr, len, &amdxdna_hmm_ops); if (ret) { XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret); - kvfree(abo->mem.pfns); + goto free_pfns; } - abo->mem.userptr = addr; + mapp->range.notifier = &mapp->notifier; + mapp->range.start = vma->vm_start; + mapp->range.end = vma->vm_end; + mapp->range.default_flags = HMM_PFN_REQ_FAULT; + mapp->vma = vma; + mapp->abo = abo; + kref_init(&mapp->refcnt); + + if (abo->mem.userptr == AMDXDNA_INVALID_ADDR) + abo->mem.userptr = addr; + INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work); + if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping) + mapping_set_unevictable(vma->vm_file->f_mapping); + + down_write(&xdna->notifier_lock); + list_add_tail(&mapp->node, &abo->mem.umap_list); + up_write(&xdna->notifier_lock); + + return 0; + +free_pfns: + kvfree(mapp->range.hmm_pfns); +free_map: + kfree(mapp); return ret; } +static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) +{ + struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + unsigned long num_pages = vma_pages(vma); + unsigned long offset = 0; + int ret; + + if (!is_import_bo(abo)) { + ret = drm_gem_shmem_mmap(&abo->base, vma); + if (ret) { + XDNA_ERR(xdna, "Failed shmem mmap %d", ret); + return ret; + } + + /* The buffer is based on memory pages. Fix the flag. 
*/ + vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP); + ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, + &num_pages); + if (ret) { + XDNA_ERR(xdna, "Failed insert pages %d", ret); + vma->vm_ops->close(vma); + return ret; + } + + return 0; + } + + vma->vm_private_data = NULL; + vma->vm_ops = NULL; + ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0); + if (ret) { + XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret); + return ret; + } + + do { + vm_fault_t fault_ret; + + fault_ret = handle_mm_fault(vma, vma->vm_start + offset, + FAULT_FLAG_WRITE, NULL); + if (fault_ret & VM_FAULT_ERROR) { + vma->vm_ops->close(vma); + XDNA_ERR(xdna, "Fault in page failed"); + return -EFAULT; + } + + offset += PAGE_SIZE; + } while (--num_pages); + + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + drm_gem_object_put(to_gobj(abo)); + + return 0; +} + static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj, struct vm_area_struct *vma) { + struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - unsigned long num_pages; int ret; - ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size); + ret = amdxdna_hmm_register(abo, vma); if (ret) return ret; + ret = amdxdna_insert_pages(abo, vma); + if (ret) { + XDNA_ERR(xdna, "Failed insert pages, ret %d", ret); + goto hmm_unreg; + } + + XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx", + drm_vma_node_offset_addr(&gobj->vma_node), abo->type, + vma->vm_start, gobj->size); + return 0; + +hmm_unreg: + amdxdna_hmm_unregister(abo, vma); + return ret; +} + +static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + struct drm_gem_object *gobj = dma_buf->priv; + struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); + unsigned long num_pages = vma_pages(vma); + int ret; + + vma->vm_ops = &drm_gem_shmem_vm_ops; + vma->vm_private_data = gobj; + + drm_gem_object_get(gobj); ret = drm_gem_shmem_mmap(&abo->base, vma); if (ret) - goto hmm_unreg; + goto put_obj; - num_pages = gobj->size >> PAGE_SHIFT; - /* Try to insert the pages */ + /* The buffer is based on memory pages. Fix the flag. 
*/ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP); - ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages); + ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, + &num_pages); if (ret) - XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret); + goto close_vma; return 0; -hmm_unreg: - amdxdna_hmm_unregister(abo); +close_vma: + vma->vm_ops->close(vma); +put_obj: + drm_gem_object_put(gobj); return ret; } -static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf) +static const struct dma_buf_ops amdxdna_dmabuf_ops = { + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = amdxdna_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; + +static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) { - return drm_gem_shmem_vm_ops.fault(vmf); + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &amdxdna_dmabuf_ops; + exp_info.size = gobj->size; + exp_info.flags = flags; + exp_info.priv = gobj; + exp_info.resv = gobj->resv; + + return drm_gem_dmabuf_export(gobj->dev, &exp_info); } -static void amdxdna_gem_vm_open(struct vm_area_struct *vma) +static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo) { - drm_gem_shmem_vm_ops.open(vma); + dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(abo->dma_buf, abo->attach); + dma_buf_put(abo->dma_buf); + drm_gem_object_release(to_gobj(abo)); + kfree(abo); } -static void amdxdna_gem_vm_close(struct vm_area_struct *vma) +static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) { - struct drm_gem_object *gobj = vma->vm_private_data; + struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); + struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); + + XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); + + amdxdna_hmm_unregister(abo, NULL); + flush_workqueue(xdna->notifier_wq); + + if (abo->pinned) + amdxdna_gem_unpin(abo); - amdxdna_hmm_unregister(to_xdna_obj(gobj)); - drm_gem_shmem_vm_ops.close(vma); + if (abo->type == AMDXDNA_BO_DEV) { + mutex_lock(&abo->client->mm_lock); + drm_mm_remove_node(&abo->mm_node); + mutex_unlock(&abo->client->mm_lock); + + vunmap(abo->mem.kva); + drm_gem_object_put(to_gobj(abo->dev_heap)); + drm_gem_object_release(gobj); + mutex_destroy(&abo->lock); + kfree(abo); + return; + } + + if (abo->type == AMDXDNA_BO_DEV_HEAP) + drm_mm_takedown(&abo->mm); + + drm_gem_vunmap(gobj, &map); + mutex_destroy(&abo->lock); + + if (is_import_bo(abo)) { + amdxdna_imported_obj_free(abo); + return; + } + + drm_gem_shmem_free(&abo->base); } -static const struct vm_operations_struct amdxdna_gem_vm_ops = { - .fault = amdxdna_gem_vm_fault, - .open = amdxdna_gem_vm_open, - .close = amdxdna_gem_vm_close, +static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = { + .free = amdxdna_gem_obj_free, }; static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { @@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = amdxdna_gem_obj_mmap, - .vm_ops = &amdxdna_gem_vm_ops, + .vm_ops = &drm_gem_shmem_vm_ops, + .export = amdxdna_gem_prime_export, }; static struct amdxdna_gem_obj * @@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size) 
abo->mem.userptr = AMDXDNA_INVALID_ADDR; abo->mem.dev_addr = AMDXDNA_INVALID_ADDR; abo->mem.size = size; + INIT_LIST_HEAD(&abo->mem.umap_list); return abo; } @@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size) return to_gobj(abo); } +struct drm_gem_object * +amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attach; + struct amdxdna_gem_obj *abo; + struct drm_gem_object *gobj; + struct sg_table *sgt; + int ret; + + get_dma_buf(dma_buf); + + attach = dma_buf_attach(dma_buf, dev->dev); + if (IS_ERR(attach)) { + ret = PTR_ERR(attach); + goto put_buf; + } + + sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto fail_detach; + } + + gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(gobj)) { + ret = PTR_ERR(gobj); + goto fail_unmap; + } + + abo = to_xdna_obj(gobj); + abo->attach = attach; + abo->dma_buf = dma_buf; + + return gobj; + +fail_unmap: + dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); +fail_detach: + dma_buf_detach(dma_buf, attach); +put_buf: + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} + static struct amdxdna_gem_obj * amdxdna_drm_alloc_shmem(struct drm_device *dev, struct amdxdna_drm_create_bo *args, @@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; - ret = drm_gem_vmap_unlocked(to_gobj(abo), &map); + ret = drm_gem_vmap(to_gobj(abo), &map); if (ret) { XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); goto release_obj; @@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo) struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); int ret; + if (is_import_bo(abo)) + return 0; + switch (abo->type) { case AMDXDNA_BO_SHMEM: case AMDXDNA_BO_DEV_HEAP: @@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo) void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo) { + if (is_import_bo(abo)) + return; + if (abo->type == AMDXDNA_BO_DEV) abo = abo->dev_heap; @@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, goto put_obj; } - if (abo->type == AMDXDNA_BO_DEV) + if (is_import_bo(abo)) + drm_clflush_sg(abo->base.sgt); + else if (abo->type == AMDXDNA_BO_DEV) drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages); else drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT); diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h index 8ccc0375dd9d..aee97e971d6d 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.h +++ b/drivers/accel/amdxdna/amdxdna_gem.h @@ -6,6 +6,20 @@ #ifndef _AMDXDNA_GEM_H_ #define _AMDXDNA_GEM_H_ +#include <linux/hmm.h> + +struct amdxdna_umap { + struct vm_area_struct *vma; + struct mmu_interval_notifier notifier; + struct hmm_range range; + struct work_struct hmm_unreg_work; + struct amdxdna_gem_obj *abo; + struct list_head node; + struct kref refcnt; + bool invalid; + bool unmapped; +}; + struct amdxdna_mem { u64 userptr; void *kva; @@ -13,8 +27,7 @@ struct amdxdna_mem { size_t size; struct page **pages; u32 nr_pages; - struct mmu_interval_notifier notifier; - unsigned long *pfns; + struct list_head umap_list; bool map_invalid; }; @@ -31,9 +44,12 @@ struct amdxdna_gem_obj { struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */ u32 assigned_hwctx; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attach; }; #define to_gobj(obj) (&(obj)->base.base) 
+#define is_import_bo(obj) ((obj)->attach) static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj) { @@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo) drm_gem_object_put(to_gobj(abo)); } +void amdxdna_umap_put(struct amdxdna_umap *mapp); + struct drm_gem_object * amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size); +struct drm_gem_object * +amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct amdxdna_gem_obj * amdxdna_drm_alloc_dev_bo(struct drm_device *dev, struct amdxdna_drm_create_bo *args, diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index f5b8497cf5ad..f2bf1d374cc7 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = { .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls), .gem_create_object = amdxdna_gem_create_object_cb, + .gem_prime_import = amdxdna_gem_prime_import, }; static const struct amdxdna_dev_info * @@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id) fs_reclaim_release(GFP_KERNEL); } + xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0); + if (!xdna->notifier_wq) + return -ENOMEM; + mutex_lock(&xdna->dev_lock); ret = xdna->dev_info->ops->init(xdna); mutex_unlock(&xdna->dev_lock); if (ret) { XDNA_ERR(xdna, "Hardware init failed, ret %d", ret); - return ret; + goto destroy_notifier_wq; } ret = amdxdna_sysfs_init(xdna); @@ -301,6 +306,8 @@ failed_dev_fini: mutex_lock(&xdna->dev_lock); xdna->dev_info->ops->fini(xdna); mutex_unlock(&xdna->dev_lock); +destroy_notifier_wq: + destroy_workqueue(xdna->notifier_wq); return ret; } @@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev) struct device *dev = &pdev->dev; struct amdxdna_client *client; + destroy_workqueue(xdna->notifier_wq); + pm_runtime_get_noresume(dev); pm_runtime_forbid(dev); diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 37848a8d8031..ab79600911aa 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -6,6 +6,7 @@ #ifndef _AMDXDNA_PCI_DRV_H_ #define _AMDXDNA_PCI_DRV_H_ +#include <linux/workqueue.h> #include <linux/xarray.h> #define XDNA_INFO(xdna, fmt, args...) 
drm_info(&(xdna)->ddev, fmt, ##args) @@ -98,6 +99,7 @@ struct amdxdna_dev { struct list_head client_list; struct amdxdna_fw_ver fw_ver; struct rw_semaphore notifier_lock; /* for mmu notifier*/ + struct workqueue_struct *notifier_wq; }; /* diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index eff1d3ca075f..0e7748c5e117 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -374,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev) { int ret; + drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter)); + drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); + /* Update boot params located at first 4KB of FW memory */ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); @@ -573,6 +576,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev) vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID; vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID; atomic64_set(&vdev->unique_id_counter, 0); + atomic_set(&vdev->job_timeout_counter, 0); xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1); xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 92753effb1c9..5497e7030e91 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -154,6 +154,7 @@ struct ivpu_device { struct mutex submitted_jobs_lock; /* Protects submitted_jobs */ struct xarray submitted_jobs_xa; struct ivpu_ipc_consumer job_done_consumer; + atomic_t job_timeout_counter; atomic64_t unique_id_counter; diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 1d0b2bd9d65c..9a3935be1c05 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -39,6 +39,7 @@ struct ivpu_fw_info { u64 read_only_addr; u32 read_only_size; u32 sched_mode; + u64 last_heartbeat; }; int ivpu_fw_init(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 8741c73b92ce..e0d242d9f3e5 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -30,7 +30,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? 
bo->ctx->id : 0, (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, - (bool)bo->base.base.import_attach); + (bool)drm_gem_is_imported(&bo->base.base)); } /* @@ -122,7 +122,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) bo->ctx = NULL; } - if (bo->base.base.import_attach) + if (drm_gem_is_imported(&bo->base.base)) return; dma_resv_lock(bo->base.base.resv, NULL); @@ -282,7 +282,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) ivpu_bo_unbind_locked(bo); mutex_destroy(&bo->lock); - drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1); + drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); drm_gem_shmem_free(&bo->base); } @@ -362,7 +362,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, if (flags & DRM_IVPU_BO_MAPPABLE) { dma_resv_lock(bo->base.base.resv, NULL); - ret = drm_gem_shmem_vmap(&bo->base, &map); + ret = drm_gem_shmem_vmap_locked(&bo->base, &map); dma_resv_unlock(bo->base.base.resv); if (ret) @@ -387,7 +387,7 @@ void ivpu_bo_free(struct ivpu_bo *bo) if (bo->flags & DRM_IVPU_BO_MAPPABLE) { dma_resv_lock(bo->base.base.resv, NULL); - drm_gem_shmem_vunmap(&bo->base, &map); + drm_gem_shmem_vunmap_locked(&bo->base, &map); dma_resv_unlock(bo->base.base.resv); } @@ -461,7 +461,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p) if (bo->mmu_mapped) drm_printf(p, " mmu_mapped"); - if (bo->base.base.import_attach) + if (drm_gem_is_imported(&bo->base.base)) drm_printf(p, " imported"); drm_printf(p, "\n"); diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index ac0e22454596..ea30db181cd7 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -34,6 +34,7 @@ module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644); MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default"); #define PM_RESCHEDULE_LIMIT 5 +#define PM_TDR_HEARTBEAT_LIMIT 30 static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) { @@ -44,6 +45,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) ivpu_fw_log_reset(vdev); ivpu_fw_load(vdev); fw->entry_point = fw->cold_boot_entry_point; + fw->last_heartbeat = 0; } static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) @@ -189,7 +191,24 @@ static void ivpu_job_timeout_work(struct work_struct *work) { struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work); struct ivpu_device *vdev = pm->vdev; + u64 heartbeat; + if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) { + ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n"); + goto recovery; + } + + if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) { + ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n"); + goto recovery; + } + + vdev->fw->last_heartbeat = heartbeat; + ivpu_start_job_timeout_detection(vdev); + return; + +recovery: + atomic_set(&vdev->job_timeout_counter, 0); ivpu_pm_trigger_recovery(vdev, "TDR"); } @@ -204,6 +223,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev) { cancel_delayed_work_sync(&vdev->pm->job_timeout_work); + atomic_set(&vdev->job_timeout_counter, 0); } int ivpu_pm_suspend_cb(struct device *dev) diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 43aba57b48f0..1bce1af7c72c 100644 --- a/drivers/accel/qaic/qaic_data.c +++ 
b/drivers/accel/qaic/qaic_data.c @@ -609,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc struct scatterlist *sg; int ret = 0; - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return -EINVAL; for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) { @@ -630,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj) { struct qaic_bo *bo = to_qaic_bo(obj); - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { /* DMABUF/PRIME Path */ drm_prime_gem_destroy(obj, NULL); } else { @@ -870,7 +870,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo, { int ret; - if (bo->base.import_attach) + if (drm_gem_is_imported(&bo->base)) ret = qaic_prepare_import_bo(bo, hdr); else ret = qaic_prepare_export_bo(qdev, bo, hdr); @@ -894,7 +894,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo) { - if (bo->base.import_attach) + if (drm_gem_is_imported(&bo->base)) qaic_unprepare_import_bo(bo); else qaic_unprepare_export_bo(qdev, bo); diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index ba0cf2f94732..a991b8198dc4 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d mhi_unprepare: mhi_unprepare_from_transfer(mhi_dev); destroy_workqueue: - flush_workqueue(qdev->bootlog_wq); destroy_workqueue(qdev->bootlog_wq); out: return ret; @@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev) qdev = dev_get_drvdata(&mhi_dev->dev); mhi_unprepare_from_transfer(qdev->bootlog_ch); - flush_workqueue(qdev->bootlog_wq); destroy_workqueue(qdev->bootlog_wq); qdev->bootlog_ch = NULL; } diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index ef30445527a2..bcc26785175d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -53,6 +53,7 @@ struct intel_gtt_driver { * of the mmio register file, that's done in the generic code. */ void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); + dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! 
*/ @@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i810_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = val & I810_PTE_LOCAL; + + return val & ~0xfff; +} + static resource_size_t intel_gtt_stolen_size(void) { u16 gmch_ctrl; @@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i830_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u32 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return val & ~0xfff; +} + bool intel_gmch_enable_gtt(void) { u8 __iomem *reg; @@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, } EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries); +dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg, + bool *is_present, bool *is_local) +{ + return intel_private.driver->read_entry(pg, is_present, is_local); +} +EXPORT_SYMBOL(intel_gmch_gtt_read_entry); + #if IS_ENABLED(CONFIG_AGP_INTEL) static void intel_gmch_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, @@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr, writel_relaxed(addr | pte_flags, intel_private.gtt + entry); } +static dma_addr_t i965_read_entry(unsigned int entry, + bool *is_present, bool *is_local) +{ + u64 val; + + val = readl(intel_private.gtt + entry); + + *is_present = val & I810_PTE_VALID; + *is_local = false; + + return ((val & 0xf0) << 28) | (val & ~0xfff); +} + static int i9xx_setup(void) { phys_addr_t reg_addr; @@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = { .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, + .read_entry = i810_read_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, @@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, @@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
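Among the new read_entry hooks, the i965 variant is the only one that reshuffles bits: on that generation the PTE stores physical address bits 35:32 in PTE bits 7:4, which matches the 36-bit dma_mask_size these GTT drivers declare. A standalone restatement of the decode in i965_read_entry() above::

    #include <linux/types.h>

    /* Recover a 36-bit address from a 32-bit i965-class GTT entry:
     * PTE bits 7:4 carry address bits 35:32; the low 12 bits are
     * flags and are masked off. */
    static u64 i965_pte_to_addr(u32 val)
    {
        return ((u64)(val & 0xf0) << 28) | (val & ~0xfffu);
    }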
*/ .write_entry = i830_write_entry, + .read_entry = i830_read_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, + .read_entry = i965_read_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 5baa83b85515..0c48d41dd5eb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -636,10 +636,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) || !exp_info->ops->release)) return ERR_PTR(-EINVAL); - if (WARN_ON(exp_info->ops->cache_sgt_mapping && - (exp_info->ops->pin || exp_info->ops->unpin))) - return ERR_PTR(-EINVAL); - if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) return ERR_PTR(-EINVAL); @@ -782,7 +778,7 @@ static void mangle_sg_table(struct sg_table *sg_table) /* To catch abuse of the underlying struct page by importers mix * up the bits, but take care to preserve the low SG_ bits to - * not corrupt the sgt. The mixing is undone in __unmap_dma_buf + * not corrupt the sgt. The mixing is undone on unmap * before passing the sgt back to the exporter. 
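The mangling the rewritten dma-buf comment refers to is a debug-only XOR over each scatterlist entry; since XOR is its own inverse, running it again on unmap restores the table. Roughly (the mask value is an assumption based on the upstream CONFIG_DMABUF_DEBUG code)::

    #include <linux/scatterlist.h>

    /* Scramble the high bits of sg->page_link so importers that
     * dereference struct page fault loudly, while preserving the low
     * bits that carry the SG_* chain flags. XOR twice is a no-op. */
    static void mangle(struct sg_table *sgt)
    {
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)
            sg->page_link ^= ~0xffUL;
    }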
*/ for_each_sgtable_sg(sg_table, sg, i) @@ -790,29 +786,19 @@ static void mangle_sg_table(struct sg_table *sg_table) #endif } -static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction direction) -{ - struct sg_table *sg_table; - signed long ret; - - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); - if (IS_ERR_OR_NULL(sg_table)) - return sg_table; - if (!dma_buf_attachment_is_dynamic(attach)) { - ret = dma_resv_wait_timeout(attach->dmabuf->resv, - DMA_RESV_USAGE_KERNEL, true, - MAX_SCHEDULE_TIMEOUT); - if (ret < 0) { - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, - direction); - return ERR_PTR(ret); - } - } +static inline bool +dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) +{ + return !!attach->importer_ops; +} - mangle_sg_table(sg_table); - return sg_table; +static bool +dma_buf_pin_on_map(struct dma_buf_attachment *attach) +{ + return attach->dmabuf->ops->pin && + (!dma_buf_attachment_is_dynamic(attach) || + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); } /** @@ -935,48 +921,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, list_add(&attach->node, &dmabuf->attachments); dma_resv_unlock(dmabuf->resv); - /* When either the importer or the exporter can't handle dynamic - * mappings we cache the mapping here to avoid issues with the - * reservation object lock. - */ - if (dma_buf_attachment_is_dynamic(attach) != - dma_buf_is_dynamic(dmabuf)) { - struct sg_table *sgt; - - dma_resv_lock(attach->dmabuf->resv, NULL); - if (dma_buf_is_dynamic(attach->dmabuf)) { - ret = dmabuf->ops->pin(attach); - if (ret) - goto err_unlock; - } - - sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); - if (!sgt) - sgt = ERR_PTR(-ENOMEM); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto err_unpin; - } - dma_resv_unlock(attach->dmabuf->resv); - attach->sgt = sgt; - attach->dir = DMA_BIDIRECTIONAL; - } - return attach; err_attach: kfree(attach); return ERR_PTR(ret); - -err_unpin: - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); - -err_unlock: - dma_resv_unlock(attach->dmabuf->resv); - - dma_buf_detach(dmabuf, attach); - return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF"); @@ -995,16 +944,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, } EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF"); -static void __unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sg_table, - enum dma_data_direction direction) -{ - /* uses XOR, hence this unmangles */ - mangle_sg_table(sg_table); - - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); -} - /** * dma_buf_detach - Remove the given attachment from dmabuf's attachments list * @dmabuf: [in] buffer to detach from. 
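The two helpers introduced above carry the whole rework: an attachment is dynamic exactly when the importer supplied importer_ops, and pinning moves to map time whenever the exporter can pin but one side cannot handle buffer moves. As a truth table::

    #include <linux/types.h>

    /* Condensed restatement of dma_buf_pin_on_map(): pin at map time
     * iff the exporter implements ->pin and either the importer is
     * static (no importer_ops) or CONFIG_DMABUF_MOVE_NOTIFY is off. */
    static bool pin_on_map(bool exporter_can_pin, bool importer_is_dynamic,
                           bool move_notify_enabled)
    {
        return exporter_can_pin &&
               (!importer_is_dynamic || !move_notify_enabled);
    }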
@@ -1020,16 +959,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) return; dma_resv_lock(dmabuf->resv, NULL); - - if (attach->sgt) { - - __unmap_dma_buf(attach, attach->sgt, attach->dir); - - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); - } list_del(&attach->node); - dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) @@ -1058,7 +988,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach) struct dma_buf *dmabuf = attach->dmabuf; int ret = 0; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); @@ -1081,7 +1011,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach) { struct dma_buf *dmabuf = attach->dmabuf; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); @@ -1115,7 +1045,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; - int r; + signed long ret; might_sleep(); @@ -1124,41 +1054,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt) { + if (dma_buf_pin_on_map(attach)) { + ret = attach->dmabuf->ops->pin(attach); /* - * Two mappings with different directions for the same - * attachment are not allowed. + * Catch exporters making buffers inaccessible even when + * attachments preventing that exist. */ - if (attach->dir != direction && - attach->dir != DMA_BIDIRECTIONAL) - return ERR_PTR(-EBUSY); - - return attach->sgt; - } - - if (dma_buf_is_dynamic(attach->dmabuf)) { - if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { - r = attach->dmabuf->ops->pin(attach); - if (r) - return ERR_PTR(r); - } + WARN_ON_ONCE(ret == EBUSY); + if (ret) + return ERR_PTR(ret); } - sg_table = __map_dma_buf(attach, direction); + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); + if (IS_ERR(sg_table)) + goto error_unpin; - if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - attach->dmabuf->ops->unpin(attach); - - if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { - attach->sgt = sg_table; - attach->dir = direction; + /* + * Importers with static attachments don't wait for fences. 
+ */ + if (!dma_buf_attachment_is_dynamic(attach)) { + ret = dma_resv_wait_timeout(attach->dmabuf->resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) + goto error_unmap; } + mangle_sg_table(sg_table); #ifdef CONFIG_DMA_API_DEBUG - if (!IS_ERR(sg_table)) { + { struct scatterlist *sg; u64 addr; int len; @@ -1175,6 +1101,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } #endif /* CONFIG_DMA_API_DEBUG */ return sg_table; + +error_unmap: + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); + sg_table = ERR_PTR(ret); + +error_unpin: + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); + + return sg_table; } EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF"); @@ -1227,14 +1163,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt == sg_table) - return; - - __unmap_dma_buf(attach, sg_table, direction); + mangle_sg_table(sg_table); + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); - if (dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - dma_buf_unpin(attach); + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF"); diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 26d5dc89ea16..82b1b714300d 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -21,8 +21,6 @@ #include <linux/slab.h> #include <linux/vmalloc.h> -static struct dma_heap *sys_heap; - struct system_heap_buffer { struct dma_heap *heap; struct list_head attachments; @@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = { static int __init system_heap_create(void) { struct dma_heap_export_info exp_info; + struct dma_heap *sys_heap; exp_info.name = "system"; exp_info.ops = &system_heap_ops; diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 22a808995f10..4f27ee93a00c 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence) return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops); } -static void timeline_fence_value_str(struct dma_fence *fence, - char *str, int size) -{ - snprintf(str, size, "%lld", fence->seqno); -} - -static void timeline_fence_timeline_value_str(struct dma_fence *fence, - char *str, int size) -{ - struct sync_timeline *parent = dma_fence_parent(fence); - - snprintf(str, size, "%d", parent->value); -} - static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); @@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = { .get_timeline_name = timeline_fence_get_timeline_name, .signaled = timeline_fence_signaled, .release = timeline_fence_release, - .fence_value_str = timeline_fence_value_str, - .timeline_value_str = timeline_fence_timeline_value_str, .set_deadline = timeline_fence_set_deadline, }; diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 237bce21d1e7..270daae7d89a 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -82,25 +82,8 @@ static void sync_print_fence(struct seq_file *s, seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); } - if (fence->ops->timeline_value_str && - fence->ops->fence_value_str) { - char value[64]; - bool success; - - 
fence->ops->fence_value_str(fence, value, sizeof(value)); - success = strlen(value); - - if (success) { - seq_printf(s, ": %s", value); - - fence->ops->timeline_value_str(fence, value, - sizeof(value)); - - if (strlen(value)) - seq_printf(s, " / %s", value); - } - } - + seq_printf(s, ": %lld", fence->seqno); + seq_printf(s, " / %d", parent->value); seq_putc(s, '\n'); } diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index e74e36a8ecda..7eee3eb47a8e 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf, } static const struct dma_buf_ops udmabuf_ops = { - .cache_sgt_mapping = true, .map_dma_buf = map_udmabuf, .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 7309394b8fc9..e57bff702b5f 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) __weak __alias(__efi_mem_desc_lookup); +EXPORT_SYMBOL_GPL(efi_mem_desc_lookup); /* * Calculate the highest address of an efi memory descriptor. diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index 75a186bf8f8e..592d8a644619 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si, if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI) return false; - /* - * The meaning of depth and bpp for direct-color formats is - * inconsistent: - * - * - DRM format info specifies depth as the number of color - * bits; including alpha, but not including filler bits. - * - Linux' EFI platform code computes lfb_depth from the - * individual color channels, including the reserved bits. - * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later - * versions use 15. - * - On the kernel command line, 'bpp' of 32 is usually - * XRGB8888 including the filler bits, but 15 is XRGB1555 - * not including the filler bit. - * - * It's not easily possible to fix this in struct screen_info, - * as this could break UAPI. The best solution is to compute - * bits_per_pixel from the color bits, reserved bits and - * reported lfb_depth, whichever is highest. In the loop below, - * ignore simplefb formats with alpha bits, as EFI and VESA - * don't specify alpha channels. - */ - if (si->lfb_depth > 8) { - bits_per_pixel = max(max3(si->red_size + si->red_pos, - si->green_size + si->green_pos, - si->blue_size + si->blue_pos), - si->rsvd_size + si->rsvd_pos); - bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth); - } else { - bits_per_pixel = si->lfb_depth; - } + bits_per_pixel = __screen_info_lfb_bits_per_pixel(si); for (i = 0; i < ARRAY_SIZE(formats); ++i) { const struct simplefb_format *f = &formats[i]; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f01925ed8176..5088698182d3 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -26,6 +26,11 @@ menuconfig DRM details. You should also select and configure AGP (/dev/agpgart) support if it is available for your platform. 
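Returning to the sysfb_simplefb.c hunk above: the open-coded bits-per-pixel heuristic moved into __screen_info_lfb_bits_per_pixel(), which is assumed to keep the semantics the deleted comment spelled out. Reconstructed from that comment::

    #include <linux/minmax.h>
    #include <linux/screen_info.h>

    /* For direct-color formats (depth > 8), bits_per_pixel is the
     * highest of the channel extents (offset + width, including the
     * reserved/filler channel) and the firmware-reported lfb_depth;
     * paletted modes use lfb_depth as-is. */
    static u32 lfb_bpp(const struct screen_info *si)
    {
        u32 bpp;

        if (si->lfb_depth <= 8)
            return si->lfb_depth;

        bpp = max3(si->red_size + si->red_pos,
                   si->green_size + si->green_pos,
                   si->blue_size + si->blue_pos);
        bpp = max(bpp, (u32)(si->rsvd_size + si->rsvd_pos));

        return max_t(u32, bpp, si->lfb_depth);
    }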
+menu "DRM debugging options" +depends on DRM +source "drivers/gpu/drm/Kconfig.debug" +endmenu + if DRM config DRM_MIPI_DBI @@ -37,65 +42,6 @@ config DRM_MIPI_DSI bool depends on DRM -config DRM_DEBUG_MM - bool "Insert extra checks and debug info into the DRM range managers" - default n - depends on DRM - depends on STACKTRACE_SUPPORT - select STACKDEPOT - help - Enable allocation tracking of memory manager and leak detection on - shutdown. - - Recommended for driver developers only. - - If in doubt, say "N". - -config DRM_USE_DYNAMIC_DEBUG - bool "use dynamic debug to implement drm.debug" - default n - depends on BROKEN - depends on DRM - depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE - depends on JUMP_LABEL - help - Use dynamic-debug to avoid drm_debug_enabled() runtime overheads. - Due to callsite counts in DRM drivers (~4k in amdgpu) and 56 - bytes per callsite, the .data costs can be substantial, and - are therefore configurable. - -config DRM_KUNIT_TEST_HELPERS - tristate - depends on DRM && KUNIT - select DRM_KMS_HELPER - help - KUnit Helpers for KMS drivers. - -config DRM_KUNIT_TEST - tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS - depends on DRM && KUNIT && MMU - select DRM_BUDDY - select DRM_DISPLAY_DP_HELPER - select DRM_DISPLAY_HDMI_STATE_HELPER - select DRM_DISPLAY_HELPER - select DRM_EXEC - select DRM_EXPORT_FOR_TESTS if m - select DRM_GEM_SHMEM_HELPER - select DRM_KUNIT_TEST_HELPERS - select DRM_LIB_RANDOM - select PRIME_NUMBERS - default KUNIT_ALL_TESTS - help - This builds unit tests for DRM. This option is not useful for - distributions or general kernels, but only for kernel - developers working on DRM and associated drivers. - - For more information on KUnit and unit tests in general, - please refer to the KUnit documentation in - Documentation/dev-tools/kunit/. - - If in doubt, say "N". - config DRM_KMS_HELPER tristate depends on DRM @@ -247,23 +193,6 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. -config DRM_TTM_KUNIT_TEST - tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS - default n - depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) - select DRM_TTM - select DRM_BUDDY - select DRM_EXPORT_FOR_TESTS if m - select DRM_KUNIT_TEST_HELPERS - default KUNIT_ALL_TESTS - help - Enables unit tests for TTM, a GPU memory manager subsystem used - to manage memory buffers. This option is mostly useful for kernel - developers. It depends on (UML || COMPILE_TEST) since no other driver - which uses TTM can be loaded while running the tests. - - If in doubt, say "N". - config DRM_EXEC tristate depends on DRM @@ -335,6 +264,8 @@ config DRM_SCHED tristate depends on DRM +source "drivers/gpu/drm/sysfb/Kconfig" + source "drivers/gpu/drm/arm/Kconfig" source "drivers/gpu/drm/radeon/Kconfig" @@ -474,9 +405,6 @@ config DRM_HYPERV If M is selected the module will be called hyperv_drm. -config DRM_EXPORT_FOR_TESTS - bool - # Separate option as not all DRM drivers use it config DRM_PANEL_BACKLIGHT_QUIRKS tristate @@ -489,31 +417,6 @@ config DRM_PRIVACY_SCREEN bool default n -config DRM_WERROR - bool "Compile the drm subsystem with warnings as errors" - depends on DRM && EXPERT - depends on !WERROR - default n - help - A kernel build should not cause any compiler warnings, and this - enables the '-Werror' flag to enforce that rule in the drm subsystem. - - The drm subsystem enables more warnings than the kernel default, so - this config option is disabled by default. - - If in doubt, say N. 
- -config DRM_HEADER_TEST - bool "Ensure DRM headers are self-contained and pass kernel-doc" - depends on DRM && EXPERT && BROKEN - default n - help - Ensure the DRM subsystem headers both under drivers/gpu/drm and - include/drm compile, are self-contained, have header guards, and have - no kernel-doc warnings. - - If in doubt, say N. - endif # Separate option because drm_panel_orientation_quirks.c is shared with fbdev diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug new file mode 100644 index 000000000000..fa6ee76f4d3c --- /dev/null +++ b/drivers/gpu/drm/Kconfig.debug @@ -0,0 +1,116 @@ +config DRM_USE_DYNAMIC_DEBUG + bool "use dynamic debug to implement drm.debug" + default n + depends on BROKEN + depends on DRM + depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE + depends on JUMP_LABEL + help + Use dynamic-debug to avoid drm_debug_enabled() runtime overheads. + Due to callsite counts in DRM drivers (~4k in amdgpu) and 56 + bytes per callsite, the .data costs can be substantial, and + are therefore configurable. + +config DRM_WERROR + bool "Compile the drm subsystem with warnings as errors" + depends on DRM && EXPERT + depends on !WERROR + default n + help + A kernel build should not cause any compiler warnings, and this + enables the '-Werror' flag to enforce that rule in the drm subsystem. + + The drm subsystem enables more warnings than the kernel default, so + this config option is disabled by default. + + If in doubt, say N. + +config DRM_HEADER_TEST + bool "Ensure DRM headers are self-contained and pass kernel-doc" + depends on DRM && EXPERT && BROKEN + default n + help + Ensure the DRM subsystem headers both under drivers/gpu/drm and + include/drm compile, are self-contained, have header guards, and have + no kernel-doc warnings. + + If in doubt, say N. + +config DRM_DEBUG_MM + bool "Insert extra checks and debug info into the DRM range managers" + default n + depends on DRM + depends on STACKTRACE_SUPPORT + select STACKDEPOT + help + Enable allocation tracking of memory manager and leak detection on + shutdown. + + Recommended for driver developers only. + + If in doubt, say "N". + +config DRM_KUNIT_TEST_HELPERS + tristate + depends on DRM && KUNIT + select DRM_KMS_HELPER + help + KUnit Helpers for KMS drivers. + +config DRM_KUNIT_TEST + tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS + depends on DRM && KUNIT && MMU + select DRM_BRIDGE_CONNECTOR + select DRM_BUDDY + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_EXEC + select DRM_EXPORT_FOR_TESTS if m + select DRM_GEM_SHMEM_HELPER + select DRM_KUNIT_TEST_HELPERS + select DRM_LIB_RANDOM + select PRIME_NUMBERS + default KUNIT_ALL_TESTS + help + This builds unit tests for DRM. This option is not useful for + distributions or general kernels, but only for kernel + developers working on DRM and associated drivers. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + + If in doubt, say "N". + +config DRM_TTM_KUNIT_TEST + tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS + default n + depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) + select DRM_TTM + select DRM_BUDDY + select DRM_EXPORT_FOR_TESTS if m + select DRM_KUNIT_TEST_HELPERS + default KUNIT_ALL_TESTS + help + Enables unit tests for TTM, a GPU memory manager subsystem used + to manage memory buffers. This option is mostly useful for kernel + developers. 
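For readers who have not used the framework these options gate: a KUnit suite is a table of test cases registered at build time and run by the KUnit runner. A minimal, purely illustrative suite (not part of this patch) looks like::

    #include <kunit/test.h>

    static void example_math_test(struct kunit *test)
    {
        /* Failed expectations report through the runner, not BUG(). */
        KUNIT_EXPECT_EQ(test, 2 + 2, 4);
    }

    static struct kunit_case example_cases[] = {
        KUNIT_CASE(example_math_test),
        {}
    };

    static struct kunit_suite example_suite = {
        .name = "example",
        .test_cases = example_cases,
    };
    kunit_test_suite(example_suite);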
It depends on (UML || COMPILE_TEST) since no other driver + which uses TTM can be loaded while running the tests. + + If in doubt, say "N". + +config DRM_SCHED_KUNIT_TEST + tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS + select DRM_SCHED + depends on DRM && KUNIT + default KUNIT_ALL_TESTS + help + Choose this option to build unit tests for the DRM scheduler. + + Recommended for driver developers only. + + If in doubt, say "N". + +config DRM_EXPORT_FOR_TESTS + bool diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index ed54a546bbe2..b5d5561bbe5f 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o drm_kms_helper-y := \ drm_atomic_helper.o \ drm_atomic_state_helper.o \ + drm_bridge_helper.o \ drm_crtc_helper.o \ drm_damage_helper.o \ drm_flip_work.o \ @@ -204,6 +205,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ obj-y += hisilicon/ obj-y += mxsfb/ +obj-y += sysfb/ obj-y += tiny/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c index ad80542b60ed..2b60128e2c69 100644 --- a/drivers/gpu/drm/adp/adp-mipi.c +++ b/drivers/gpu/drm/adp/adp-mipi.c @@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = { }; static int adp_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct adp_mipi_drv_private *adp = container_of(bridge, struct adp_mipi_drv_private, bridge); - return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags); } static const struct drm_bridge_funcs adp_dsi_bridge_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 521b9faab180..492813ab1b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect u8 link_status[DP_LINK_STATUS_SIZE]; struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index 139ab00dee8f..2d3ad7610c2e 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -37,6 +37,7 @@ */ /* define for signature structure */ +#define AST_HWC_SIGNATURE_SIZE SZ_32 #define AST_HWC_SIGNATURE_CHECKSUM 0x00 #define 
AST_HWC_SIGNATURE_SizeX 0x04 #define AST_HWC_SIGNATURE_SizeY 0x08 @@ -45,6 +46,21 @@ #define AST_HWC_SIGNATURE_HOTSPOTX 0x14 #define AST_HWC_SIGNATURE_HOTSPOTY 0x18 +static unsigned long ast_cursor_vram_size(void) +{ + return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE; +} + +long ast_cursor_vram_offset(struct ast_device *ast) +{ + unsigned long size = ast_cursor_vram_size(); + + if (size > ast->vram_size) + return -EINVAL; + + return ALIGN_DOWN(ast->vram_size - size, SZ_8); +} + static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height) { u32 csum = 0; @@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un static void ast_set_cursor_image(struct ast_device *ast, const u8 *src, unsigned int width, unsigned int height) { - u8 __iomem *dst = ast->cursor_plane.base.vaddr; + u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base); u32 csum; csum = ast_cursor_calculate_checksum(src, width, height); @@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, struct ast_device *ast = to_ast_device(plane->dev); struct drm_rect damage; u64 dst_off = ast_plane->offset; - u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */ + u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */ u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */ unsigned int offset_x, offset_y; u16 x, y; @@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast) struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane; struct ast_plane *ast_plane = &ast_cursor_plane->base; struct drm_plane *cursor_plane = &ast_plane->base; - size_t size; - void __iomem *vaddr; - u64 offset; + unsigned long size; + long offset; int ret; - /* - * Allocate backing storage for cursors. The BOs are permanently - * pinned to the top end of the VRAM. 
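The cursor still lives at the very top of VRAM; what changes is that its offset is now computed on demand instead of being carved out of vram_fb_available at init time. The placement math, restated with the constants defined in this series::

    #include <linux/align.h>
    #include <linux/errno.h>
    #include <linux/sizes.h>

    #define HWC_SIZE      (64 * 128)  /* AST_HWC_SIZE: 64 lines of 128 bytes */
    #define HWC_SIG_SIZE  SZ_32       /* AST_HWC_SIGNATURE_SIZE */

    /* Mirror of ast_cursor_vram_offset(): the cursor occupies the last
     * HWC_SIZE + HWC_SIG_SIZE bytes of VRAM, aligned down to 8 bytes. */
    static long cursor_offset(unsigned long vram_size)
    {
        unsigned long size = HWC_SIZE + HWC_SIG_SIZE;

        if (size > vram_size)
            return -EINVAL;

        return ALIGN_DOWN(vram_size - size, SZ_8);
    }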
- */ - - size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); + size = ast_cursor_vram_size(); + offset = ast_cursor_vram_offset(ast); + if (offset < 0) + return offset; - if (ast->vram_fb_available < size) - return -ENOMEM; - - vaddr = ast->vram + ast->vram_fb_available - size; - offset = ast->vram_fb_available - size; - - ret = ast_plane_init(dev, ast_plane, vaddr, offset, size, + ret = ast_plane_init(dev, ast_plane, offset, size, 0x01, &ast_cursor_plane_funcs, ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats), NULL, DRM_PLANE_TYPE_CURSOR); @@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast) drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs); drm_plane_enable_fb_damage_clips(cursor_plane); - ast->vram_fb_available -= size; - return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d2c2605d2728..2ee402096cd9 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -112,12 +112,9 @@ enum ast_config_mode { #define AST_MAX_HWC_WIDTH 64 #define AST_MAX_HWC_HEIGHT 64 - #define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2) #define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH) -#define AST_HWC_SIGNATURE_SIZE 32 - /* * Planes */ @@ -125,7 +122,6 @@ enum ast_config_mode { struct ast_plane { struct drm_plane base; - void __iomem *vaddr; u64 offset; unsigned long size; }; @@ -183,7 +179,6 @@ struct ast_device { void __iomem *vram; unsigned long vram_base; unsigned long vram_size; - unsigned long vram_fb_available; struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ @@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val); } -#define AST_VIDMEM_SIZE_8M 0x00800000 -#define AST_VIDMEM_SIZE_16M 0x01000000 -#define AST_VIDMEM_SIZE_32M 0x02000000 -#define AST_VIDMEM_SIZE_64M 0x04000000 -#define AST_VIDMEM_SIZE_128M 0x08000000 - -#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M - struct ast_vbios_stdtable { u8 misc; u8 seq[4]; @@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast); int ast_sil164_output_init(struct ast_device *ast); /* ast_cursor.c */ +long ast_cursor_vram_offset(struct ast_device *ast); int ast_cursor_plane_init(struct ast_device *ast); /* ast dp501 */ @@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast); /* ast_mode.c */ int ast_mode_config_init(struct ast_device *ast); int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane, - void __iomem *vaddr, u64 offset, unsigned long size, + u64 offset, unsigned long size, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type); +void __iomem *ast_plane_vaddr(struct ast_plane *ast); #endif diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 6dfe6d9777d4..0bc140319464 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -35,36 +35,35 @@ static u32 ast_get_vram_size(struct ast_device *ast) { - u8 jreg; u32 vram_size; + u8 vgacr99, vgacraa; - vram_size = AST_VIDMEM_DEFAULT_SIZE; - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff); - switch (jreg & 3) { + vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa); + switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) { case 0: - vram_size = AST_VIDMEM_SIZE_8M; + vram_size = SZ_8M; break; case 1: - vram_size = 
AST_VIDMEM_SIZE_16M; + vram_size = SZ_16M; break; case 2: - vram_size = AST_VIDMEM_SIZE_32M; + vram_size = SZ_32M; break; case 3: - vram_size = AST_VIDMEM_SIZE_64M; + vram_size = SZ_64M; break; } - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff); - switch (jreg & 0x03) { + vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99); + switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) { case 1: - vram_size -= 0x100000; + vram_size -= SZ_1M; break; case 2: - vram_size -= 0x200000; + vram_size -= SZ_2M; break; case 3: - vram_size -= 0x400000; + vram_size -= SZ_4M; break; } @@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast) ast->vram_base = base; ast->vram_size = vram_size; - ast->vram_fb_available = vram_size; return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c3b950675485..1de832964e92 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -51,6 +51,26 @@ #define AST_LUT_SIZE 256 +#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1) + +static unsigned long ast_fb_vram_offset(void) +{ + return 0; // with shmem, the primary plane is always at offset 0 +} + +static unsigned long ast_fb_vram_size(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + unsigned long offset = ast_fb_vram_offset(); // starts at offset + long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset + + if (cursor_offset < 0) + cursor_offset = ast->vram_size; // no cursor; it's all ours + if (drm_WARN_ON_ONCE(dev, offset > cursor_offset)) + return 0; // cannot legally happen; signal error + return cursor_offset - offset; +} + static inline void ast_load_palette_index(struct ast_device *ast, u8 index, u8 red, u8 green, u8 blue) @@ -439,7 +459,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast) */ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane, - void __iomem *vaddr, u64 offset, unsigned long size, + u64 offset, unsigned long size, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, @@ -448,7 +468,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane, { struct drm_plane *plane = &ast_plane->base; - ast_plane->vaddr = vaddr; ast_plane->offset = offset; ast_plane->size = size; @@ -457,6 +476,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane, type, NULL); } +void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane) +{ + struct ast_device *ast = to_ast_device(ast_plane->base.dev); + + return ast->vram + ast_plane->offset; +} + /* * Primary plane */ @@ -503,7 +529,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src struct drm_framebuffer *fb, const struct drm_rect *clip) { - struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr); + struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane)); iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip)); drm_fb_memcpy(&dst, fb->pitches, src, fb, clip); @@ -576,12 +602,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane, { struct ast_plane *ast_plane = to_ast_plane(plane); - if (plane->state && plane->state->fb && ast_plane->vaddr) { + if (plane->state && plane->state->fb) { sb->format = plane->state->fb->format; sb->width = plane->state->fb->width; sb->height = plane->state->fb->height; sb->pitch[0] = plane->state->fb->pitches[0]; - iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr); + 
iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane)); return 0; } return -ENODEV; @@ -608,13 +634,11 @@ static int ast_primary_plane_init(struct ast_device *ast) struct drm_device *dev = &ast->base; struct ast_plane *ast_primary_plane = &ast->primary_plane; struct drm_plane *primary_plane = &ast_primary_plane->base; - void __iomem *vaddr = ast->vram; - u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */ - unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); - unsigned long size = ast->vram_fb_available - cursor_size; + u64 offset = ast_fb_vram_offset(); + unsigned long size = ast_fb_vram_size(ast); int ret; - ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size, + ret = ast_plane_init(dev, ast_primary_plane, offset, size, 0x01, &ast_primary_plane_funcs, ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats), NULL, DRM_PLANE_TYPE_PRIMARY); @@ -922,9 +946,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s /* * Concurrent operations could possibly trigger a call to - * drm_connector_helper_funcs.get_modes by trying to read the - * display modes. Protect access to I/O registers by acquiring - * the I/O-register lock. Released in atomic_flush(). + * drm_connector_helper_funcs.get_modes by reading the display + * modes. Protect access to registers by acquiring the modeset + * lock. */ mutex_lock(&ast->modeset_lock); drm_atomic_helper_commit_tail(state); @@ -938,16 +962,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) { - static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888); struct ast_device *ast = to_ast_device(dev); - unsigned long fbsize, fbpages, max_fbpages; - - max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT; - - fbsize = mode->hdisplay * mode->vdisplay * max_bpp; - fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); - - if (fbpages > max_fbpages) + unsigned long max_fb_size = ast_fb_vram_size(ast); + u64 pitch; + + if (drm_WARN_ON_ONCE(dev, !info)) + return MODE_ERROR; /* driver bug */ + + pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay); + if (!pitch) + return MODE_BAD_WIDTH; + if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET) + return MODE_BAD_WIDTH; /* maximum programmable pitch */ + if (pitch > max_fb_size / mode->vdisplay) return MODE_MEM; return MODE_OK; @@ -1018,10 +1046,7 @@ int ast_mode_config_init(struct ast_device *ast) return ret; drm_mode_config_reset(dev); - - ret = drmm_kms_helper_poll_init(dev); - if (ret) - return ret; + drmm_kms_helper_poll_init(dev); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 91e85e457bdf..37568cf3822c 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par switch (param->vram_size) { default: - case AST_VIDMEM_SIZE_8M: + case SZ_8M: param->dram_config |= 0x00; break; - case AST_VIDMEM_SIZE_16M: + case SZ_16M: param->dram_config |= 0x04; break; - case AST_VIDMEM_SIZE_32M: + case SZ_32M: param->dram_config |= 0x08; break; - case AST_VIDMEM_SIZE_64M: + case SZ_64M: param->dram_config |= 0x0c; break; } @@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par switch 
(param->vram_size) { default: - case AST_VIDMEM_SIZE_8M: + case SZ_8M: param->dram_config |= 0x00; break; - case AST_VIDMEM_SIZE_16M: + case SZ_16M: param->dram_config |= 0x04; break; - case AST_VIDMEM_SIZE_32M: + case SZ_32M: param->dram_config |= 0x08; break; - case AST_VIDMEM_SIZE_64M: + case SZ_64M: param->dram_config |= 0x0c; break; } @@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast) switch (temp & 0x0c) { default: case 0x00: - param.vram_size = AST_VIDMEM_SIZE_8M; + param.vram_size = SZ_8M; break; case 0x04: - param.vram_size = AST_VIDMEM_SIZE_16M; + param.vram_size = SZ_16M; break; case 0x08: - param.vram_size = AST_VIDMEM_SIZE_32M; + param.vram_size = SZ_32M; break; case 0x0c: - param.vram_size = AST_VIDMEM_SIZE_64M; + param.vram_size = SZ_64M; break; } diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index bb2cc1d8b84e..e15adaf3a80e 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -30,9 +30,11 @@ #define AST_IO_VGACRI (0x54) #define AST_IO_VGACR80_PASSWORD (0xa8) +#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0) #define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) #define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRA3_DVO_ENABLED BIT(7) +#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0) #define AST_IO_VGACRB6_HSYNC_OFF BIT(0) #define AST_IO_VGACRB6_VSYNC_OFF BIT(1) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 09a1be234f71..b9e0ca85226a 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE tristate depends on DRM_BRIDGE && OF select AUXILIARY_BUS + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Simple transparent bridge that is used by several non-DRM drivers to diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 050dae338ffe..1257009e850c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, } static int adv7511_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct adv7511 *adv = bridge_to_adv7511(bridge); int ret = 0; if (adv->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, + ret = drm_bridge_attach(encoder, adv->next_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 83d711ee3a2e..f3fe47b12edc 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345) if (err) return err; - /* - * Power up the sink (DP_SET_POWER register is only available on DPCD - * v1.1 and later). 
- */ - if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) { - err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]); - if (err < 0) { - DRM_ERROR("Failed to read DP_SET_POWER register: %d\n", - err); - return err; - } - - dpcd[0] &= ~DP_SET_POWER_MASK; - dpcd[0] |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]); - if (err < 0) { - DRM_ERROR("Failed to power up DisplayPort link: %d\n", - err); - return err; - } - - /* - * According to the DP 1.1 specification, a "Sink Device must - * exit the power saving state within 1 ms" (Section 2.5.3.1, - * Table 5-52, "Sink Control Field" (register 0x600). - */ - usleep_range(1000, 2000); - } + drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]); /* Possibly enable downspread on the sink */ err = regmap_write(anx6345->map[I2C_IDX_DPTX], @@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = { }; static int anx6345_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct anx6345 *anx6345 = bridge_to_anx6345(bridge); @@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge, anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD; err = drm_connector_attach_encoder(&anx6345->connector, - bridge->encoder); + encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); goto connector_cleanup; @@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client) struct device *dev; int i, err; - anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL); - if (!anx6345) - return -ENOMEM; + anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge, + &anx6345_bridge_funcs); + if (IS_ERR(anx6345)) + return PTR_ERR(anx6345); mutex_init(&anx6345->lock); @@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client) /* Look for supported chip ID */ anx6345_poweron(anx6345); if (anx6345_get_chip_id(anx6345)) { - anx6345->bridge.funcs = &anx6345_bridge_funcs; drm_bridge_add(&anx6345->bridge); return 0; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c index f74694bb9c50..a83020d6576f 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c @@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) if (err) return err; - /* - * Power up the sink (DP_SET_POWER register is only available on DPCD - * v1.1 and later). - */ - if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) { - err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]); - if (err < 0) { - DRM_ERROR("Failed to read DP_SET_POWER register: %d\n", - err); - return err; - } - - dpcd[0] &= ~DP_SET_POWER_MASK; - dpcd[0] |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]); - if (err < 0) { - DRM_ERROR("Failed to power up DisplayPort link: %d\n", - err); - return err; - } - - /* - * According to the DP 1.1 specification, a "Sink Device must - * exit the power saving state within 1 ms" (Section 2.5.3.1, - * Table 5-52, "Sink Control Field" (register 0x600). 
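The anx6345 hunk above and the matching anx78xx hunk that follows delete the same boilerplate in favor of drm_dp_link_power_up(), which takes the AUX channel plus the sink's DPCD revision and, per the removed comments, skips sinks older than DPCD 1.1 and waits out the 1 ms wake-up the DP spec mandates. A typical call site::

    #include <drm/display/drm_dp_helper.h>

    /* Wake the sink before link training; the helper encapsulates the
     * DP_SET_POWER read-modify-write and post-write delay that both
     * drivers used to open-code. */
    static int wake_sink(struct drm_dp_aux *aux,
                         const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
        return drm_dp_link_power_up(aux, dpcd[DP_DPCD_REV]);
    }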
- */ - usleep_range(1000, 2000); - } + drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]); /* Possibly enable downspread on the sink */ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], @@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = { }; static int anx78xx_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct anx78xx *anx78xx = bridge_to_anx78xx(bridge); @@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge, anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD; err = drm_connector_attach_encoder(&anx78xx->connector, - bridge->encoder); + encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); goto connector_cleanup; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 071168aa0c3b..a761941bc3c2 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -838,10 +838,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) int ret; /* Keep the panel disabled while we configure video */ - if (dp->plat_data->panel) { - if (drm_panel_disable(dp->plat_data->panel)) - DRM_ERROR("failed to disable the panel\n"); - } + drm_panel_disable(dp->plat_data->panel); ret = analogix_dp_train_link(dp); if (ret) { @@ -863,13 +860,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp) } /* Safe to enable the panel now */ - if (dp->plat_data->panel) { - ret = drm_panel_enable(dp->plat_data->panel); - if (ret) { - DRM_ERROR("failed to enable the panel\n"); - return ret; - } - } + drm_panel_enable(dp->plat_data->panel); /* Check whether panel supports fast training */ ret = analogix_dp_fast_link_train_detection(dp); @@ -955,67 +946,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp) return analogix_dp_send_psr_spd(dp, &psr_vsc, true); } -/* - * This function is a bit of a catch-all for panel preparation, hopefully - * simplifying the logic of functions that need to prepare/unprepare the panel - * below. - * - * If @prepare is true, this function will prepare the panel. Conversely, if it - * is false, the panel will be unprepared. - * - * If @is_modeset_prepare is true, the function will disregard the current state - * of the panel and either prepare/unprepare the panel based on @prepare. Once - * it finishes, it will update dp->panel_is_modeset to reflect the current state - * of the panel. - */ -static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, - bool prepare, bool is_modeset_prepare) -{ - int ret = 0; - - if (!dp->plat_data->panel) - return 0; - - mutex_lock(&dp->panel_lock); - - /* - * Exit early if this is a temporary prepare/unprepare and we're already - * modeset (since we neither want to prepare twice or unprepare early). 
- */ - if (dp->panel_is_modeset && !is_modeset_prepare) - goto out; - - if (prepare) - ret = drm_panel_prepare(dp->plat_data->panel); - else - ret = drm_panel_unprepare(dp->plat_data->panel); - - if (ret) - goto out; - - if (is_modeset_prepare) - dp->panel_is_modeset = prepare; - -out: - mutex_unlock(&dp->panel_lock); - return ret; -} - static int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); const struct drm_edid *drm_edid; - int ret, num_modes = 0; + int num_modes = 0; if (dp->plat_data->panel) { num_modes += drm_panel_get_modes(dp->plat_data->panel, connector); } else { - ret = analogix_dp_prepare_panel(dp, true, false); - if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); - return 0; - } - drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc); drm_edid_connector_update(&dp->connector, drm_edid); @@ -1024,10 +963,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector) num_modes += drm_edid_connector_add_modes(&dp->connector); drm_edid_free(drm_edid); } - - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); } if (dp->plat_data->get_modes) @@ -1082,24 +1017,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force) { struct analogix_dp_device *dp = to_dp(connector); enum drm_connector_status status = connector_status_disconnected; - int ret; if (dp->plat_data->panel) return connector_status_connected; - ret = analogix_dp_prepare_panel(dp, true, false); - if (ret) { - DRM_ERROR("Failed to prepare panel (%d)\n", ret); - return connector_status_disconnected; - } - if (!analogix_dp_detect_hpd(dp)) status = connector_status_connected; - ret = analogix_dp_prepare_panel(dp, false, false); - if (ret) - DRM_ERROR("Failed to unprepare panel (%d)\n", ret); - return status; } @@ -1113,10 +1037,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = { }; static int analogix_dp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct analogix_dp_device *dp = bridge->driver_private; - struct drm_encoder *encoder = dp->encoder; struct drm_connector *connector = NULL; int ret = 0; @@ -1203,7 +1127,6 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - int ret; crtc = analogix_dp_get_new_crtc(dp, old_state); if (!crtc) @@ -1214,9 +1137,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, if (old_crtc_state && old_crtc_state->self_refresh_active) return; - ret = analogix_dp_prepare_panel(dp, true, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + drm_panel_prepare(dp->plat_data->panel); } static int analogix_dp_set_bridge(struct analogix_dp_device *dp) @@ -1296,17 +1217,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; - int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; - if (dp->plat_data->panel) { - if (drm_panel_disable(dp->plat_data->panel)) { - DRM_ERROR("failed to disable the panel\n"); - return; - } - } + drm_panel_disable(dp->plat_data->panel); disable_irq(dp->irq); @@ -1314,9 +1229,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) pm_runtime_put_sync(dp->dev); - ret = 
analogix_dp_prepare_panel(dp, false, true); - if (ret) - DRM_ERROR("failed to setup the panel ret = %d\n", ret); + drm_panel_unprepare(dp->plat_data->panel); dp->fast_train_enable = false; dp->psr_supported = false; @@ -1505,6 +1418,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) video_info->max_link_rate = 0x0A; video_info->max_lane_count = 0x04; break; + case RK3588_EDP: + video_info->max_link_rate = 0x14; + video_info->max_lane_count = 0x04; + break; case EXYNOS_DP: /* * NOTE: those property parseing code is used for @@ -1540,6 +1457,26 @@ out: return ret; } +static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us) +{ + struct analogix_dp_device *dp = to_dp(aux); + int val; + int ret; + + if (dp->force_hpd) + return 0; + + pm_runtime_get_sync(dp->dev); + + ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val, + wait_us / 100, wait_us); + + pm_runtime_mark_last_busy(dp->dev); + pm_runtime_put_autosuspend(dp->dev); + + return ret; +} + struct analogix_dp_device * analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) { @@ -1560,9 +1497,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; - mutex_init(&dp->panel_lock); - dp->panel_is_modeset = false; - /* * platform dp driver need containor_of the plat_data to get * the driver private data, so we need to store the point of @@ -1625,10 +1559,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) * that we can get the current state of the GPIO. */ dp->irq = gpiod_to_irq(dp->hpd_gpiod); - irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN; } else { dp->irq = platform_get_irq(pdev, 0); - irq_flags = 0; + irq_flags = IRQF_NO_AUTOEN; } if (dp->irq == -ENXIO) { @@ -1645,7 +1579,18 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dev_err(&pdev->dev, "failed to request irq\n"); goto err_disable_clk; } - disable_irq(dp->irq); + + dp->aux.name = "DP-AUX"; + dp->aux.transfer = analogix_dpaux_transfer; + dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted; + dp->aux.dev = dp->dev; + drm_dp_aux_init(&dp->aux); + + pm_runtime_use_autosuspend(dp->dev); + pm_runtime_set_autosuspend_delay(dp->dev, 100); + ret = devm_pm_runtime_enable(dp->dev); + if (ret) + goto err_disable_clk; return dp; @@ -1681,6 +1626,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp) if (dp->plat_data->power_on) dp->plat_data->power_on(dp->plat_data); + phy_set_mode(dp->phy, PHY_MODE_DP); phy_power_on(dp->phy); analogix_dp_init_dp(dp); @@ -1696,25 +1642,12 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) dp->drm_dev = drm_dev; dp->encoder = dp->plat_data->encoder; - if (IS_ENABLED(CONFIG_PM)) { - pm_runtime_use_autosuspend(dp->dev); - pm_runtime_set_autosuspend_delay(dp->dev, 100); - pm_runtime_enable(dp->dev); - } else { - ret = analogix_dp_resume(dp); - if (ret) - return ret; - } - - dp->aux.name = "DP-AUX"; - dp->aux.transfer = analogix_dpaux_transfer; - dp->aux.dev = dp->dev; dp->aux.drm_dev = drm_dev; ret = drm_dp_aux_register(&dp->aux); if (ret) { DRM_ERROR("failed to register AUX (%d)\n", ret); - goto err_disable_pm_runtime; + return ret; } ret = analogix_dp_create_bridge(drm_dev, dp); @@ -1727,13 +1660,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) 
err_unregister_aux: drm_dp_aux_unregister(&dp->aux); -err_disable_pm_runtime: - if (IS_ENABLED(CONFIG_PM)) { - pm_runtime_dont_use_autosuspend(dp->dev); - pm_runtime_disable(dp->dev); - } else { - analogix_dp_suspend(dp); - } return ret; } @@ -1744,19 +1670,9 @@ void analogix_dp_unbind(struct analogix_dp_device *dp) analogix_dp_bridge_disable(dp->bridge); dp->connector.funcs->destroy(&dp->connector); - if (dp->plat_data->panel) { - if (drm_panel_unprepare(dp->plat_data->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } + drm_panel_unprepare(dp->plat_data->panel); drm_dp_aux_unregister(&dp->aux); - - if (IS_ENABLED(CONFIG_PM)) { - pm_runtime_dont_use_autosuspend(dp->dev); - pm_runtime_disable(dp->dev); - } else { - analogix_dp_suspend(dp); - } } EXPORT_SYMBOL_GPL(analogix_dp_unbind); @@ -1782,6 +1698,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector) } EXPORT_SYMBOL_GPL(analogix_dp_stop_crc); +struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux) +{ + struct analogix_dp_device *dp = to_dp(aux); + + return dp->plat_data; +} +EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data); + +struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp) +{ + return &dp->aux; +} +EXPORT_SYMBOL_GPL(analogix_dp_get_aux); + MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); MODULE_DESCRIPTION("Analogix DP Core Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index 774d11574b09..2b54120ba4a3 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -169,9 +169,6 @@ struct analogix_dp_device { bool fast_train_enable; bool psr_supported; - struct mutex panel_lock; - bool panel_is_modeset; - struct analogix_dp_plat_data *plat_data; }; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 3afc73c858c4..38fd8d5014d2 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -11,6 +11,7 @@ #include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/phy/phy.h> #include <drm/bridge/analogix_dp.h> @@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { u32 reg; + int ret; reg = bwtype; if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62)) writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.link_rate = + drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100; + phy_cfg.dp.set_rate = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret); + return; + } + } } void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) @@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count) { u32 reg; + int ret; reg = count; writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + phy_cfg.dp.lanes = dp->link_train.lane_count; + phy_cfg.dp.set_lanes = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: 
phy_configure() failed: %d\n", __func__, ret); + return; + } + } } void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) @@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp) { u8 lane; + int ret; for (lane = 0; lane < dp->link_train.lane_count; lane++) writel(dp->link_train.training_lane[lane], dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane); + + if (dp->phy) { + union phy_configure_opts phy_cfg = {0}; + + for (lane = 0; lane < dp->link_train.lane_count; lane++) { + u8 training_lane = dp->link_train.training_lane[lane]; + u8 vs, pe; + + vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + phy_cfg.dp.voltage[lane] = vs; + phy_cfg.dp.pre[lane] = pe; + } + + phy_cfg.dp.set_voltages = true; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret && ret != -EOPNOTSUPP) { + dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret); + return; + } + } } u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane) diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 0b97b66de577..8bfe477c476c 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2141,6 +2141,7 @@ static void hdcp_check_work_func(struct work_struct *work) } static int anx7625_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); @@ -2159,7 +2160,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, } if (ctx->pdata.panel_bridge) { - err = drm_bridge_attach(bridge->encoder, + err = drm_bridge_attach(encoder, ctx->pdata.panel_bridge, &ctx->bridge, flags); if (err) @@ -2568,12 +2569,6 @@ static const struct dev_pm_ops anx7625_pm_ops = { anx7625_runtime_pm_resume, NULL) }; -static void anx7625_runtime_disable(void *data) -{ - pm_runtime_dont_use_autosuspend(data); - pm_runtime_disable(data); -} - static int anx7625_link_bridge(struct drm_dp_aux *aux) { struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux); @@ -2707,11 +2702,10 @@ static int anx7625_i2c_probe(struct i2c_client *client) goto free_wq; } - pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 1000); pm_runtime_use_autosuspend(dev); pm_suspend_ignore_children(dev, true); - ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev); + ret = devm_pm_runtime_enable(dev); if (ret) goto free_wq; @@ -2771,7 +2765,6 @@ static void anx7625_i2c_remove(struct i2c_client *client) if (platform->hdcp_workqueue) { cancel_delayed_work(&platform->hdcp_work); - flush_workqueue(platform->hdcp_workqueue); destroy_workqueue(platform->hdcp_workqueue); } diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index 015983c015e5..c179b86d208f 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -86,6 +86,7 @@ struct drm_aux_bridge_data { }; static int drm_aux_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct drm_aux_bridge_data *data; @@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge, data = container_of(bridge, struct drm_aux_bridge_data, bridge); - return 
drm_bridge_attach(bridge->encoder, data->next_bridge, bridge, + return drm_bridge_attach(encoder, data->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 48f297c78ee6..b3f588b71a7d 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify); static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index c7a0247e06ad..b022dd6e6b6e 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -425,6 +425,17 @@ #define DSI_NULL_FRAME_OVERHEAD 6 #define DSI_EOT_PKT_SIZE 4 +struct cdns_dsi_bridge_state { + struct drm_bridge_state base; + struct cdns_dsi_cfg dsi_cfg; +}; + +static inline struct cdns_dsi_bridge_state * +to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state) +{ + return container_of(bridge_state, struct cdns_dsi_bridge_state, base); +} + static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input) { return container_of(input, struct cdns_dsi, input); @@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi, struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; unsigned long dsi_hss_hsa_hse_hbp; unsigned int nlanes = output->dev->lanes; + int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock); int ret; ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check); if (ret) return ret; - phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000, - mipi_dsi_pixel_format_to_bpp(output->dev->format), - nlanes, phy_cfg); + ret = phy_mipi_dphy_get_default_config(mode_clock * 1000, + mipi_dsi_pixel_format_to_bpp(output->dev->format), + nlanes, phy_cfg); + if (ret) + return ret; ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check); if (ret) @@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi, } static int cdns_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); @@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge, return -ENOTSUPP; } - return drm_bridge_attach(bridge->encoder, output->bridge, bridge, + return drm_bridge_attach(encoder, output->bridge, bridge, flags); } @@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void cdns_dsi_bridge_disable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); @@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge) pm_runtime_put(dsi->base.dev); } -static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); + 
dsi->phy_initialized = false; + dsi->link_initialized = false; + phy_power_off(dsi->dphy); + phy_exit(dsi->dphy); + pm_runtime_put(dsi->base.dev); } @@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi) dsi->link_initialized = true; } -static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); struct cdns_dsi_output *output = &dsi->output; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct cdns_dsi_bridge_state *dsi_state; + struct drm_bridge_state *new_bridge_state; struct drm_display_mode *mode; struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; + struct drm_connector *connector; unsigned long tx_byte_period; struct cdns_dsi_cfg dsi_cfg; - u32 tmp, reg_wakeup, div; + u32 tmp, reg_wakeup, div, status; int nlanes; if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0)) return; + new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge); + if (WARN_ON(!new_bridge_state)) + return; + + dsi_state = to_cdns_dsi_bridge_state(new_bridge_state); + dsi_cfg = dsi_state->dsi_cfg; + if (dsi->platform_ops && dsi->platform_ops->enable) dsi->platform_ops->enable(dsi); - mode = &bridge->encoder->crtc->state->adjusted_mode; + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + conn_state = drm_atomic_get_new_connector_state(state, connector); + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + mode = &crtc_state->adjusted_mode; nlanes = output->dev->lanes; - WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false)); - cdns_dsi_hs_init(dsi); cdns_dsi_init_link(dsi); + /* + * Now that the DSI Link and DSI Phy are initialized, + * wait for the CLK and Data Lanes to be ready. 
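+ * MCTL_MAIN_STS exposes CLK_LANE_RDY plus one DATA_LANE_RDY(n) bit
+ * per active data lane; the poll below samples the register every
+ * 100us and times out after 500ms.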
+ */ + tmp = CLK_LANE_RDY; + for (int i = 0; i < nlanes; i++) + tmp |= DATA_LANE_RDY(i); + + if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status, + (tmp == (status & tmp)), 100, 500000)) + dev_err(dsi->base.dev, + "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n"); + writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa), dsi->regs + VID_HSIZE1); writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact), @@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) writel(tmp, dsi->regs + MCTL_MAIN_EN); } -static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); @@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge) cdns_dsi_hs_init(dsi); } +static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); + struct cdns_dsi *dsi = input_to_dsi(input); + struct cdns_dsi_output *output = &dsi->output; + u32 *input_fmts; + + *num_input_fmts = 0; + + input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format); + if (!input_fmts[0]) { + kfree(input_fmts); + return NULL; + } + + *num_input_fmts = 1; + + return input_fmts; +} + +static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); + struct cdns_dsi *dsi = input_to_dsi(input); + struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state); + const struct drm_display_mode *mode = &crtc_state->mode; + struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg; + + return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false); +} + +static struct drm_bridge_state * +cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge) +{ + struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state; + struct drm_bridge_state *bridge_state; + + if (WARN_ON(!bridge->base.state)) + return NULL; + + bridge_state = drm_priv_to_bridge_state(bridge->base.state); + old_dsi_state = to_cdns_dsi_bridge_state(bridge_state); + + dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL); + if (!dsi_state) + return NULL; + + __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base); + + memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg, + sizeof(dsi_state->dsi_cfg)); + + return &dsi_state->base; +} + +static void +cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state) +{ + struct cdns_dsi_bridge_state *dsi_state; + + dsi_state = to_cdns_dsi_bridge_state(state); + + kfree(dsi_state); +} + +static struct drm_bridge_state * +cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge) +{ + struct cdns_dsi_bridge_state *dsi_state; + + dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL); + if (!dsi_state) + return NULL; + + dsi_state->base.bridge = bridge; + + return &dsi_state->base; +} + static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = { .attach = cdns_dsi_bridge_attach, 
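/*
 * The atomic hooks below round-trip the cdns_dsi_cfg computed once in
 * ->atomic_check() through the subclassed cdns_dsi_bridge_state, so
 * ->atomic_enable() reuses the validated timings instead of recomputing
 * them from the adjusted mode.
 */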
.mode_valid = cdns_dsi_bridge_mode_valid, - .disable = cdns_dsi_bridge_disable, - .pre_enable = cdns_dsi_bridge_pre_enable, - .enable = cdns_dsi_bridge_enable, - .post_disable = cdns_dsi_bridge_post_disable, + .atomic_disable = cdns_dsi_bridge_atomic_disable, + .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable, + .atomic_enable = cdns_dsi_bridge_atomic_enable, + .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable, + .atomic_check = cdns_dsi_bridge_atomic_check, + .atomic_reset = cdns_dsi_bridge_atomic_reset, + .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state, + .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state, + .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts, }; static int cdns_dsi_attach(struct mipi_dsi_host *host, @@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, struct cdns_dsi_output *output = &dsi->output; struct cdns_dsi_input *input = &dsi->input; struct drm_bridge *bridge; - struct drm_panel *panel; - struct device_node *np; int ret; /* @@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, /* * The host <-> device link might be described using an OF-graph * representation, in this case we extract the device of_node from - * this representation, otherwise we use dsidev->dev.of_node which - * should have been filled by the core. + * this representation. */ - np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT, - dev->channel); - if (!np) - np = of_node_get(dev->dev.of_node); - - panel = of_drm_find_panel(np); - if (!IS_ERR(panel)) { - bridge = drm_panel_bridge_add_typed(panel, - DRM_MODE_CONNECTOR_DSI); - } else { - bridge = of_drm_find_bridge(dev->dev.of_node); - if (!bridge) - bridge = ERR_PTR(-EINVAL); - } - - of_node_put(np); - + bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node, + DSI_OUTPUT_PORT, dev->channel); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); dev_err(host->dev, "failed to add DSI device %s (err = %d)", @@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, output->dev = dev; output->bridge = bridge; - output->panel = panel; /* * The DSI output has been properly configured, we can now safely @@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev) { struct cdns_dsi *dsi = to_cdns_dsi(host); - struct cdns_dsi_output *output = &dsi->output; struct cdns_dsi_input *input = &dsi->input; drm_bridge_remove(&input->bridge); - if (output->panel) - drm_panel_bridge_remove(output->bridge); return 0; } @@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev) clk_disable_unprepare(dsi->dsi_sys_clk); clk_disable_unprepare(dsi->dsi_p_clk); reset_control_assert(dsi->dsi_p_rst); - dsi->link_initialized = false; return 0; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h index ca7ea2da635c..5db5dbbbcaad 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h @@ -10,7 +10,6 @@ #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> -#include <drm/drm_panel.h> #include <linux/bits.h> #include <linux/completion.h> @@ -21,7 +20,6 @@ struct reset_control; struct cdns_dsi_output { struct mipi_dsi_device *dev; - struct drm_panel *panel; struct drm_bridge *bridge; union phy_configure_opts phy_opts; }; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 
81fad14c2cd5..b431e7efd1f0 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -546,76 +546,6 @@ out: } /** - * cdns_mhdp_link_power_up() - power up a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -static -int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - /* - * According to the DP 1.1 specification, a "Sink Device must exit the - * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink - * Control Field" (register 0x600). - */ - usleep_range(1000, 2000); - - return 0; -} - -/** - * cdns_mhdp_link_power_down() - power down a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -static -int cdns_mhdp_link_power_down(struct drm_dp_aux *aux, - struct cdns_mhdp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D3; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - return 0; -} - -/** * cdns_mhdp_link_configure() - configure a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration @@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp) mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n"); - cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link); + drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision); cdns_mhdp_fill_sink_caps(mhdp, dpcd); @@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); if (mhdp->plugged) - cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link); + drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision); mhdp->link_up = false; } @@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp) } static int cdns_mhdp_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); @@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp) * If everything looks fine, just return, as we don't handle * DP IRQs. 
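 * ("Fine" means drm_dp_channel_eq_ok() and drm_dp_clock_recovery_ok()
 * both pass on the DPCD link status for all mhdp->link.num_lanes lanes.)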
*/ - if (ret > 0 && + if (!ret && drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) goto out; diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index 81f7c701961f..634c5b030667 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn) return ret; } -static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int chipone_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { struct chipone *icn = bridge_to_chipone(bridge); - return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags); + return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags); } #define MAX_INPUT_SEL_FORMATS 1 diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index da17f0978a79..210c45c1efd4 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status) } static int ch7033_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); struct drm_connector *connector = &priv->connector; int ret; - ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge, + ret = drm_bridge_attach(encoder, priv->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; @@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge, return ret; } - return drm_connector_attach_encoder(&priv->connector, bridge->encoder); + return drm_connector_attach_encoder(&priv->connector, encoder); } static void ch7033_bridge_detach(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 72bc508d4e6e..badd2c7f91a1 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge) } static int display_connector_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; @@ -209,9 +210,10 @@ static int display_connector_probe(struct platform_device *pdev) const char *label = NULL; int ret; - conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL); - if (!conn) - return -ENOMEM; + conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge, + &display_connector_bridge_funcs); + if (IS_ERR(conn)) + return PTR_ERR(conn); platform_set_drvdata(pdev, conn); @@ -361,7 +363,6 @@ static int display_connector_probe(struct platform_device *pdev) } } - conn->bridge.funcs = &display_connector_bridge_funcs; conn->bridge.of_node = pdev->dev.of_node; if (conn->bridge.ddc) diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 26ae1ab5237f..2cb6dfc7a6d3 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock) } static int fsl_ldb_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge); - return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge, + return drm_bridge_attach(encoder, fsl_ldb->panel_bridge, bridge, flags); } @@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge, configured_link_freq = clk_get_rate(fsl_ldb->clk); if (configured_link_freq != requested_link_freq) - dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n", - configured_link_freq, - requested_link_freq); + dev_warn(fsl_ldb->dev, + "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n", + fsl_ldb->clk, configured_link_freq, requested_link_freq); clk_prepare_enable(fsl_ldb->clk); diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c index 9b5bebbe357d..6149ba141a38 100644 --- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c +++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c @@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge) } EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper); -int ldb_bridge_attach_helper(struct drm_bridge *bridge, +int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ldb_channel *ldb_ch = bridge->driver_private; @@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, - ldb_ch->next_bridge, bridge, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); + return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); } EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper); @@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb) } EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper); -void ldb_add_bridge_helper(struct ldb *ldb, - const struct drm_bridge_funcs *bridge_funcs) +void ldb_add_bridge_helper(struct ldb *ldb) { struct ldb_channel *ldb_ch; int i; @@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb, continue; ldb_ch->bridge.driver_private = ldb_ch; - ldb_ch->bridge.funcs = bridge_funcs; ldb_ch->bridge.of_node = ldb_ch->np; drm_bridge_add(&ldb_ch->bridge); diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h index a0a5cde27fbc..de187e326999 100644 --- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h +++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h @@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct 
drm_bridge *bridge); void ldb_bridge_disable_helper(struct drm_bridge *bridge); -int ldb_bridge_attach_helper(struct drm_bridge *bridge, +int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags); int ldb_init_helper(struct ldb *ldb); int ldb_find_next_bridge_helper(struct ldb *ldb); -void ldb_add_bridge_helper(struct ldb *ldb, - const struct drm_bridge_funcs *bridge_funcs); +void ldb_add_bridge_helper(struct ldb *ldb); void ldb_remove_bridge_helper(struct ldb *ldb); diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c index 3ebf0b9866de..f072c6ed39ef 100644 --- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -23,7 +23,8 @@ struct imx_legacy_bridge { #define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base) static int imx_legacy_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; @@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, imx_bridge->base.ops = DRM_BRIDGE_OP_MODES; imx_bridge->base.type = type; - ret = devm_drm_bridge_add(dev, &imx_bridge->base); - if (ret) - return ERR_PTR(ret); + ret = devm_drm_bridge_add(dev, &imx_bridge->base); + if (ret) + return ERR_PTR(ret); return &imx_bridge->base; } diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c index a17433a7c755..8a4fd7d77a8d 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c @@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge) } static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge); - return drm_bridge_attach(bridge->encoder, pvi->next_bridge, + return drm_bridge_attach(encoder, pvi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c index 524aac751359..47aa65938e6a 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c @@ -47,7 +47,7 @@ struct imx8qm_ldb_channel { struct imx8qm_ldb { struct ldb base; struct device *dev; - struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM]; + struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM]; struct clk *clk_pixel; struct clk *clk_bypass; int active_chno; @@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge, if (is_split) { imx8qm_ldb_ch = - &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1]; + imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1]; imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true, phy_cfg); ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts); @@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge, if (is_split) { imx8qm_ldb_ch = - &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1]; + imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1]; imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true, phy_cfg); ret = phy_configure(imx8qm_ldb_ch->phy, &opts); @@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge, } if (is_split) { - ret = phy_power_on(imx8qm_ldb->channel[0].phy); + ret = 
phy_power_on(imx8qm_ldb->channel[0]->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power on channel0 PHY: %d\n", ret); - ret = phy_power_on(imx8qm_ldb->channel[1].phy); + ret = phy_power_on(imx8qm_ldb->channel[1]->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power on channel1 PHY: %d\n", @@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge, ldb_bridge_disable_helper(bridge); if (is_split) { - ret = phy_power_off(imx8qm_ldb->channel[0].phy); + ret = phy_power_off(imx8qm_ldb->channel[0]->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power off channel0 PHY: %d\n", ret); - ret = phy_power_off(imx8qm_ldb->channel[1].phy); + ret = phy_power_off(imx8qm_ldb->channel[1]->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power off channel1 PHY: %d\n", @@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb) int i, ret; for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { - imx8qm_ldb_ch = &imx8qm_ldb->channel[i]; + imx8qm_ldb_ch = imx8qm_ldb->channel[i]; ldb_ch = &imx8qm_ldb_ch->base; if (!ldb_ch->is_available) @@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev) if (!imx8qm_ldb) return -ENOMEM; + for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { + imx8qm_ldb->channel[i] = + devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge, + &imx8qm_ldb_bridge_funcs); + if (IS_ERR(imx8qm_ldb->channel[i])) + return PTR_ERR(imx8qm_ldb->channel[i]); + } + imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel"); if (IS_ERR(imx8qm_ldb->clk_pixel)) { ret = PTR_ERR(imx8qm_ldb->clk_pixel); @@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev) ldb->ctrl_reg = 0xe0; for (i = 0; i < MAX_LDB_CHAN_NUM; i++) - ldb->channel[i] = &imx8qm_ldb->channel[i].base; + ldb->channel[i] = &imx8qm_ldb->channel[i]->base; ret = ldb_init_helper(ldb); if (ret) @@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev) } imx8qm_ldb->active_chno = 0; - imx8qm_ldb_ch = &imx8qm_ldb->channel[0]; + imx8qm_ldb_ch = imx8qm_ldb->channel[0]; ldb_ch = &imx8qm_ldb_ch->base; ldb_ch->link_type = pixel_order; } else { for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { - imx8qm_ldb_ch = &imx8qm_ldb->channel[i]; + imx8qm_ldb_ch = imx8qm_ldb->channel[i]; ldb_ch = &imx8qm_ldb_ch->base; if (ldb_ch->is_available) { @@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, imx8qm_ldb); pm_runtime_enable(dev); - ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs); + ldb_add_bridge_helper(ldb); return ret; } diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 3cb484773ddf..5d272916e200 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel { struct imx8qxp_ldb { struct ldb base; struct device *dev; - struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM]; + struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM]; struct clk *clk_pixel; struct clk *clk_bypass; struct drm_bridge *companion; @@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = { static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb) { struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = - &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; + imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base; struct device_node *ep, *remote; struct device *dev = imx8qxp_ldb->dev; @@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel 
*ldb_ch, int link) static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb) { struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = - &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; + imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base; struct ldb_channel *companion_ldb_ch; struct device_node *companion; @@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) if (!imx8qxp_ldb) return -ENOMEM; + for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { + imx8qxp_ldb->channel[i] = + devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge, + &imx8qxp_ldb_bridge_funcs); + if (IS_ERR(imx8qxp_ldb->channel[i])) + return PTR_ERR(imx8qxp_ldb->channel[i]); + } + imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel"); if (IS_ERR(imx8qxp_ldb->clk_pixel)) { ret = PTR_ERR(imx8qxp_ldb->clk_pixel); @@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) ldb->ctrl_reg = 0xe0; for (i = 0; i < MAX_LDB_CHAN_NUM; i++) - ldb->channel[i] = &imx8qxp_ldb->channel[i].base; + ldb->channel[i] = &imx8qxp_ldb->channel[i]->base; ret = ldb_init_helper(ldb); if (ret) @@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) } for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { - imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i]; + imx8qxp_ldb_ch = imx8qxp_ldb->channel[i]; ldb_ch = &imx8qxp_ldb_ch->base; if (ldb_ch->is_available) { @@ -660,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, imx8qxp_ldb); pm_runtime_enable(dev); - ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs); + ldb_add_bridge_helper(ldb); - return ret; + return 0; } static void imx8qxp_ldb_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index 1d9529dc7f2a..1f6fd488e703 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge, } static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pc_channel *ch = bridge->driver_private; @@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, ch->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index cd6818db0fd3..e092c9ea99b0 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl) } static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pixel_link *pl = bridge->driver_private; @@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, pl->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c index 49dd4f96d52c..da138ab51b3b 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c +++ 
b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c @@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi { #define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge) static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pxl2dpi *p2d = bridge->driver_private; @@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, p2d->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c index 21152a1c28f7..a3a63a977b0a 100644 --- a/drivers/gpu/drm/bridge/ite-it6263.c +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge, } static int it6263_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it6263 *it = bridge_to_it6263(bridge); struct drm_connector *connector; int ret; - ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge, + ret = drm_bridge_attach(encoder, it->next_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge, if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; - connector = drm_bridge_connector_init(bridge->dev, bridge->encoder); + connector = drm_bridge_connector_init(bridge->dev, encoder); if (IS_ERR(connector)) { ret = PTR_ERR(connector); dev_err(it->dev, "failed to initialize bridge connector: %d\n", @@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge, return ret; } - drm_connector_attach_encoder(connector, bridge->encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 8a607558ac89..1383d1e21afe 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505) DRM_MODE_ARG(&it6505->video_info)); } -static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux, - struct it6505_drm_dp_link *link, - u8 mode) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < DPCD_V_1_1) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= mode; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - if (mode == DP_SET_POWER_D0) { - /* - * According to the DP 1.1 specification, a "Sink Device must - * exit the power saving state within 1 ms" (Section 2.5.3.1, - * Table 5-52, "Sink Control Field" (register 0x600). 
- */ - usleep_range(1000, 2000); - } - - return 0; -} - static void it6505_clear_int(struct it6505 *it6505) { it6505_write(it6505, INT_STATUS_01, 0xFF); @@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505) } it6505->auto_train_retry = AUTO_TRAIN_RETRY; - it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, - DP_SET_POWER_D0); + drm_dp_link_power_up(&it6505->aux, it6505->link.revision); dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); @@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505) } if (it6505->hpd_state) { - it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, - DP_SET_POWER_D0); + drm_dp_link_power_up(&it6505->aux, it6505->link.revision); dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d", @@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge) } static int it6505_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it6505 *it6505 = bridge_to_it6505(bridge); @@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge, it6505_int_mask_enable(it6505); it6505_video_reset(it6505); - it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, - DP_SET_POWER_D0); + drm_dp_link_power_up(&it6505->aux, it6505->link.revision); } static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, @@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, DRM_DEV_DEBUG_DRIVER(dev, "start"); if (it6505->powered) { - it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, - DP_SET_POWER_D3); + drm_dp_link_power_down(&it6505->aux, it6505->link.revision); it6505_video_disable(it6505); } } diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index b9f90f32145d..7b110ae53291 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx) } static int it66121_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); @@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); + ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags); if (ret) return ret; diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 52da204f5740..3e49d855b364 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -543,12 +543,13 @@ exit: } static int lt8912_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt8912 *lt = bridge_to_lt8912(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge, + ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) { dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret); diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 0fc5ea18fe6a..9b2dac9bd63c 100644 --- 
a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge) } static int lt9211_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9211 *ctx = bridge_to_lt9211(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, &ctx->bridge, flags); } diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 026803034231..a35a8b8ca89c 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, } static int lt9611_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); - return drm_bridge_attach(bridge->encoder, lt9611->next_bridge, + return drm_bridge_attach(encoder, lt9611->next_bridge, bridge, flags); } @@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client) lt9611->bridge.of_node = client->dev.of_node; lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES | - DRM_BRIDGE_OP_HDMI; + DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO; lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA; lt9611->bridge.vendor = "Lontium"; lt9611->bridge.product = "LT9611"; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index f4c3ff1fdc69..766da2cb45a7 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, } static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); - return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge, + return drm_bridge_attach(encoder, lt9611uxc->next_bridge, bridge, flags); } @@ -774,9 +775,9 @@ static int lt9611uxc_probe(struct i2c_client *client) return -ENODEV; } - lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL); - if (!lt9611uxc) - return -ENOMEM; + lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs); + if (IS_ERR(lt9611uxc)) + return PTR_ERR(lt9611uxc); lt9611uxc->dev = dev; lt9611uxc->client = client; @@ -855,7 +856,6 @@ retry: i2c_set_clientdata(client, lt9611uxc); - lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs; lt9611uxc->bridge.of_node = client->dev.of_node; lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; if (lt9611uxc->hpd_supported) @@ -880,7 +880,11 @@ retry: } } - return lt9611uxc_audio_init(dev, lt9611uxc); + ret = lt9611uxc_audio_init(dev, lt9611uxc); + if (ret) + goto err_remove_bridge; + + return 0; err_remove_bridge: free_irq(client->irq, lt9611uxc); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 389af0233fcd..1646e454e0b0 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge) } static int lvds_codec_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lvds_codec 
*lvds_codec = to_lvds_codec(bridge); - return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge, + return drm_bridge_attach(encoder, lvds_codec->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index a47aabf134fd..15a5a1f644fc 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id) } static int ge_b850v3_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct i2c_client *stdp4028_i2c diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c index 53dd140a1b8d..1d4ae0097df8 100644 --- a/drivers/gpu/drm/bridge/microchip-lvds.c +++ b/drivers/gpu/drm/bridge/microchip-lvds.c @@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds) } static int mchp_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mchp_lvds *lvds = bridge_to_lvds(bridge); - return drm_bridge_attach(bridge->encoder, lvds->panel_bridge, + return drm_bridge_attach(encoder, lvds->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index d04c62a0cb9f..55912ae11f46 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge, } static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); @@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); - return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags); + return drm_bridge_attach(encoder, panel_bridge, bridge, flags); } static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 27261b2ac9c8..25d7c415478b 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = { }; static int ptn3460_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; /* Let this driver create connector if requested */ - ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge, + ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge, &ptn3460_connector_helper_funcs); drm_connector_register(&ptn_bridge->connector); drm_connector_attach_encoder(&ptn_bridge->connector, - bridge->encoder); + encoder); drm_helper_hpd_irq_event(ptn_bridge->connector.dev); diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 258c85c83a28..79b009ab9396 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = { }; static int 
panel_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); @@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge, drm_panel_bridge_set_orientation(connector, bridge); drm_connector_attach_encoder(&panel_bridge->connector, - bridge->encoder); + encoder); if (bridge->dev->registered) { if (connector->funcs->reset) diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 13ada42a5514..8726fefc5c65 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge) } static int ps8622_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index a42138b33258..2422ff68c104 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge, } static int ps8640_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); @@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge, } /* Attach the panel-bridge to the dsi bridge */ - ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge, + ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge, &ps_bridge->bridge, flags); if (ret) goto err_bridge_attach; diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 54de6ed2fae8..0014c497e3fe 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge, } static int samsung_dsim_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge, + return drm_bridge_attach(encoder, dsi->out_bridge, bridge, flags); } @@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev) struct samsung_dsim *dsi; int ret, i; - dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); - if (!dsi) - return -ENOMEM; + dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); init_completion(&dsi->completed); spin_lock_init(&dsi->transfer_lock); @@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev) pm_runtime_enable(dev); - dsi->bridge.funcs = &samsung_dsim_bridge_funcs; dsi->bridge.of_node = dev->of_node; dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 914a2609a685..6de61d9fe064 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -416,6 +416,7 @@ out: } static int sii902x_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sii902x *sii902x = bridge_to_sii902x(bridge); @@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge, int ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) - 
return drm_bridge_attach(bridge->encoder, sii902x->next_bridge, + return drm_bridge_attach(encoder, sii902x->next_bridge, bridge, flags); drm_connector_helper_add(&sii902x->connector, @@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge, if (ret) return ret; - drm_connector_attach_encoder(&sii902x->connector, bridge->encoder); + drm_connector_attach_encoder(&sii902x->connector, encoder); return 0; } @@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x) sii902x->bridge.of_node = dev->of_node; sii902x->bridge.timings = &default_sii902x_timings; sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; + sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA; if (sii902x->i2c->irq > 0) sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD; diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 28a2e1ee04b2..3af650dc92a1 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) } static int sii8620_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sii8620 *ctx = bridge_to_sii8620(bridge); diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index ab0b0e36e97a..70db5b99e5bb 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = { }; static int simple_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge, + ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge, return ret; } - drm_connector_attach_encoder(&sbridge->connector, bridge->encoder); + drm_connector_attach_encoder(&sbridge->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 6166f197e37b..5e5f8c2f95be 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HDMI_AUDIO | DRM_BRIDGE_OP_HPD; hdmi->bridge.of_node = pdev->dev.of_node; hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0890add5f707..8791408dd1ff 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -22,8 +22,8 @@ #include <media/cec-notifier.h> -#include <uapi/linux/media-bus-format.h> -#include <uapi/linux/videodev2.h> +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> #include <drm/bridge/dw_hdmi.h> #include <drm/display/drm_hdmi_helper.h> @@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge, } static int dw_hdmi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_hdmi *hdmi = bridge->driver_private; if (flags & 
DRM_BRIDGE_ATTACH_NO_CONNECTOR) - return drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + return drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); return dw_hdmi_connector_create(hdmi); @@ -3332,9 +3333,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, u8 config0; u8 config3; - hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return ERR_PTR(-ENOMEM); + hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs); + if (IS_ERR(hdmi)) + return hdmi; hdmi->plat_data = plat_data; hdmi->dev = dev; @@ -3494,7 +3495,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, } hdmi->bridge.driver_private = hdmi; - hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; hdmi->bridge.interlace_allowed = true; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 2b6e70a49f43..b08ada920a50 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, } static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; + encoder->encoder_type = DRM_MODE_ENCODER_DSI; /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, + return drm_bridge_attach(encoder, dsi->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c index 5fd7a459efdd..c76f5f2e74d1 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c @@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge, } static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge); /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; + encoder->encoder_type = DRM_MODE_ENCODER_DSI; /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge, + return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c index 49c76027f831..edf01476f2ef 100644 --- a/drivers/gpu/drm/bridge/tc358762.c +++ b/drivers/gpu/drm/bridge/tc358762.c @@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge, } static int tc358762_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc358762 *ctx = bridge_to_tc358762(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c index 3d3d135b4348..3f76c890fad9 100644 --- a/drivers/gpu/drm/bridge/tc358764.c +++ b/drivers/gpu/drm/bridge/tc358764.c @@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge) } static int tc358764_attach(struct drm_bridge *bridge, + struct drm_encoder 
*encoder, enum drm_bridge_attach_flags flags) { struct tc358764 *ctx = bridge_to_tc358764(bridge); - return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags); } static const struct drm_bridge_funcs tc358764_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 39e2d3a7a27d..7e5449fb86a3 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = { }; static int tc_dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); @@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge, } static int tc_edp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index ec79b0dd0e2c..063f217a17b6 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = { }; static int tc358768_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); @@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge, return -ENOTSUPP; } - return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge, + return drm_bridge_attach(encoder, priv->output.bridge, bridge, flags); } @@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void tc358768_bridge_disable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); int ret; @@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge) dev_warn(priv->dev, "Software disable failed: %d\n", ret); } -static void tc358768_bridge_post_disable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); @@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val) return (u32)div_u64(m, n); } -static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); struct mipi_dsi_device *dsi_dev = priv->output.dev; unsigned long mode_flags = dsi_dev->mode_flags; u32 val, val2, lptxcnt, hact, data_type; s32 raw_val; + struct drm_crtc_state *crtc_state; + struct drm_connector_state *conn_state; + struct drm_connector *connector; const struct drm_display_mode *mode; u32 hsbyteclk_ps, dsiclk_ps, ui_ps; u32 dsiclk, hsbyteclk; @@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) return; } - mode = &bridge->encoder->crtc->state->adjusted_mode; + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + conn_state = drm_atomic_get_new_connector_state(state, connector); + crtc_state = drm_atomic_get_new_crtc_state(state, 
conn_state->crtc); + mode = &crtc_state->adjusted_mode; ret = tc358768_setup_pll(priv, mode); if (ret) { dev_err(dev, "PLL setup failed: %d\n", ret); @@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) tc358768_write(priv, TC358768_DSI_CONFW, val); ret = tc358768_clear_error(priv); - if (ret) { + if (ret) dev_err(dev, "Bridge pre_enable failed: %d\n", ret); - tc358768_bridge_disable(bridge); - tc358768_bridge_post_disable(bridge); - } } -static void tc358768_bridge_enable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); int ret; @@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge) tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6)); ret = tc358768_clear_error(priv); - if (ret) { + if (ret) dev_err(priv->dev, "Bridge enable failed: %d\n", ret); - tc358768_bridge_disable(bridge); - tc358768_bridge_post_disable(bridge); - } } #define MAX_INPUT_SEL_FORMATS 1 @@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = { .attach = tc358768_bridge_attach, .mode_valid = tc358768_bridge_mode_valid, .mode_fixup = tc358768_mode_fixup, - .pre_enable = tc358768_bridge_pre_enable, - .enable = tc358768_bridge_enable, - .disable = tc358768_bridge_disable, - .post_disable = tc358768_bridge_post_disable, + .atomic_pre_enable = tc358768_bridge_atomic_pre_enable, + .atomic_enable = tc358768_bridge_atomic_enable, + .atomic_disable = tc358768_bridge_atomic_disable, + .atomic_post_disable = tc358768_bridge_atomic_post_disable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index c89757bec4e6..1b10e6ee1724 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b) return container_of(b, struct tc_data, bridge); } -static void tc_bridge_pre_enable(struct drm_bridge *bridge) +static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; @@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge) usleep_range(10, 20); } -static void tc_bridge_post_disable(struct drm_bridge *bridge) +static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; @@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val) ret, addr); } -/* helper function to access bus_formats */ -static struct drm_connector *get_connector(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - return connector; - - return NULL; -} - -static void tc_bridge_enable(struct drm_bridge *bridge) +static void tc_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2; u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2; u32 val = 0; u16 dsiclk, 
clkdiv, byteclk, t1, t2, t3, vsdelay; - struct drm_display_mode *mode; - struct drm_connector *connector = get_connector(bridge->encoder); - - mode = &bridge->encoder->crtc->state->adjusted_mode; + struct drm_connector *connector = + drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; hback_porch = mode->htotal - mode->hsync_end; hsync_len = mode->hsync_end - mode->hsync_start; @@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc) } static int tc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, tc->panel_bridge, + return drm_bridge_attach(encoder, tc->panel_bridge, &tc->bridge, flags); } static const struct drm_bridge_funcs tc_bridge_funcs = { .attach = tc_bridge_attach, - .pre_enable = tc_bridge_pre_enable, - .enable = tc_bridge_enable, + .atomic_pre_enable = tc_bridge_atomic_pre_enable, + .atomic_enable = tc_bridge_atomic_enable, .mode_valid = tc_mode_valid, - .post_disable = tc_bridge_post_disable, + .atomic_post_disable = tc_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, }; static int tc_attach_host(struct tc_data *tc) diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c index 20658258fb51..850909f78a7b 100644 --- a/drivers/gpu/drm/bridge/tda998x_drv.c +++ b/drivers/gpu/drm/bridge/tda998x_drv.c @@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv, /* DRM bridge functions */ static int tda998x_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge); @@ -1780,9 +1781,9 @@ static int tda998x_create(struct device *dev) u32 video; int rev_lo, rev_hi, ret; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs); + if (IS_ERR(priv)) + return PTR_ERR(priv); dev_set_drvdata(dev, priv); @@ -1947,7 +1948,6 @@ static int tda998x_create(struct device *dev) tda998x_audio_codec_init(priv, &client->dev); } - priv->bridge.funcs = &tda998x_bridge_funcs; #ifdef CONFIG_OF priv->bridge.of_node = dev->of_node; #endif diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index bba10cf9b4f9..e2fc78adebcf 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge) } static int thc63_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct thc63_dev *thc63 = to_thc63(bridge); - return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags); + return drm_bridge_attach(encoder, thc63->next, bridge, flags); } static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c 
b/drivers/gpu/drm/bridge/ti-dlpc3433.c index 85f2a0e74a1c..47638d1c96ec 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge, drm_mode_copy(&dlpc->mode, adjusted_mode); } -static int dlpc_attach(struct drm_bridge *bridge, +static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dlpc *dlpc = bridge_to_dlpc(bridge); - return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags); } static const struct drm_bridge_funcs dlpc_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 95563aa1b450..033c44326552 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -40,7 +40,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */ +#include <drm/drm_bridge_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_print.h> @@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge) } static int sn65dsi83_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, &ctx->bridge, flags); } @@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83) { - struct drm_device *dev = sn65dsi83->bridge.dev; struct drm_modeset_acquire_ctx ctx; int err; @@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83) * Keep the lock during the whole operation to be atomic. */ - DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); - - if (!sn65dsi83->bridge.encoder->crtc) { - /* - * No CRTC attached -> No CRTC active outputs to reset - * This can happen when the SN65DSI83 is reset. Simply do - * nothing without returning any errors. 
- */ - err = 0; - goto end; - } + drm_modeset_acquire_init(&ctx, 0); dev_warn(sn65dsi83->dev, "reset the pipe\n"); - err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx); +retry: + err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx); + if (err == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } -end: - DRM_MODESET_LOCK_ALL_END(dev, ctx, err); + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); - return err; + return 0; } static void sn65dsi83_reset_work(struct work_struct *ws) @@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client) struct sn65dsi83 *ctx; int ret; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); ctx->dev = dev; INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work); @@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client) dev_set_drvdata(dev, ctx); i2c_set_clientdata(client, ctx); - ctx->bridge.funcs = &sn65dsi83_funcs; ctx->bridge.of_node = dev->of_node; ctx->bridge.pre_enable_prev_first = true; ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 01d456b955ab..60224f476e1d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -35,6 +35,7 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */ #define SN_DEVICE_REV_REG 0x08 #define SN_DPPLL_SRC_REG 0x0A #define DPPLL_CLK_SRC_DSICLK BIT(0) @@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata, regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); } -static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata) +static struct drm_display_mode * +get_new_adjusted_display_mode(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_connector *connector = + drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + + return &crtc_state->adjusted_mode; +} + +static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { u32 bit_rate_khz, clk_freq_khz; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); bit_rate_khz = mode->clock * mipi_dsi_pixel_format_to_bpp(pdata->dsi->format); @@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = { 460800000, }; -static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { int i; u32 refclk_rate; @@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut); clk_prepare_enable(pdata->refclk); } else { - refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000; + refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000; refclk_lut = ti_sn_bridge_dsiclk_lut; refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut); } @@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i]; } -static void 
ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata) +static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { mutex_lock(&pdata->comms_mutex); /* configure bridge ref_clk */ - ti_sn_bridge_set_refclk_freq(pdata); + ti_sn_bridge_set_refclk_freq(pdata, state); /* * HPD on this bridge chip is a bit useless. This is an eDP bridge @@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) * clock so reading early doesn't work. */ if (pdata->refclk) - ti_sn65dsi86_enable_comms(pdata); + ti_sn65dsi86_enable_comms(pdata, NULL); return ret; } @@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data) return 0; } - DEFINE_SHOW_ATTRIBUTE(status); -static void ti_sn65dsi86_debugfs_remove(void *data) -{ - debugfs_remove_recursive(data); -} - -static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata) -{ - struct device *dev = pdata->dev; - struct dentry *debugfs; - int ret; - - debugfs = debugfs_create_dir(dev_name(dev), NULL); - - /* - * We might get an error back if debugfs wasn't enabled in the kernel - * so let's just silently return upon failure. - */ - if (IS_ERR_OR_NULL(debugfs)) - return; - - ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs); - if (ret) - return; - - debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); -} - /* ----------------------------------------------------------------------------- * Auxiliary Devices (*not* AUX) */ @@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 } static int ti_sn_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); @@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, * Attach the next bridge. * We never want the next bridge to *also* create a connector. */ - ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, + ret = drm_bridge_attach(encoder, pdata->next_bridge, &pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) goto err_initted_aux; @@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge, regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0); } -static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { unsigned int bit_rate_mhz, clk_freq_mhz; unsigned int val; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); /* set DSIA clk frequency */ bit_rate_mhz = (mode->clock / 1000) * @@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = { 0, 1620, 2160, 2430, 2700, 3240, 4320, 5400 }; -static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp) +static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state, + unsigned int bpp) { unsigned int bit_rate_khz, dp_rate_mhz; unsigned int i; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); /* Calculate minimum bit rate based on our pixel clock. 
*/ bit_rate_khz = mode->clock * bpp; @@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata) return valid_rates; } -static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); u8 hsync_polarity = 0, vsync_polarity = 0; if (mode->flags & DRM_MODE_FLAG_NHSYNC) @@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, pdata->ln_polrs << LN_POLRS_OFFSET); /* set dsi clk frequency value */ - ti_sn_bridge_set_dsi_rate(pdata); + ti_sn_bridge_set_dsi_rate(pdata, state); /* * The SN65DSI86 only supports ASSR Display Authentication method and @@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, valid_rates = ti_sn_bridge_read_valid_rates(pdata); /* Train until we run out of rates */ - for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp); + for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp); dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); dp_rate_idx++) { if (!(valid_rates & BIT(dp_rate_idx))) @@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, } /* config video parameters */ - ti_sn_bridge_set_video_timings(pdata); + ti_sn_bridge_set_video_timings(pdata, state); /* enable video stream */ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, @@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge, pm_runtime_get_sync(pdata->dev); if (!pdata->refclk) - ti_sn65dsi86_enable_comms(pdata); + ti_sn65dsi86_enable_comms(pdata, state); /* td7: min 100 us after enable before DSI data */ usleep_range(100, 110); @@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge, return drm_edid_read_ddc(connector, &pdata->aux.ddc); } +static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + struct dentry *debugfs; + + debugfs = debugfs_create_dir(dev_name(pdata->dev), root); + debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); +} + static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .attach = ti_sn_bridge_attach, .detach = ti_sn_bridge_detach, @@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .debugfs_init = ti_sn65dsi86_debugfs_init, }; static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, @@ -1312,7 +1317,6 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, if (ret) return ret; - pdata->bridge.funcs = &ti_sn_bridge_funcs; pdata->bridge.of_node = np; pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort ? 
DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; @@ -1894,6 +1898,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ti_sn65dsi86 *pdata; + u8 id_buf[8]; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { @@ -1901,9 +1906,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) return -ENODEV; } - pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL); - if (!pdata) - return -ENOMEM; + pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); dev_set_drvdata(dev, pdata); pdata->dev = dev; @@ -1937,7 +1942,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (ret) return ret; - ti_sn65dsi86_debugfs_init(pdata); + pm_runtime_get_sync(dev); + ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf)); + pm_runtime_put_autosuspend(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to read device id\n"); + + /* The ID string is stored backwards */ + if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) + return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); /* * Break ourselves up into a collection of aux devices. The only real diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c index 22316382451f..cca75443f012 100644 --- a/drivers/gpu/drm/bridge/ti-tdp158.c +++ b/drivers/gpu/drm/bridge/ti-tdp158.c @@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge, regulator_disable(tdp158->vcc); } -static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int tdp158_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { struct tdp158 *tdp158 = bridge->driver_private; - return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags); + return drm_bridge_attach(encoder, tdp158->next, bridge, flags); } static const struct drm_bridge_funcs tdp158_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 79ab5da827e1..e15d232ddbac 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status) } static int tfp410_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge, + ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge, drm_display_info_set_bus_formats(&dvi->connector.display_info, &dvi->bus_format, 1); - drm_connector_attach_encoder(&dvi->connector, bridge->encoder); + drm_connector_attach_encoder(&dvi->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index 47b74cb25b14..1c289051a598 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge) } static int tpd12s015_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); @@ -46,7 +47,7 @@ static int 
tpd12s015_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge, + ret = drm_bridge_attach(encoder, tpd->next_bridge, bridge, flags); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index a8fca079921b..fddfbd4d2493 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y CONFIG_MTK_CMDQ=y CONFIG_REGULATOR_DA9211=y CONFIG_DRM_ANALOGIX_ANX7625=y +CONFIG_PHY_MTK_HDMI=y +CONFIG_PHY_MTK_MIPI_DSI=y # For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware. CONFIG_ARCH_TEGRA=y diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh index 19fe01257ab9..284873e94d8d 100644 --- a/drivers/gpu/drm/ci/build.sh +++ b/drivers/gpu/drm/ci/build.sh @@ -98,14 +98,14 @@ done make ${KERNEL_IMAGE_NAME} -mkdir -p /lava-files/ +mkdir -p /kernel/ for image in ${KERNEL_IMAGE_NAME}; do - cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/. + cp arch/${KERNEL_ARCH}/boot/${image} /kernel/. done if [[ -n ${DEVICE_TREES} ]]; then make dtbs - cp ${DEVICE_TREES} /lava-files/. + cp ${DEVICE_TREES} /kernel/. fi make modules @@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then -d arch/arm64/boot/Image.lzma \ -C lzma\ -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \ - /lava-files/cheza-kernel + /kernel/cheza-kernel KERNEL_IMAGE_NAME+=" cheza-kernel" # Make a gzipped copy of the Image for db410c. - gzip -k /lava-files/Image + gzip -k /kernel/Image KERNEL_IMAGE_NAME+=" Image.gz" fi @@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/. . .gitlab-ci/container/container_post_build.sh if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then - xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz + xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz" if [[ -n $DEVICE_TREES ]]; then @@ -148,7 +148,7 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then ls -l "${S3_JWT_FILE}" for f in $FILES_TO_UPLOAD; do - ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \ + ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f done @@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common cp .config artifacts/${CI_JOB_NAME}_config for image in ${KERNEL_IMAGE_NAME}; do - cp /lava-files/$image artifacts/install/. + cp /kernel/$image artifacts/install/. done tar -C artifacts -cf artifacts/install.tar install diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml index 274f118533a7..8eb56ebcf4aa 100644 --- a/drivers/gpu/drm/ci/build.yml +++ b/drivers/gpu/drm/ci/build.yml @@ -67,7 +67,7 @@ testing:arm32: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. - ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: arm.config @@ -79,7 +79,7 @@ testing:arm64: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. 
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: arm64.config @@ -91,7 +91,7 @@ testing:x86_64: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. - ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: x86_64.config @@ -143,6 +143,10 @@ debian-arm64-release: rules: - when: never +debian-arm64-ubsan: + rules: + - when: never + debian-build-testing: rules: - when: never @@ -183,6 +187,10 @@ debian-testing-msan: rules: - when: never +debian-testing-ubsan: + rules: + - when: never + debian-vulkan: rules: - when: never diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml index 07dc13ff865d..56c95c2f91ae 100644 --- a/drivers/gpu/drm/ci/container.yml +++ b/drivers/gpu/drm/ci/container.yml @@ -24,6 +24,18 @@ alpine/x86_64_build: rules: - when: never +debian/arm32_test-base: + rules: + - when: never + +debian/arm32_test-gl: + rules: + - when: never + +debian/arm32_test-vk: + rules: + - when: never + debian/arm64_test-gl: rules: - when: never @@ -32,6 +44,10 @@ debian/arm64_test-vk: rules: - when: never +debian/baremetal_arm32_test: + rules: + - when: never + debian/ppc64el_build: rules: - when: never @@ -40,6 +56,14 @@ debian/s390x_build: rules: - when: never +debian/x86_32_build: + rules: + - when: never + +debian/x86_64_test-android: + rules: + - when: never + debian/x86_64_test-vk: rules: - when: never diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index f04aabe8327c..65adcd97e06b 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -1,11 +1,11 @@ variables: DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa - DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024 + DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 82ab58f6c6f94fa80ca7e1615146f08356e3ba69 UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git TARGET_BRANCH: drm-next - IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c + IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git DEQP_RUNNER_GIT_TAG: v0.20.0 @@ -143,11 +143,11 @@ stages: # Pre-merge pipeline - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event" # Push to a branch on a fork - - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push" + - if: &is-fork-push $CI_PIPELINE_SOURCE == "push" # nightly pipeline - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule" # pipeline for direct pushes that bypassed the CI - - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot" + - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot" # Rules applied to every job in the pipeline @@ -170,29 +170,48 @@ stages: - !reference [.disable-farm-mr-rules, rules] # Never run immediately after merging, as we just ran everything - !reference [.never-post-merge-rules, rules] - # Build everything in merge pipelines, if any files affecting the pipeline - # were changed + # Build everything in merge pipelines - if: *is-merge-attempt - changes: &all_paths - - drivers/gpu/drm/ci/**/* when: on_success # Same as 
above, but for pre-merge pipelines - if: *is-pre-merge - changes: - *all_paths when: manual - # Skip everything for pre-merge and merge pipelines which don't change - # anything in the build + # Build everything after someone bypassed the CI + - if: *is-direct-push + when: manual + # Build everything in scheduled pipelines + - if: *is-scheduled-pipeline + when: on_success + # Allow building everything in fork pipelines, but build nothing unless + # manually triggered + - when: manual + + +# Repeat of the above but with `when: on_success` replaced with +# `when: delayed` + `start_in:`, for build-only jobs. +# Note: make sure the branches in this list are the same as in +# `.container+build-rules` above. +.build-only-delayed-rules: + rules: + - !reference [.common-rules, rules] + # Run when re-enabling a disabled farm, but not when disabling it + - !reference [.disable-farm-mr-rules, rules] + # Never run immediately after merging, as we just ran everything + - !reference [.never-post-merge-rules, rules] + # Build everything in merge pipelines - if: *is-merge-attempt - when: never + when: delayed + start_in: &build-delay 5 minutes + # Same as above, but for pre-merge pipelines - if: *is-pre-merge - when: never + when: manual # Build everything after someone bypassed the CI - if: *is-direct-push - when: on_success + when: manual # Build everything in scheduled pipelines - if: *is-scheduled-pipeline - when: on_success + when: delayed + start_in: *build-delay # Allow building everything in fork pipelines, but build nothing unless # manually triggered - when: manual diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh index 68b042e43b7f..2a0599f12c58 100755 --- a/drivers/gpu/drm/ci/igt_runner.sh +++ b/drivers/gpu/drm/ci/igt_runner.sh @@ -85,5 +85,16 @@ deqp-runner junit \ --limit 50 \ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml" +# Check if /proc/lockdep_stats exists +if [ -f /proc/lockdep_stats ]; then + # If debug_locks is 0, it indicates lockdep is detected and it turns itself off. + debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}') + if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then + echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information." 
+ cat /proc/lockdep_stats + ret=101 + fi +fi + cd $oldpath exit $ret diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml index 20049f3626b2..c04ba0e69935 100644 --- a/drivers/gpu/drm/ci/image-tags.yml +++ b/drivers/gpu/drm/ci/image-tags.yml @@ -1,5 +1,5 @@ variables: - CONTAINER_TAG: "20250204-mesa-uprev" + CONTAINER_TAG: "20250307-mesa-uprev" DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base" DEBIAN_BASE_TAG: "${CONTAINER_TAG}" @@ -20,3 +20,5 @@ variables: DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}" ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" + + CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh index 6e5ac51e8c0a..f22720359b33 100755 --- a/drivers/gpu/drm/ci/lava-submit.sh +++ b/drivers/gpu/drm/ci/lava-submit.sh @@ -48,7 +48,8 @@ ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)" rm -rf results mkdir -p results/job-rootfs-overlay/ -artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh +artifacts/ci-common/export-gitlab-job-env-for-dut.sh \ + > results/job-rootfs-overlay/set-job-env-vars.sh cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/ cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/ diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 6a1e059858e5..84a25f0e783b 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -1,6 +1,14 @@ +.allow_failure_lockdep: + variables: + FF_USE_NEW_BASH_EVAL_STRATEGY: 'true' + allow_failure: + exit_codes: + - 101 + .lava-test: extends: - .container+build-rules + - .allow_failure_lockdep timeout: "1h30m" rules: - !reference [.scheduled_pipeline-rules, rules] @@ -69,6 +77,7 @@ extends: - .baremetal-test-arm64 - .use-debian/baremetal_arm64_test + - .allow_failure_lockdep timeout: "1h30m" rules: - !reference [.scheduled_pipeline-rules, rules] @@ -89,6 +98,28 @@ tags: - $RUNNER_TAG +.software-driver: + stage: software-driver + extends: + - .allow_failure_lockdep + timeout: "1h30m" + rules: + - !reference [.scheduled_pipeline-rules, rules] + - when: on_success + extends: + - .test-gl + tags: + - kvm + script: + - ln -sf $CI_PROJECT_DIR/install /install + - mv install/bzImage /kernel/bzImage + - mkdir -p /lib/modules + - install/crosvm-runner.sh install/igt_runner.sh + needs: + - debian/x86_64_test-gl + - testing:x86_64 + - igt:x86_64 + .msm-sc7180: extends: - .lava-igt:arm64 @@ -133,7 +164,7 @@ msm:apq8016: BM_KERNEL_EXTRA_ARGS: clk_ignore_unused RUNNER_TAG: google-freedreno-db410c script: - - ./install/bare-metal/fastboot.sh + - ./install/bare-metal/fastboot.sh || exit $? msm:apq8096: extends: @@ -147,7 +178,7 @@ msm:apq8096: GPU_VERSION: apq8096 RUNNER_TAG: google-freedreno-db820c script: - - ./install/bare-metal/fastboot.sh + - ./install/bare-metal/fastboot.sh || exit $? msm:sdm845: extends: @@ -161,7 +192,7 @@ msm:sdm845: GPU_VERSION: sdm845 RUNNER_TAG: google-freedreno-cheza script: - - ./install/bare-metal/cros-servo.sh + - ./install/bare-metal/cros-servo.sh || exit $? 
msm:sm8350-hdk: extends: @@ -440,47 +471,16 @@ panfrost:g12b: - .panfrost-gpu virtio_gpu:none: - stage: software-driver - timeout: "1h30m" - rules: - - !reference [.scheduled_pipeline-rules, rules] - - when: on_success + extends: + - .software-driver variables: CROSVM_GALLIUM_DRIVER: llvmpipe DRIVER_NAME: virtio_gpu GPU_VERSION: none - extends: - - .test-gl - tags: - - kvm - script: - - ln -sf $CI_PROJECT_DIR/install /install - - mv install/bzImage /lava-files/bzImage - - install/crosvm-runner.sh install/igt_runner.sh - needs: - - debian/x86_64_test-gl - - testing:x86_64 - - igt:x86_64 vkms:none: - stage: software-driver - timeout: "1h30m" - rules: - - !reference [.scheduled_pipeline-rules, rules] - - when: on_success + extends: + - .software-driver variables: DRIVER_NAME: vkms GPU_VERSION: none - extends: - - .test-gl - tags: - - kvm - script: - - ln -sf $CI_PROJECT_DIR/install /install - - mv install/bzImage /lava-files/bzImage - - mkdir -p /lib/modules - - ./install/crosvm-runner.sh ./install/igt_runner.sh - needs: - - debian/x86_64_test-gl - - testing:x86_64 - - igt:x86_64 diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt index 75374085f40f..f44dbce3151a 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt @@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail amdgpu/amd_plane@mpo-scale-p010,Fail amdgpu/amd_plane@mpo-scale-rgb,Crash amdgpu/amd_plane@mpo-swizzle-toggle,Fail -amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash +amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail kms_addfb_basic@bad-pitch-65536,Fail kms_addfb_basic@bo-too-small,Fail kms_addfb_basic@too-high,Fail -kms_async_flips@alternate-sync-async-flip,Fail -kms_async_flips@alternate-sync-async-flip-atomic,Fail -kms_async_flips@test-cursor,Fail -kms_async_flips@test-cursor-atomic,Fail -kms_async_flips@test-time-stamp,Fail -kms_async_flips@test-time-stamp-atomic,Fail kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail kms_atomic_transition@plane-all-transition,Fail kms_atomic_transition@plane-all-transition-nonblocking,Fail diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt index 3879c4812a22..902d54027506 100644 --- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt +++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt @@ -14,6 +14,7 @@ gem_.* i915_.* xe_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
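Note on the bridge changes above: the recurring mechanical change across the drivers in this series is that the .attach callback gains an explicit struct drm_encoder *encoder argument, and drivers chain to their downstream bridge with that argument instead of dereferencing bridge->encoder. A minimal sketch of the resulting pattern ("my_bridge", its next_bridge member, and my_bridge_attach() are hypothetical names for illustration, not taken from any driver in the patch):

    /*
     * Illustrative sketch of the new drm_bridge attach signature;
     * "my_bridge" is a made-up driver, not from this series.
     */
    #include <linux/container_of.h>
    #include <drm/drm_bridge.h>
    #include <drm/drm_encoder.h>

    struct my_bridge {
    	struct drm_bridge bridge;
    	struct drm_bridge *next_bridge;
    };

    static int my_bridge_attach(struct drm_bridge *bridge,
    			    struct drm_encoder *encoder,
    			    enum drm_bridge_attach_flags flags)
    {
    	struct my_bridge *ctx = container_of(bridge, struct my_bridge, bridge);

    	/* Chain to the downstream bridge using the encoder passed in. */
    	return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
    }

Drivers that create their own connector make the equivalent substitution in drm_connector_attach_encoder(&priv->connector, encoder), as seen in the sii902x, simple-bridge, and ti-tfp410 hunks above.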
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt index a29cea4f234c..8e2b5504004e 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt @@ -1,22 +1,18 @@ -core_setmaster@master-drop-set-shared-fd,Fail +core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail -kms_async_flips@test-time-stamp,Timeout -kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout -kms_flip@dpms-off-confusion-interruptible,Timeout -kms_flip@wf_vblank-ts-check-interruptible,Fail +kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout +kms_fb_coherency@memset-crc,Crash kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail -kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail -kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail @@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout kms_lease@lease-uevent,Fail -kms_lease@page-flip-implicit-plane,Timeout kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail +kms_plane_scaling@planes-upscale-factor-0-25,Timeout +kms_pm_backlight@brightness-with-dpms,Crash +kms_pm_backlight@fade,Crash +kms_prop_blob@invalid-set-prop-any,Fail +kms_properties@connector-properties-legacy,Timeout +kms_universal_plane@disable-primary-vs-flip,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash @@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt index 2ef1dc35a7fa..922327632eff 100644 --- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt @@ -11,6 +11,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt index ee11999e3da1..7353ab11e940 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt @@ -1,4 +1,3 @@ -core_setmaster@master-drop-set-user,Fail 
i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail @@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail kms_pm_backlight@basic-brightness,Fail -kms_pm_backlight@brightness-with-dpms,Crash kms_pm_backlight@fade,Fail kms_pm_backlight@fade-with-dpms,Fail kms_pm_rpm@modeset-stress-extra-wait,Timeout @@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt index 4f50e0240ff4..80bf2741866c 100644 --- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt @@ -13,6 +13,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt index 47b3f1d42bb6..6fef7c1e56ea 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt @@ -1,4 +1,4 @@ -core_setmaster@master-drop-set-shared-fd,Fail +core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_async_flips@test-time-stamp,Timeout -kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout +kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout +kms_cursor_crc@cursor-suspend,Timeout +kms_fb_coherency@memset-crc,Crash kms_flip@busy-flip,Timeout kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail @@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail kms_lease@lease-uevent,Fail -kms_lease@page-flip-implicit-plane,Timeout +kms_pipe_stress@stress-xrgb8888-untiled,Fail +kms_pipe_stress@stress-xrgb8888-ytiled,Fail kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail +kms_plane_scaling@planes-upscale-factor-0-25,Timeout +kms_pm_backlight@brightness-with-dpms,Crash +kms_pm_backlight@fade,Crash +kms_prop_blob@invalid-set-prop-any,Fail +kms_properties@connector-properties-legacy,Timeout kms_psr2_sf@cursor-plane-update-sf,Fail 
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail kms_psr2_sf@plane-move-sf-dmg-area,Fail -kms_psr2_sf@pr-cursor-plane-update-sf,Timeout kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail kms_psr2_sf@psr2-cursor-plane-update-sf,Fail @@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail kms_psr2_su@page_flip-NV12,Fail kms_psr2_su@page_flip-P010,Fail kms_setmode@basic,Fail +kms_universal_plane@disable-primary-vs-flip,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash @@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt index c87ff8b40e99..c393a138b8a6 100644 --- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt @@ -9,6 +9,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* @@ -16,7 +17,6 @@ gem_.* # Hangs the machine and timeout occurs i915_pm_rc6_residency.* i915_suspend.* -xe_module_load.* api_intel_allocator.* kms_cursor_legacy.* i915_pm_rpm.* diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt index 843c363b42f5..8adf5f0a6e80 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt @@ -1,39 +1,47 @@ -core_setmaster@master-drop-set-shared-fd,Fail -core_setmaster@master-drop-set-user,Fail +core_setmaster_vs_auth,Fail gen9_exec_parse@unaligned-access,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail -kms_dirtyfb@default-dirtyfb-ioctl,Fail kms_dirtyfb@drrs-dirtyfb-ioctl,Fail -kms_dirtyfb@fbc-dirtyfb-ioctl,Fail +kms_flip@blocking-wf_vblank,Fail +kms_flip@wf_vblank-ts-check,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail -kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail -kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail +kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail 
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail -kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail +kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout +kms_frontbuffer_tracking@fbc-tiling-linear,Fail kms_lease@lease-uevent,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail +kms_plane_scaling@planes-upscale-factor-0-25,Timeout +kms_pm_backlight@brightness-with-dpms,Crash +kms_pm_backlight@fade,Crash +kms_prop_blob@invalid-set-prop-any,Fail +kms_properties@connector-properties-legacy,Timeout +kms_rotation_crc@multiplane-rotation,Fail kms_rotation_crc@multiplane-rotation-cropping-top,Fail +kms_universal_plane@disable-primary-vs-flip,Timeout perf@non-zero-reason,Timeout sysfs_heartbeat_interval@long,Timeout +sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt index 219ae839323a..2e4ef9f35654 100644 --- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt @@ -12,6 +12,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt index 0e08fff741aa..57453e340040 100644 --- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt @@ -3,12 +3,13 @@ i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail +i915_pm_rpm@gem-execbuf-stress,Timeout kms_flip@dpms-off-confusion,Fail +kms_flip@nonexisting-fb,Fail kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail -kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail -kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail +kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip) kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail @@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail -kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout kms_lease@lease-uevent,Fail kms_pm_rpm@modeset-stress-extra-wait,Timeout kms_rotation_crc@bad-pixel-format,Fail @@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail kms_rotation_crc@multiplane-rotation-cropping-top,Fail perf@i915-ref-count,Fail perf_pmu@module-unload,Fail +perf_pmu@most-busy-idle-check-all,Fail perf_pmu@rc6,Crash +prime_busy@before-wait,Fail sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout 
sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt index 1a3d87c0ca6e..8dec57da1bb3 100644 --- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt @@ -9,6 +9,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt index d4fba4f55ec1..117098bc95d9 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt @@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt index dc722d6a774e..e287462a491a 100644 --- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt @@ -12,6 +12,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt index 93d42b146df9..462c050a8b2d 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt @@ -1,13 +1,14 @@ api_intel_allocator@reopen,Timeout api_intel_bb@destroy-bb,Timeout core_hotunplug@hotrebind-lateclose,Timeout -drm_read@short-buffer-block,Timeout dumb_buffer@map-valid,Timeout i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail i915_module_load@resize-bar,Fail +i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rps@engine-order,Timeout +i915_pm_rps@waitboost,Fail kms_lease@lease-uevent,Fail kms_rotation_crc@multiplane-rotation,Fail perf@i915-ref-count,Fail @@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout perf_pmu@module-unload,Fail perf_pmu@rc6,Crash perf_pmu@semaphore-wait-idle,Timeout +prime_busy@before,Fail prime_mmap@test_refcounting,Timeout sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout syncobj_basic@illegal-fd-to-handle,Timeout @@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout syncobj_wait@wait-any-complex,Timeout syncobj_wait@wait-delayed-signal,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt index 938377896841..429dc3c731df 100644 --- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt @@ -18,6 +18,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt index 1cb6978c86dc..0f167cfd503c 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt @@ -1,4 +1,4 @@ 
-core_setmaster@master-drop-set-shared-fd,Fail +core_setmaster_vs_auth,Fail i915_module_load@load,Fail i915_module_load@reload,Fail i915_module_load@reload-no-display,Fail @@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail i915_pm_rpm@gem-execbuf-stress,Timeout i915_pm_rpm@module-reload,Fail i915_pm_rpm@system-suspend-execbuf,Timeout -kms_async_flips@test-time-stamp,Timeout -kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout -kms_dirtyfb@default-dirtyfb-ioctl,Fail -kms_dirtyfb@fbc-dirtyfb-ioctl,Fail +kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout +kms_cursor_crc@cursor-suspend,Timeout +kms_fb_coherency@memset-crc,Crash kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail @@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail +kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout kms_frontbuffer_tracking@fbc-tiling-linear,Fail kms_lease@lease-uevent,Fail -kms_lease@page-flip-implicit-plane,Timeout kms_plane_alpha_blend@alpha-basic,Fail kms_plane_alpha_blend@alpha-opaque-fb,Fail kms_plane_alpha_blend@alpha-transparent-fb,Fail kms_plane_alpha_blend@constant-alpha-max,Fail +kms_plane_scaling@planes-upscale-factor-0-25,Timeout +kms_pm_backlight@brightness-with-dpms,Crash +kms_pm_backlight@fade,Crash +kms_prop_blob@invalid-set-prop-any,Fail +kms_properties@connector-properties-legacy,Timeout +kms_universal_plane@disable-primary-vs-flip,Timeout perf@i915-ref-count,Fail perf_pmu@module-unload,Fail perf_pmu@rc6,Crash @@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout sysfs_heartbeat_interval@off,Timeout sysfs_preempt_timeout@off,Timeout sysfs_timeslice_duration@off,Timeout -xe_module_load@force-load,Fail -xe_module_load@load,Fail -xe_module_load@many-reload,Fail -xe_module_load@reload,Fail -xe_module_load@reload-no-display,Fail diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt index 29bff8922ae1..7e7374ebf3d1 100644 --- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt +++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt @@ -9,6 +9,7 @@ nouveau_.* ^v3d.* ^vc4.* ^vmwgfx* +^xe.* # GEM tests takes ~1000 hours, so skip it gem_.* diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt index 4f176c04ec4e..592d7d69e6fc 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt @@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail kms_color@invalid-gamma-lut-sizes,Fail kms_cursor_legacy@cursor-vs-flip-atomic,Fail kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-atomic,Fail +kms_cursor_legacy@flip-vs-cursor-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-toggle,Fail +kms_cursor_legacy@flip-vs-cursor-varying-size,Fail +kms_flip@basic-plain-flip,Fail +kms_flip@dpms-off-confusion,Fail +kms_flip@dpms-off-confusion-interruptible,Fail +kms_flip@flip-vs-absolute-wf_vblank,Fail +kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail +kms_flip@flip-vs-blocking-wf-vblank,Fail +kms_flip@flip-vs-expired-vblank,Fail +kms_flip@flip-vs-expired-vblank-interruptible,Fail 
kms_flip@flip-vs-modeset-vs-hang,Fail +kms_flip@flip-vs-panning,Fail +kms_flip@flip-vs-panning-interruptible,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-suspend,Fail kms_flip@flip-vs-suspend-interruptible,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip@plain-flip-fb-recreate-interruptible,Fail +kms_flip@plain-flip-interruptible,Fail +kms_flip@plain-flip-ts-check,Fail +kms_flip@plain-flip-ts-check-interruptible,Fail +kms_invalid_mode@overflow-vrefresh,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt index 2956567c3048..443596d9e662 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt @@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_bw@connected-linear-tiling-1-displays-2160x1440p + +# Board Name: mt8173-elm-hana +# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_flip@flip-vs-wf_vblank-interruptible diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt index d0db51874aef..b5ee7323a160 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt @@ -11,6 +11,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt index 5a063361d7f2..184d0cccc318 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt @@ -1,12 +1,38 @@ -core_setmaster@master-drop-set-user,Fail dumb_buffer@create-clear,Crash kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail +kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail kms_bw@linear-tiling-1-displays-1920x1080p,Fail kms_bw@linear-tiling-1-displays-2160x1440p,Fail kms_bw@linear-tiling-1-displays-3840x2160p,Fail +kms_color@invalid-gamma-lut-sizes,Fail kms_cursor_legacy@cursor-vs-flip-atomic,Fail +kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-atomic,Fail +kms_cursor_legacy@flip-vs-cursor-legacy,Fail +kms_cursor_legacy@flip-vs-cursor-toggle,Fail +kms_cursor_legacy@flip-vs-cursor-varying-size,Fail +kms_flip@basic-flip-vs-wf_vblank,Fail +kms_flip@basic-plain-flip,Fail +kms_flip@dpms-off-confusion,Fail +kms_flip@dpms-off-confusion-interruptible,Fail +kms_flip@flip-vs-absolute-wf_vblank,Fail +kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail +kms_flip@flip-vs-blocking-wf-vblank,Fail +kms_flip@flip-vs-expired-vblank,Fail +kms_flip@flip-vs-expired-vblank-interruptible,Fail +kms_flip@flip-vs-modeset-vs-hang,Fail +kms_flip@flip-vs-panning,Fail +kms_flip@flip-vs-panning-interruptible,Fail kms_flip@flip-vs-panning-vs-hang,Fail kms_flip@flip-vs-suspend,Fail +kms_flip@flip-vs-suspend-interruptible,Fail +kms_flip@flip-vs-wf_vblank-interruptible,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip@plain-flip-fb-recreate-interruptible,Fail +kms_flip@plain-flip-interruptible,Fail +kms_flip@plain-flip-ts-check,Fail 
+kms_flip@plain-flip-ts-check-interruptible,Fail +kms_invalid_mode@overflow-vrefresh,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt index df7e5ce7a036..0c67fec92450 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt @@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 fbdev@write + +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_flip@basic-flip-vs-modeset + +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_atomic_transition@plane-all-modeset-transition-internal-panels + +# Board Name: mt8183-kukui-jacuzzi-juniper-sku16 +# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_atomic_transition@plane-toggle-modeset-transition diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt index d0db51874aef..b5ee7323a160 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt @@ -11,6 +11,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt index 8198e06344a3..9fd44a4b962a 100644 --- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt +++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt @@ -11,6 +11,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt index 7752adff05c1..72c469021b66 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt @@ -1,8 +1,4 @@ kms_3d,Fail -kms_cursor_legacy@forked-bo,Fail -kms_cursor_legacy@forked-move,Fail -kms_cursor_legacy@single-bo,Fail -kms_cursor_legacy@torture-bo,Fail kms_force_connector_basic@force-edid,Fail kms_hdmi_inject@inject-4k,Fail kms_lease@lease-uevent,Fail diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt index 1674c8e214d6..87724413174c 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt @@ -10,6 +10,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt index 5550be5486ed..a4d2f2a7963a 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt @@ -13,6 +13,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt index 8910afb6acf2..d270af1cca52 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt @@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_plane@plane-position-hole-dpms + +# Board Name: sc7180-trogdor-kingoftown +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73 +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_plane@plane-position-covered diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt index 478d7c161616..d4b8ba3a54a9 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt @@ -13,6 +13,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. @@ -28,3 +29,6 @@ kms_cursor_crc@cursor-random-max-size # https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162 kms_display_modes@extended-mode-basic kms_display_modes@mst-extended-mode-negative + +# It causes other tests to fail, so skip it. +kms_invalid_mode@overflow-vrefresh diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt index cd3d3b0befe4..cafc802cecea 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt @@ -11,3 +11,10 @@ msm/msm_mapping@shadow # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_lease@page-flip-implicit-plane + +# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5 +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74 +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_cursor_crc@cursor-random-128x128 diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt index ef9318afcd89..022db559cc7d 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt @@ -13,6 +13,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt index 38ec0305c1f4..e32d73c6c98e 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt @@ -130,3 +130,10 @@ kms_lease@page-flip-implicit-plane # IGT Version: 1.28-ga73311079 # Linux Version: 6.11.0-rc5 kms_flip@flip-vs-expired-vblank + +# Board Name: sdm845-cheza-r3 +# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75 +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_flip@plain-flip-ts-check-interruptible diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt index 2ce7f7e23a01..6c86d1953e11 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt @@ -18,6 +18,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. @@ -35,3 +36,315 @@ kms_content_protection@uevent # https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162 kms_display_modes@extended-mode-basic kms_display_modes@mst-extended-mode-negative + +# Kernel panic +msm/msm_recovery@hangcheck +# DEBUG - Begin test msm/msm_recovery@hangcheck +# Console: switching to colour dummy device 80x25 +# [ 489.526286] [IGT] msm_recovery: executing +# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck +# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU +# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179 +# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8) +# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1 +# [ 515.934727] Tainted: [W]=WARN +# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT) +# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +# [ 515.934751] pc : rcu_core+0x59c/0xe68 +# [ 515.934769] lr : rcu_core+0x74/0xe68 +# [ 515.934781] sp : ffff800080003e50 +# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002 +# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000 +# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0 +# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70 +# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000 +# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000 +# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000 +# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0 +# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000 +# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0 +# [ 515.934939] Call trace: +# [ 515.934945] rcu_core+0x59c/0xe68 (P) +# [ 515.934962] rcu_core_si+0x10/0x1c +# [ 515.934976] handle_softirqs+0x118/0x4b8 +# [ 515.934994] __do_softirq+0x14/0x20 +# [ 515.935007] ____do_softirq+0x10/0x1c +# [ 515.935021] call_on_irq_stack+0x24/0x4c +# [ 515.935034] do_softirq_own_stack+0x1c/0x28 +# [ 515.935048] __irq_exit_rcu+0x174/0x1b4 +# [ 515.935063] irq_exit_rcu+0x10/0x38 +# [ 515.935077] el1_interrupt+0x38/0x64 +# [ 515.935092] el1h_64_irq_handler+0x18/0x24 +# [ 515.935104] el1h_64_irq+0x6c/0x70 +# [ 515.935115] lock_acquire+0x1e0/0x338 (P) +# [ 515.935129] __mutex_lock+0xa8/0x4b8 +# [ 515.935144] mutex_lock_nested+0x24/0x30 +# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc +# [ 515.935174] _find_key+0x64/0x16c +# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74 +# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128 +# [ 515.935211] __cpufreq_driver_target+0x144/0x29c +# [ 515.935227] sugov_work+0x58/0x74 +# [ 515.935239] kthread_worker_fn+0xf4/0x324 +# [ 515.935254] kthread+0x12c/0x208 +# [ 515.935266] ret_from_fork+0x10/0x20 +# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! 
+# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0! +# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605 +# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611 +# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126] +# [ 540.124439] Modules linked in: +# [ 540.124448] irq event stamp: 9912389 +# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130 +# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64 +# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8 +# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20 +# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1 +# [ 540.124540] Tainted: [W]=WARN +# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT) +# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) +# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c +# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c +# [ 540.124581] sp : ffff800080003c20 +# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0 +# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006 +# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001 +# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50 +# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000 +# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000 +# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000 +# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f +# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080 +# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280 +# [ 540.124731] Call trace: +# [ 540.124736] 
xhci_urb_enqueue+0xbc/0x32c (P) +# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc +# [ 540.124766] usb_submit_urb+0x294/0x560 +# [ 540.124780] intr_callback+0x78/0x1fc +# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128 +# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140 +# [ 540.124825] process_one_work+0x208/0x5e8 +# [ 540.124840] bh_worker+0x1a8/0x20c +# [ 540.124853] workqueue_softirq_action+0x78/0x88 +# [ 540.124868] tasklet_hi_action+0x14/0x3c +# [ 540.124883] handle_softirqs+0x118/0x4b8 +# [ 540.124897] __do_softirq+0x14/0x20 +# [ 540.124908] ____do_softirq+0x10/0x1c +# [ 540.124922] call_on_irq_stack+0x24/0x4c +# [ 540.124934] do_softirq_own_stack+0x1c/0x28 +# [ 540.124947] __irq_exit_rcu+0x174/0x1b4 +# [ 540.124961] irq_exit_rcu+0x10/0x38 +# [ 540.124976] el1_interrupt+0x38/0x64 +# [ 540.124987] el1h_64_irq_handler+0x18/0x24 +# [ 540.124998] el1h_64_irq+0x6c/0x70 +# [ 540.125009] lock_acquire+0x1e0/0x338 (P) +# [ 540.125023] __mutex_lock+0xa8/0x4b8 +# [ 540.125038] mutex_lock_nested+0x24/0x30 +# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc +# [ 540.125067] _find_key+0x64/0x16c +# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74 +# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128 +# [ 540.125105] __cpufreq_driver_target+0x144/0x29c +# [ 540.125121] sugov_work+0x58/0x74 +# [ 540.125133] kthread_worker_fn+0xf4/0x324 +# [ 540.125148] kthread+0x12c/0x208 +# [ 540.125160] ret_from_fork+0x10/0x20 +# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks +# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1 +# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP +# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT) +# [ 540.443022] Call trace: +# [ 540.445559] show_stack+0x18/0x24 (C) +# [ 540.449357] dump_stack_lvl+0x38/0xd0 +# [ 540.453157] dump_stack+0x18/0x24 +# [ 540.456599] panic+0x3bc/0x41c +# [ 540.459767] watchdog_timer_fn+0x254/0x2e4 +# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440 +# [ 540.468508] hrtimer_interrupt+0xe4/0x244 +# [ 540.472662] arch_timer_handler_phys+0x2c/0x44 +# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0 +# [ 540.481943] handle_irq_desc+0x40/0x58 +# [ 540.485829] generic_handle_domain_irq+0x1c/0x28 +# [ 540.490604] gic_handle_irq+0x4c/0x11c +# [ 540.494483] do_interrupt_handler+0x50/0x84 +# [ 540.498811] el1_interrupt+0x34/0x64 +# [ 540.502518] el1h_64_irq_handler+0x18/0x24 +# [ 540.506758] el1h_64_irq+0x6c/0x70 +# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P) +# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc +# [ 540.518932] usb_submit_urb+0x294/0x560 +# [ 540.522901] intr_callback+0x78/0x1fc +# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128 +# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140 +# [ 540.535614] process_one_work+0x208/0x5e8 +# [ 540.539769] bh_worker+0x1a8/0x20c +# [ 540.543293] workqueue_softirq_action+0x78/0x88 +# [ 540.547980] tasklet_hi_action+0x14/0x3c +# [ 540.552038] handle_softirqs+0x118/0x4b8 +# [ 540.556096] __do_softirq+0x14/0x20 +# [ 540.559705] ____do_softirq+0x10/0x1c +# [ 540.563500] call_on_irq_stack+0x24/0x4c +# [ 540.567554] do_softirq_own_stack+0x1c/0x28 +# [ 540.571878] __irq_exit_rcu+0x174/0x1b4 +# [ 540.575849] irq_exit_rcu+0x10/0x38 +# [ 540.579462] el1_interrupt+0x38/0x64 +# [ 540.583158] el1h_64_irq_handler+0x18/0x24 +# [ 540.587397] el1h_64_irq+0x6c/0x70 +# [ 540.590918] lock_acquire+0x1e0/0x338 (P) +# [ 540.595060] __mutex_lock+0xa8/0x4b8 +# [ 540.598760] mutex_lock_nested+0x24/0x30 +# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc +# [ 540.607503] 
_find_key+0x64/0x16c +# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74 +# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128 +# [ 540.620924] __cpufreq_driver_target+0x144/0x29c +# [ 540.625698] sugov_work+0x58/0x74 +# [ 540.629134] kthread_worker_fn+0xf4/0x324 +# [ 540.633278] kthread+0x12c/0x208 +# [ 540.636619] ret_from_fork+0x10/0x20 +# [ 540.640321] SMP: stopping secondary CPUs +# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000 +# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000 +# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b +# [ 540.660829] Memory Limit: none +# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]--- diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt index 329770c520d9..9450f2a002fd 100644 --- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt +++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt @@ -10,6 +10,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt index 3c7e494857b5..198deea3faa9 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt @@ -10,6 +10,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Panfrost is not a KMS driver, so skip the KMS tests kms_.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt index 3c7e494857b5..198deea3faa9 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt @@ -10,6 +10,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Panfrost is not a KMS driver, so skip the KMS tests kms_.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt index feeed89b6c3f..af99ac54c3a5 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt @@ -13,6 +13,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Panfrost is not a KMS driver, so skip the KMS tests kms_.* diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt index feeed89b6c3f..af99ac54c3a5 100644 --- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt +++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt @@ -13,6 +13,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Panfrost is not a KMS driver, so skip the KMS tests kms_.* diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt index ba9160d4d8eb..61122ea7f008 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt @@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash dumb_buffer@create-clear,Crash fbdev@pan,Crash kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail -kms_flip@flip-vs-modeset-vs-hang,Crash kms_prop_blob@invalid-set-prop,Crash kms_prop_blob@invalid-set-prop-any,Crash diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt index eb16b29dee48..71418ea35a17 100644 --- 
a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt @@ -14,6 +14,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt index 2803d0d80192..45dd8d493f6e 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt @@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash kms_atomic_transition@modeset-transition,Fail kms_atomic_transition@modeset-transition-fencing,Fail kms_atomic_transition@plane-toggle-modeset-transition,Fail -kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail kms_color@gamma,Fail kms_color@legacy-gamma,Fail kms_cursor_crc@cursor-alpha-opaque,Fail @@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail kms_flip@plain-flip-ts-check-interruptible,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_invalid_mode@int-max-clock,Fail +kms_invalid_mode@overflow-vrefresh,Fail kms_lease@lease-uevent,Fail kms_lease@page-flip-implicit-plane,Fail kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt index 348b4ce7eb4b..b467991d4094 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt @@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p # Linux Version: 6.11.0-rc5 kms_flip@flip-vs-expired-vblank -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u # Failure Rate: 40 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_bw@linear-tiling-1-displays-2160x1440p -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u # Failure Rate: 80 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_plane_multiple@tiling-none -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u # Failure Rate: 100 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_bw@linear-tiling-1-displays-1920x1080p -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u # Failure Rate: 100 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_plane@plane-position-hole-dpms -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u # Failure Rate: 100 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_flip@flip-vs-absolute-wf_vblank -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u # Failure Rate: 100 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_flip@flip-vs-blocking-wf-vblank -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u # 
Failure Rate: 60 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_bw@linear-tiling-2-displays-1920x1080p -# Board Name: hp-11A-G6-EE-grunt +# Board Name: rk3399-gru-kevin # Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u # Failure Rate: 20 # IGT Version: 1.29-g33adea9eb # Linux Version: 6.13.0-rc2 kms_flip@modeset-vs-vblank-race-interruptible + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_bw@connected-linear-tiling-1-displays-2160x1440p + +# Board Name: rk3399-gru-kevin +# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_bw@linear-tiling-1-displays-3840x2160p diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt index e8e994d92557..b83ec75161b2 100644 --- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt +++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt @@ -14,6 +14,7 @@ nouveau_.* gem_.* i915_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt index c72fee70e739..9749ddb75121 100644 --- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt @@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail kms_flip@wf_vblank-ts-check,Fail kms_flip@wf_vblank-ts-check-interruptible,Fail kms_invalid_mode@int-max-clock,Fail +kms_invalid_mode@overflow-vrefresh,Fail kms_lease@cursor-implicit-plane,Fail kms_lease@lease-uevent,Fail kms_lease@page-flip-implicit-plane,Fail diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt index adbcdd0f28d2..28e37185bac0 100644 --- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt @@ -19,6 +19,7 @@ gem_.* i915_.* xe_.* tools_test.* +kms_dp_link_training.* # Currently fails and causes coverage loss for other tests # since core_getversion also fails. 
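
The skip lists touched throughout this series hold one pattern per line, and entries such as ^xe.* and kms_dp_link_training.* only make sense if each line is treated as a regular expression matched against the IGT test name, so a single entry can fence off an entire driver's tests. A minimal sketch of that matching, assuming POSIX extended-regex semantics and made-up test names; the real logic lives in igt_runner, not in the kernel tree:

	#include <regex.h>
	#include <stdio.h>

	/* Check a few sample test names against two patterns taken from the
	 * skip lists above. Each pattern is compiled as an extended regular
	 * expression; a successful regexec() means the test is skipped. */
	int main(void)
	{
		static const char * const patterns[] = { "^xe.*", "kms_dp_link_training.*" };
		static const char * const tests[] = {
			"xe_module_load@load",		/* matches ^xe.* */
			"kms_dp_link_training@basic",	/* hypothetical subtest name */
			"kms_lease@lease-uevent",	/* matches nothing: runs */
		};

		for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
			int skipped = 0;

			for (size_t j = 0; j < sizeof(patterns) / sizeof(patterns[0]); j++) {
				regex_t re;

				if (regcomp(&re, patterns[j], REG_EXTENDED | REG_NOSUB))
					continue;
				if (regexec(&re, tests[i], 0, NULL, 0) == 0)
					skipped = 1;
				regfree(&re);
			}
			printf("%s: %s\n", tests[i], skipped ? "skip" : "run");
		}
		return 0;
	}

Note that kms_dp_link_training.* is unanchored; under plain regexec() semantics it would match that substring anywhere in a name, so the entry relies on the runner anchoring at the start or on no other test containing it.
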
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt index 62428f3c8f31..e3ca6da8cde7 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt @@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank # IGT Version: 1.28-gf13702b8e # Linux Version: 6.10.0-rc5 kms_pipe_crc_basic@nonblocking-crc-frame-sequence + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u +# Failure Rate: 20 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_flip@modeset-vs-vblank-race + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_pipe_crc_basic@suspend-read-crc + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_plane@plane-panning-bottom-right-suspend + +# Board Name: vkms +# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u +# Failure Rate: 100 +# IGT Version: 1.30-g04bedb923 +# Linux Version: 6.14.0-rc4 +kms_vblank@ts-continuation-dpms-suspend diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt index 319789806271..716d2d4e452d 100644 --- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt +++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt @@ -1,5 +1,6 @@ # keeps printing vkms_vblank_simulate: vblank timer overrun and never ends kms_invalid_mode@int-max-clock +kms_invalid_mode@overflow-vrefresh # kernel panic seen with kms_cursor_crc tests kms_cursor_crc.* @@ -802,6 +803,7 @@ gem_.* i915_.* xe_.* tools_test.* +kms_dp_link_training.* # IGT issue. is_joiner_mode() should return false for non-Intel hardware. # https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162 diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 30c736fc0067..7d2e499ea5de 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -98,6 +98,21 @@ struct drm_bridge_connector { * HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI). */ struct drm_bridge *bridge_hdmi; + /** + * @bridge_hdmi_audio: + * + * The bridge in the chain that implements necessary support for the + * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO). + */ + struct drm_bridge *bridge_hdmi_audio; + /** + * @bridge_dp_audio: + * + * The bridge in the chain that implements necessary support for the + * DisplayPort Audio infrastructure, if any (see + * &DRM_BRIDGE_OP_DP_AUDIO). 
+ */ + struct drm_bridge *bridge_dp_audio; }; #define to_drm_bridge_connector(x) \ @@ -433,14 +448,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector) to_drm_bridge_connector(connector); struct drm_bridge *bridge; - bridge = bridge_connector->bridge_hdmi; - if (!bridge) - return -EINVAL; + if (bridge_connector->bridge_hdmi_audio) { + bridge = bridge_connector->bridge_hdmi_audio; + + if (!bridge->funcs->hdmi_audio_startup) + return 0; + + return bridge->funcs->hdmi_audio_startup(connector, bridge); + } + + if (bridge_connector->bridge_dp_audio) { + bridge = bridge_connector->bridge_dp_audio; - if (!bridge->funcs->hdmi_audio_startup) - return 0; + if (!bridge->funcs->dp_audio_startup) + return 0; - return bridge->funcs->hdmi_audio_startup(connector, bridge); + return bridge->funcs->dp_audio_startup(connector, bridge); + } + + return -EINVAL; } static int drm_bridge_connector_audio_prepare(struct drm_connector *connector, @@ -451,11 +477,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector, to_drm_bridge_connector(connector); struct drm_bridge *bridge; - bridge = bridge_connector->bridge_hdmi; - if (!bridge) - return -EINVAL; + if (bridge_connector->bridge_hdmi_audio) { + bridge = bridge_connector->bridge_hdmi_audio; + + return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms); + } - return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms); + if (bridge_connector->bridge_dp_audio) { + bridge = bridge_connector->bridge_dp_audio; + + return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms); + } + + return -EINVAL; } static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector) @@ -464,11 +498,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector) to_drm_bridge_connector(connector); struct drm_bridge *bridge; - bridge = bridge_connector->bridge_hdmi; - if (!bridge) - return; + if (bridge_connector->bridge_hdmi_audio) { + bridge = bridge_connector->bridge_hdmi_audio; + bridge->funcs->hdmi_audio_shutdown(connector, bridge); + } - bridge->funcs->hdmi_audio_shutdown(connector, bridge); + if (bridge_connector->bridge_dp_audio) { + bridge = bridge_connector->bridge_dp_audio; + bridge->funcs->dp_audio_shutdown(connector, bridge); + } } static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector, @@ -478,15 +516,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto to_drm_bridge_connector(connector); struct drm_bridge *bridge; - bridge = bridge_connector->bridge_hdmi; - if (!bridge) - return -EINVAL; + if (bridge_connector->bridge_hdmi_audio) { + bridge = bridge_connector->bridge_hdmi_audio; + + if (!bridge->funcs->hdmi_audio_mute_stream) + return -ENOTSUPP; - if (bridge->funcs->hdmi_audio_mute_stream) return bridge->funcs->hdmi_audio_mute_stream(connector, bridge, enable, direction); - else - return -ENOTSUPP; + } + + if (bridge_connector->bridge_dp_audio) { + bridge = bridge_connector->bridge_dp_audio; + + if (!bridge->funcs->dp_audio_mute_stream) + return -ENOTSUPP; + + return bridge->funcs->dp_audio_mute_stream(connector, bridge, + enable, direction); + } + + return -EINVAL; } static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = { @@ -576,6 +626,42 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, max_bpc = bridge->max_bpc; } + if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) { + if (bridge_connector->bridge_hdmi_audio) + return 
ERR_PTR(-EBUSY); + + if (bridge_connector->bridge_dp_audio) + return ERR_PTR(-EBUSY); + + if (!bridge->hdmi_audio_max_i2s_playback_channels && + !bridge->hdmi_audio_spdif_playback) + return ERR_PTR(-EINVAL); + + if (!bridge->funcs->hdmi_audio_prepare || + !bridge->funcs->hdmi_audio_shutdown) + return ERR_PTR(-EINVAL); + + bridge_connector->bridge_hdmi_audio = bridge; + } + + if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) { + if (bridge_connector->bridge_dp_audio) + return ERR_PTR(-EBUSY); + + if (bridge_connector->bridge_hdmi_audio) + return ERR_PTR(-EBUSY); + + if (!bridge->hdmi_audio_max_i2s_playback_channels && + !bridge->hdmi_audio_spdif_playback) + return ERR_PTR(-EINVAL); + + if (!bridge->funcs->dp_audio_prepare || + !bridge->funcs->dp_audio_shutdown) + return ERR_PTR(-EINVAL); + + bridge_connector->bridge_dp_audio = bridge; + } + if (!drm_bridge_get_next_bridge(bridge)) connector_type = bridge->type; @@ -611,22 +697,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, max_bpc); if (ret) return ERR_PTR(ret); - - if (bridge->hdmi_audio_max_i2s_playback_channels || - bridge->hdmi_audio_spdif_playback) { - if (!bridge->funcs->hdmi_audio_prepare || - !bridge->funcs->hdmi_audio_shutdown) - return ERR_PTR(-EINVAL); - - ret = drm_connector_hdmi_audio_init(connector, - bridge->hdmi_audio_dev, - &drm_bridge_connector_hdmi_audio_funcs, - bridge->hdmi_audio_max_i2s_playback_channels, - bridge->hdmi_audio_spdif_playback, - bridge->hdmi_audio_dai_port); - if (ret) - return ERR_PTR(ret); - } } else { ret = drmm_connector_init(drm, connector, &drm_bridge_connector_funcs, @@ -635,6 +705,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, return ERR_PTR(ret); } + if (bridge_connector->bridge_hdmi_audio || + bridge_connector->bridge_dp_audio) { + struct device *dev; + + if (bridge_connector->bridge_hdmi_audio) + dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev; + else + dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev; + + ret = drm_connector_hdmi_audio_init(connector, dev, + &drm_bridge_connector_hdmi_audio_funcs, + bridge->hdmi_audio_max_i2s_playback_channels, + bridge->hdmi_audio_spdif_playback, + bridge->hdmi_audio_dai_port); + if (ret) + return ERR_PTR(ret); + } + drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs); if (bridge_connector->bridge_hpd) diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c index 56a4965e518c..ed31471bd0e2 100644 --- a/drivers/gpu/drm/display/drm_dp_cec.c +++ b/drivers/gpu/drm/display/drm_dp_cec.c @@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable) u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0; ssize_t err = 0; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val); return (enable && err < 0) ? err : 0; } @@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) la_mask |= adap->log_addrs.log_addr_mask | (1 << addr); mask[0] = la_mask & 0xff; mask[1] = la_mask >> 8; - err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); + err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? 
err : 0; } @@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, unsigned int retries = min(5, attempts - 1); ssize_t err; - err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER, - msg->msg, msg->len); + err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER, + msg->msg, msg->len); if (err < 0) return err; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO, - (msg->len - 1) | (retries << 4) | - DP_CEC_TX_MESSAGE_SEND); - return err < 0 ? err : 0; + return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO, + (msg->len - 1) | (retries << 4) | + DP_CEC_TX_MESSAGE_SEND); } static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, @@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, if (!(adap->capabilities & CEC_CAP_MONITOR_ALL)) return 0; - err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val); - if (err >= 0) { + err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val); + if (!err) { if (enable) val |= DP_CEC_SNOOPING_ENABLE; else val &= ~DP_CEC_SNOOPING_ENABLE; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val); } return (enable && err < 0) ? err : 0; } @@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux) u8 rx_msg_info; ssize_t err; - err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); + err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); if (err < 0) return err; @@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux) return 0; msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1; - err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); + err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); if (err < 0) return err; @@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) struct cec_adapter *adap = aux->cec.adap; u8 flags; - if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) return; if (flags & DP_CEC_RX_MESSAGE_INFO_VALID) @@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR)) cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES); - drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); + drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); } /** @@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux) if (!aux->cec.adap) goto unlock; - ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, - &cec_irq); + ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, + &cec_irq); if (ret < 0 || !(cec_irq & DP_CEC_IRQ)) goto unlock; drm_dp_cec_handle_irq(aux); - drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); + drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); unlock: mutex_unlock(&aux->cec.lock); } @@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap) { u8 cap = 0; - if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 || + if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 || !(cap & DP_CEC_TUNNELING_CAPABLE)) return false; if (cec_cap) diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index dbce1c3f4969..57828f2b7b5a 100644 --- 
a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI if (offset < DP_RECEIVER_CAP_SIZE) { rd_interval = dpcd[offset]; } else { - if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) { + if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) { drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n", aux->name); /* arbitrary default delay */ @@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux) int unit; u8 val; - if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) { drm_err(aux->drm_dev, "%s: failed rd interval read\n", aux->name); /* default to max */ @@ -704,6 +704,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered); * function returns -EPROTO. Errors from the underlying AUX channel transfer * function, with the exception of -EBUSY (which causes the transaction to * be retried), are propagated to the caller. + * + * In most of the cases you want to use drm_dp_dpcd_read_data() instead. */ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size) @@ -752,6 +754,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read); * function returns -EPROTO. Errors from the underlying AUX channel transfer * function, with the exception of -EBUSY (which causes the transaction to * be retried), are propagated to the caller. + * + * In most of the cases you want to use drm_dp_dpcd_write_data() instead. */ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size) @@ -774,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write); * @aux: DisplayPort AUX channel * @status: buffer to store the link status in (must be at least 6 bytes) * - * Returns the number of bytes transferred on success or a negative error - * code on failure. + * Returns a negative error code on failure or 0 on success. 
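The conversion pattern repeated throughout this patch is easiest to see side by side. A minimal sketch with a hypothetical caller (the surrounding function is illustrative, not part of the patch): drm_dp_dpcd_read() returns a byte count, so short transfers have to be checked separately, while drm_dp_dpcd_read_data() returns 0 on success or a negative error code, folding the short-transfer case into the error path:

    u8 dpcd[DP_RECEIVER_CAP_SIZE];
    int ret;

    /* old helper: two failure modes to tell apart */
    ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
    if (ret < 0)
            return ret;             /* AUX transfer failed */
    if (ret != sizeof(dpcd))
            return -EIO;            /* short read */

    /* new helper: a single error check */
    ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
    if (ret < 0)
            return ret;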
*/ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]) { - return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, - DP_LINK_STATUS_SIZE); + return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status, + DP_LINK_STATUS_SIZE); } EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); @@ -804,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, { int ret; - if (dp_phy == DP_PHY_DPRX) { - ret = drm_dp_dpcd_read(aux, - DP_LANE0_1_STATUS, - link_status, - DP_LINK_STATUS_SIZE); - - if (ret < 0) - return ret; - - WARN_ON(ret != DP_LINK_STATUS_SIZE); - - return 0; - } + if (dp_phy == DP_PHY_DPRX) + return drm_dp_dpcd_read_data(aux, + DP_LANE0_1_STATUS, + link_status, + DP_LINK_STATUS_SIZE); - ret = drm_dp_dpcd_read(aux, - DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy), - link_status, - DP_LINK_STATUS_SIZE - 1); + ret = drm_dp_dpcd_read_data(aux, + DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy), + link_status, + DP_LINK_STATUS_SIZE - 1); if (ret < 0) return ret; - WARN_ON(ret != DP_LINK_STATUS_SIZE - 1); - /* Convert the LTTPR to the sink PHY link status layout */ memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1], &link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS], @@ -838,12 +831,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, } EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status); +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @revision: DPCD revision supported on the link + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (revision < DP_DPCD_REV_11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_up); + +/** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @revision: DPCD revision supported on the link + * + * Returns 0 on success or a negative error code on failure. 
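The two power helpers are meant to bracket link bring-up and tear-down. A minimal sketch, assuming a hypothetical driver that has already cached the sink's DPCD capabilities via drm_dp_read_dpcd_caps() (all names here are illustrative):

    static int hypothetical_dp_link_enable(struct drm_dp_aux *aux,
                                           const u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
            int ret;

            /* wake the sink (a no-op on DPCD 1.0 devices) before training */
            ret = drm_dp_link_power_up(aux, dpcd[DP_DPCD_REV]);
            if (ret < 0)
                    return ret;

            /* ... link training and stream enable would follow here ... */

            return 0;
    }

The matching disable path would call drm_dp_link_power_down() with the same cached revision.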
+ */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (revision < DP_DPCD_REV_11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_down); + static int read_payload_update_status(struct drm_dp_aux *aux) { int ret; u8 status; - ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); if (ret < 0) return ret; @@ -870,21 +932,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux, int ret; int retries = 0; - drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, - DP_PAYLOAD_TABLE_UPDATED); + drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, + DP_PAYLOAD_TABLE_UPDATED); payload_alloc[0] = vcpid; payload_alloc[1] = start_time_slot; payload_alloc[2] = time_slot_count; - ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); - if (ret != 3) { + ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret); goto fail; } retry: - ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret); goto fail; @@ -1040,15 +1102,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, { u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0; - if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, - &auto_test_req, 1) < 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + &auto_test_req) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n", aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR); return false; } auto_test_req &= DP_AUTOMATED_TEST_REQUEST; - if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) { + if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n", aux->name, DP_TEST_REQUEST); return false; @@ -1061,23 +1123,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, return false; } - if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, - &auto_test_req, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + auto_test_req) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR); return false; } /* send back checksum for the last edid extension block data */ - if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM, - &real_edid_checksum, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM, + real_edid_checksum) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_TEST_EDID_CHECKSUM); return false; } test_resp |= DP_TEST_EDID_CHECKSUM_WRITE; - if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_TEST_RESPONSE); return false; @@ -1114,12 +1176,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux 
*aux, DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) return 0; - ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext, - sizeof(dpcd_ext)); + ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext, + sizeof(dpcd_ext)); if (ret < 0) return ret; - if (ret != sizeof(dpcd_ext)) - return -EIO; if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { drm_dbg_kms(aux->drm_dev, @@ -1156,10 +1216,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, { int ret; - ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); + ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); if (ret < 0) return ret; - if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0) + if (dpcd[DP_DPCD_REV] == 0) return -EIO; ret = drm_dp_read_extended_dpcd_caps(aux, dpcd); @@ -1209,11 +1269,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux, if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) len *= 4; - ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); + ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); if (ret < 0) return ret; - if (ret != len) - return -EIO; drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports); @@ -1570,7 +1628,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode); */ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]) { - return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6); + return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6); } EXPORT_SYMBOL(drm_dp_downstream_id); @@ -1635,13 +1693,13 @@ void drm_dp_downstream_debug(struct seq_file *m, drm_dp_downstream_id(aux, id); seq_printf(m, "\t\tID: %s\n", id); - len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1); - if (len > 0) + len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1); + if (!len) seq_printf(m, "\t\tHW: %d.%d\n", (rev[0] & 0xf0) >> 4, rev[0] & 0xf); - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); - if (len > 0) + len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2); + if (!len) seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); if (detailed_cap_info) { @@ -1779,11 +1837,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux) u8 count; int ret; - ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count); + ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count); if (ret < 0) return ret; - if (ret != 1) - return -EIO; return DP_GET_SINK_COUNT(count); } @@ -2172,13 +2228,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc) u8 buf, count; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; WARN_ON(!(buf & DP_TEST_SINK_START)); - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf); if (ret < 0) return ret; @@ -2192,11 +2248,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc) * At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes * per component (RGB or CrYCb). 
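For reference, those six bytes unpack into three 16-bit component CRCs, least significant byte first, which is exactly what drm_dp_aux_crc_work() below does with them; as a short sketch:

    u8 crc[6];      /* filled from DP_TEST_CRC_R_CR */
    u32 crcs[3];    /* R/Cr, G/Y, B/Cb */

    crcs[0] = crc[0] | crc[1] << 8;
    crcs[1] = crc[2] | crc[3] << 8;
    crcs[2] = crc[4] | crc[5] << 8;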
*/ - ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6); } static void drm_dp_aux_crc_work(struct work_struct *work) @@ -2395,11 +2447,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; - ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); + ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); if (ret < 0) return ret; @@ -2422,11 +2474,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; - ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START); + ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START); if (ret < 0) return ret; @@ -2512,11 +2564,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset, struct drm_dp_dpcd_ident *ident) { - int ret; - - ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); - - return ret < 0 ? ret : 0; + return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident)); } static void drm_dp_dump_desc(struct drm_dp_aux *aux, @@ -2774,13 +2822,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux, int ret; for (offset = 0; offset < buf_size; offset += block_size) { - ret = drm_dp_dpcd_read(aux, - address + offset, - &buf[offset], block_size); + ret = drm_dp_dpcd_read_data(aux, + address + offset, + &buf[offset], block_size); if (ret < 0) return ret; - - WARN_ON(ret != block_size); } return 0; @@ -2995,12 +3041,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, int err; u8 rate, lanes; - err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate); + err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate); if (err < 0) return err; data->link_rate = drm_dp_bw_code_to_link_rate(rate); - err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes); + err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes); if (err < 0) return err; data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK; @@ -3008,22 +3054,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, if (lanes & DP_ENHANCED_FRAME_CAP) data->enhanced_frame_cap = true; - err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern); + err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern); if (err < 0) return err; switch (data->phy_pattern) { case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: - err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, - &data->custom80, sizeof(data->custom80)); + err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + &data->custom80, sizeof(data->custom80)); if (err < 0) return err; break; case DP_PHY_TEST_PATTERN_CP2520: - err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET, - &data->hbr2_reset, - sizeof(data->hbr2_reset)); + err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET, + &data->hbr2_reset, + sizeof(data->hbr2_reset)); if (err < 0) return err; } @@ -3050,15 +3096,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, if (dp_rev < 0x12) { test_pattern = (test_pattern << 2) & DP_LINK_QUAL_PATTERN_11_MASK; - err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, - test_pattern); + err = 
drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET, + test_pattern); if (err < 0) return err; } else { for (i = 0; i < data->num_lanes; i++) { - err = drm_dp_dpcd_writeb(aux, - DP_LINK_QUAL_LANE0_SET + i, - test_pattern); + err = drm_dp_dpcd_write_byte(aux, + DP_LINK_QUAL_LANE0_SET + i, + test_pattern); if (err < 0) return err; } } @@ -3265,8 +3311,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) return false; - if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, - &rx_feature) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, + &rx_feature) < 0) { drm_dbg_dp(aux->drm_dev, "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n"); return false; @@ -3290,7 +3336,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) return false; - if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) { drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n"); return false; } @@ -3421,16 +3467,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw); */ int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd) { - int ret; u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE | DP_PCON_ENABLE_LINK_FRL_MODE; if (enable_frl_ready_hpd) buf |= DP_PCON_ENABLE_HPD_READY; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - - return ret; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_prepare); @@ -3445,7 +3488,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux) { int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); if (ret < 0) return false; @@ -3474,7 +3517,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); if (ret < 0) return ret; @@ -3509,11 +3552,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, return -EINVAL; } - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1); @@ -3539,9 +3578,5 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, else buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf); } @@ -3555,13 +3594,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2); */ int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux) { - int ret; - - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0); } EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config); @@ -3576,7 +3609,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux) { int ret; u8 buf = 0; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); if (ret < 0) return ret; if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) { drm_dbg_kms(aux->drm_dev, "PCON in Autonomous mode, can't enable FRL\n"); @@ -3585,11
+3618,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux) return -EINVAL; } buf |= DP_PCON_ENABLE_HDMI_LINK; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_enable); @@ -3604,7 +3633,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); if (ret < 0) return false; @@ -3629,7 +3658,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask) int mode; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf); if (ret < 0) return ret; @@ -3658,7 +3687,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; for (i = 0; i < hdmi->max_lanes; i++) { - if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0) return; error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK; @@ -3793,7 +3822,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); if (ret < 0) return ret; @@ -3804,11 +3833,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config) buf |= pps_buf_config << 2; } - ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); } /** @@ -3820,13 +3845,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config) */ int drm_dp_pcon_pps_default(struct drm_dp_aux *aux) { - int ret; - - ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED); - if (ret < 0) - return ret; - - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED); } EXPORT_SYMBOL(drm_dp_pcon_pps_default); @@ -3842,15 +3861,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]) { int ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128); if (ret < 0) return ret; - ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); - if (ret < 0) - return ret; - - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); } EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf); @@ -3867,21 +3882,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]) { int ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2); if (ret < 0) return ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2); if (ret < 0) return ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2); if (ret < 0) return ret; - 
ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); - if (ret < 0) - return ret; - - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); } EXPORT_SYMBOL(drm_dp_pcon_pps_override_param); @@ -3897,7 +3908,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); if (ret < 0) return ret; @@ -3906,11 +3917,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) else buf &= ~DP_CONVERSION_RGB_YCBCR_MASK; - ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); } EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr); @@ -3942,12 +3949,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac buf[0] = level; } - ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); - if (ret != sizeof(buf)) { + ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to write aux backlight level: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } return 0; @@ -3965,22 +3972,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli if (!bl->aux_enable) return 0; - ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } if (enable) buf |= DP_EDP_BACKLIGHT_ENABLE; else buf &= ~DP_EDP_BACKLIGHT_ENABLE; - ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } return 0; @@ -4016,15 +4023,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM; if (bl->pwmgen_bit_count) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); - if (ret != 1) + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); + if (ret < 0) drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); } if (bl->pwm_freq_pre_divider) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider); - if (ret != 1) + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET, + bl->pwm_freq_pre_divider); + if (ret < 0) drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight frequency: %d\n", aux->name, ret); @@ -4032,8 +4040,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; } - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", aux->name, ret); return ret < 0 ? 
ret : -EIO; @@ -4088,8 +4096,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf if (!bl->aux_set) return 0; - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", aux->name, ret); return -ENODEV; @@ -4122,14 +4130,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf * - FxP is within 25% of desired value. * Note: 25% is arbitrary value and may need some tweak. */ - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", aux->name, ret); return 0; } - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", aux->name, ret); return 0; @@ -4154,8 +4162,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf break; } - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); return 0; @@ -4180,8 +4188,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i u8 buf[2]; u8 mode_reg; - ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n", aux->name, ret); return ret < 0 ? ret : -EIO; @@ -4194,11 +4202,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { int size = 1 + bl->lsb_reg_used; - ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); - if (ret != size) { + ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n", aux->name, ret); - return ret < 0 ? 
ret : -EIO; + return ret; } if (bl->lsb_reg_used) @@ -4343,8 +4351,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) if (!panel || !panel->dev || !aux) return -EINVAL; - ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd, - EDP_DISPLAY_CTL_CAP_SIZE); + ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd, + EDP_DISPLAY_CTL_CAP_SIZE); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 3a1f1ffc7b55..a89f38fd3218 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -2192,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid) guid_copy(&mstb->guid, guid); if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) { + struct drm_dp_aux *aux; u8 buf[UUID_SIZE]; export_guid(buf, &mstb->guid); - if (mstb->port_parent) { - ret = drm_dp_send_dpcd_write(mstb->mgr, - mstb->port_parent, - DP_GUID, sizeof(buf), buf); - } else { - ret = drm_dp_dpcd_write(mstb->mgr->aux, - DP_GUID, buf, sizeof(buf)); - } - } + if (mstb->port_parent) + aux = &mstb->port_parent->aux; + else + aux = mstb->mgr->aux; - if (ret < 16 && ret > 0) - return -EPROTO; + ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf)); + } - return ret == 16 ? 0 : ret; + return ret; } static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, @@ -2744,14 +2740,13 @@ retry: do { tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); - ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, - &msg[offset], - tosend); - if (ret != tosend) { - if (ret == -EIO && retries < 5) { - retries++; - goto retry; - } + ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } else if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); return -EIO; @@ -3624,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) return DRM_DP_SST; - if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) + if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0) return DRM_DP_SST; if (mstm_cap & DP_MST_CAP) @@ -3679,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; drm_dp_mst_topology_get_mstb(mgr->mst_primary); - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) goto out_unlock; @@ -3697,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mstb = mgr->mst_primary; mgr->mst_primary = NULL; /* this can fail if the device is gone */ - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; mgr->payload_id_table_cleared = false; @@ -3763,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe); void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) { mutex_lock(&mgr->lock); - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UPSTREAM_IS_SRC); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); flush_work(&mgr->up_req_work); flush_work(&mgr->work); @@ -3813,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr 
*mgr, goto out_fail; } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) { drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); goto out_fail; } /* Some hubs forget their guids after they resume */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); - if (ret != sizeof(buf)) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret < 0) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } @@ -3883,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, *mstb = NULL; len = min(mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); return false; } @@ -3922,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, curreply = len; while (replylen > 0) { len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, - replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply, + replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", len, ret); return false; @@ -4881,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, int i; for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { - if (drm_dp_dpcd_read(mgr->aux, - DP_PAYLOAD_TABLE_UPDATE_STATUS + i, - &buf[i], 16) != 16) + if (drm_dp_dpcd_read_data(mgr->aux, + DP_PAYLOAD_TABLE_UPDATE_STATUS + i, + &buf[i], 16) < 0) return false; } return true; @@ -4972,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m, } seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); - if (ret != 2) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2); + if (ret < 0) { seq_printf(m, "faux/mst read failed\n"); goto out; } seq_printf(m, "faux/mst: %*ph\n", 2, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); - if (ret != 1) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1); + if (ret < 0) { seq_printf(m, "mst ctrl read failed\n"); goto out; } seq_printf(m, "mst ctrl: %*ph\n", 1, buf); /* dump the standard OUI branch header */ - ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); - if (ret != DP_BRANCH_OUI_HEADER_SIZE) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf, + DP_BRANCH_OUI_HEADER_SIZE); + if (ret < 0) { seq_printf(m, "branch oui read failed\n"); goto out; } @@ -6112,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) /* DP-to-DP peer device */ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&immediate_upstream_port->aux, - DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + if 
(drm_dp_dpcd_read_data(&immediate_upstream_port->aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) return NULL; /* Enpoint decompression with DP-to-DP peer device */ @@ -6157,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; - if (drm_dp_dpcd_read(immediate_upstream_aux, - DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(immediate_upstream_aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) return NULL; if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) @@ -6180,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) * therefore the endpoint needs to be * both DSC and FEC capable. */ - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && (endpoint_fec & DP_FEC_CAPABLE)) diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c index 90fe07a89260..076edf161048 100644 --- a/drivers/gpu/drm/display/drm_dp_tunnel.c +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r while ((len = next_reg_area(&offset))) { int address = DP_TUNNELING_BASE + offset; - if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0) + if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0) return -EIO; offset += len; @@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ; u8 val; - if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) goto out_err; if (enable) @@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) else val &= ~mask; - if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) goto out_err; tunnel->bw_alloc_enabled = enable; @@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux) { u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED; - if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0) + if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0) return -EIO; return 0; @@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed) u8 val; int err; - if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0) return -EIO; *status_changed = val & status_change_mask; @@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw) if (err) goto out; - if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { err = -EIO; goto out; } @@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) u8 mask = 
DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; u8 val; - if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) goto out_err; val &= mask; if (val) { - if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) goto out_err; return 1; @@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) * Check for estimated BW changes explicitly to account for lost * BW change notifications. */ - if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) goto out_err; if (val * tunnel->bw_granularity != tunnel->estimated_bw) @@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a { u8 val; - if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0) return -EIO; if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED)) diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c index 74dd4d01dd9b..855cb02b827d 100644 --- a/drivers/gpu/drm/display/drm_hdmi_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_helper.c @@ -256,3 +256,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode, return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8); } EXPORT_SYMBOL(drm_hdmi_compute_mode_clock); + +struct drm_hdmi_acr_n_cts_entry { + unsigned int n; + unsigned int cts; +}; + +struct drm_hdmi_acr_data { + unsigned long tmds_clock_khz; + struct drm_hdmi_acr_n_cts_entry n_cts_32k, + n_cts_44k1, + n_cts_48k; +}; + +static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = { + { + /* "Other" entry */ + .n_cts_32k = { .n = 4096, }, + .n_cts_44k1 = { .n = 6272, }, + .n_cts_48k = { .n = 6144, }, + }, { + .tmds_clock_khz = 25175, + .n_cts_32k = { .n = 4576, .cts = 28125, }, + .n_cts_44k1 = { .n = 7007, .cts = 31250, }, + .n_cts_48k = { .n = 6864, .cts = 28125, }, + }, { + .tmds_clock_khz = 25200, + .n_cts_32k = { .n = 4096, .cts = 25200, }, + .n_cts_44k1 = { .n = 6272, .cts = 28000, }, + .n_cts_48k = { .n = 6144, .cts = 25200, }, + }, { + .tmds_clock_khz = 27000, + .n_cts_32k = { .n = 4096, .cts = 27000, }, + .n_cts_44k1 = { .n = 6272, .cts = 30000, }, + .n_cts_48k = { .n = 6144, .cts = 27000, }, + }, { + .tmds_clock_khz = 27027, + .n_cts_32k = { .n = 4096, .cts = 27027, }, + .n_cts_44k1 = { .n = 6272, .cts = 30030, }, + .n_cts_48k = { .n = 6144, .cts = 27027, }, + }, { + .tmds_clock_khz = 54000, + .n_cts_32k = { .n = 4096, .cts = 54000, }, + .n_cts_44k1 = { .n = 6272, .cts = 60000, }, + .n_cts_48k = { .n = 6144, .cts = 54000, }, + }, { + .tmds_clock_khz = 54054, + .n_cts_32k = { .n = 4096, .cts = 54054, }, + .n_cts_44k1 = { .n = 6272, .cts = 60060, }, + .n_cts_48k = { .n = 6144, .cts = 54054, }, + }, { + .tmds_clock_khz = 74176, + .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */ + .n_cts_44k1 = { .n = 17836, .cts = 234375, }, + .n_cts_48k = { .n = 11648, .cts = 140625, }, + }, { + .tmds_clock_khz = 74250, + .n_cts_32k = { .n = 4096, .cts = 74250, }, + .n_cts_44k1 = { .n = 6272, .cts = 82500, }, + .n_cts_48k = { .n = 6144, .cts = 74250, }, + }, { + .tmds_clock_khz = 148352, + .n_cts_32k = { .n = 11648, .cts = 421875, }, + .n_cts_44k1 = { .n = 8918, .cts = 234375, }, + .n_cts_48k = { .n = 5824, .cts = 140625, }, + }, { + .tmds_clock_khz = 148500, + .n_cts_32k = { .n = 
4096, .cts = 148500, }, + .n_cts_44k1 = { .n = 6272, .cts = 165000, }, + .n_cts_48k = { .n = 6144, .cts = 148500, }, + }, { + .tmds_clock_khz = 296703, + .n_cts_32k = { .n = 5824, .cts = 421875, }, + .n_cts_44k1 = { .n = 4459, .cts = 234375, }, + .n_cts_48k = { .n = 5824, .cts = 281250, }, + }, { + .tmds_clock_khz = 297000, + .n_cts_32k = { .n = 3072, .cts = 222750, }, + .n_cts_44k1 = { .n = 4704, .cts = 247500, }, + .n_cts_48k = { .n = 5120, .cts = 247500, }, + }, { + .tmds_clock_khz = 593407, + .n_cts_32k = { .n = 5824, .cts = 843750, }, + .n_cts_44k1 = { .n = 8918, .cts = 937500, }, + .n_cts_48k = { .n = 5824, .cts = 562500, }, + }, { + .tmds_clock_khz = 594000, + .n_cts_32k = { .n = 3072, .cts = 445500, }, + .n_cts_44k1 = { .n = 9408, .cts = 990000, }, + .n_cts_48k = { .n = 6144, .cts = 594000, }, + }, +}; + +static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz) +{ + int i; + + /* skip the "other" entry */ + for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) { + if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz) + return i; + } + + return 0; +} + +/** + * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration + * + * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector + * @sample_rate: audio sample rate + * @out_n: a pointer to write the N value + * @out_cts: a pointer to write the CTS value + * + * Get the N and CTS values (either by calculating them or by returning data + * from the tables). This follows the HDMI 1.4b Section 7.2 "Audio Sample Clock + * Capture and Regeneration". + * + * Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6 + * on how to select Fs for non-L-PCM formats. + */ +void +drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate, + unsigned int sample_rate, + unsigned int *out_n, + unsigned int *out_cts) +{ + /* be a bit more tolerant, especially for the 1.001 entries */ + unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000); + const struct drm_hdmi_acr_n_cts_entry *entry; + unsigned int n, cts, mult; + int tmds_idx; + + tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz); + + /* + * Don't change the order, 192 kHz is divisible by 48k and 32k, but it + * should use the 48k entry. + */ + if (sample_rate % 48000 == 0) { + entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k; + mult = sample_rate / 48000; + } else if (sample_rate % 44100 == 0) { + entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1; + mult = sample_rate / 44100; + } else if (sample_rate % 32000 == 0) { + entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k; + mult = sample_rate / 32000; + } else { + entry = NULL; + } + + if (entry) { + n = entry->n * mult; + cts = entry->cts; + } else { + /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */ + n = 128 * sample_rate / 1000; + cts = 0; + } + + if (!cts) + cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n, + 128 * sample_rate); + + *out_n = n; + *out_cts = cts; +} +EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts); diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index c205f37da1e1..d9d9948b29e9 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -10,6 +10,298 @@ #include <drm/display/drm_hdmi_state_helper.h> /** + * DOC: hdmi helpers + * + * These functions contain an implementation of the HDMI specification + * in the form of KMS helpers.
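The ACR fallback arithmetic in drm_hdmi_acr_get_n_cts() above is easy to check in isolation. A self-contained editorial sketch (plain userspace C, not kernel code) of the "other"-entry path: N is the recommended 128 * Fs / 1000, and CTS then follows from the ACR relation f_TMDS = 128 * Fs * CTS / N:

    #include <stdio.h>

    /* recommended N (HDMI 1.4b, Section 7.2.1), CTS rounded to closest */
    static void acr_n_cts_other(unsigned long long tmds_char_rate,
                                unsigned int sample_rate,
                                unsigned int *n, unsigned int *cts)
    {
            *n = 128 * sample_rate / 1000;
            *cts = (tmds_char_rate * *n + 64ULL * sample_rate) /
                   (128ULL * sample_rate);
    }

    int main(void)
    {
            unsigned int n, cts;

            /* 65 MHz (DMT 1024x768) has no table entry, so it takes this path */
            acr_n_cts_other(65000000ULL, 48000, &n, &cts);
            printf("N=%u CTS=%u\n", n, cts);    /* prints N=6144 CTS=65000 */
            return 0;
    }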
+ * + * It contains TMDS character rate computation, automatic selection of + * output formats, infoframes generation, etc. + * + * Infoframes Compliance + * ~~~~~~~~~~~~~~~~~~~~~ + * + * Drivers using the helpers will expose the various infoframes + * generated according to the HDMI specification in debugfs. + * + * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project + * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like: + * + * .. code-block:: bash + * + * # edid-decode \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \ + * /sys/class/drm/card1-HDMI-A-1/edid \ + * -c + * + * edid-decode (hex): + * + * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00 + * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25 + * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01 + * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20 + * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e + * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c + * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff + * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46 + * + * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f + * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00 + * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03 + * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52 + * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b + * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3 + * + * ---------------- + * + * Block 0, Base EDID: + * EDID Structure Version & Revision: 1.3 + * Vendor & Product Identification: + * Manufacturer: GSM + * Model: 23540 + * Serial Number: 454430 (0x0006ef1e) + * Made in: week 7 of 2022 + * Basic Display Parameters & Features: + * Digital display + * Maximum image size: 47 cm x 52 cm + * Gamma: 2.20 + * DPMS levels: Standby Suspend Off + * RGB color display + * First detailed timing is the preferred timing + * Color Characteristics: + * Red : 0.6835, 0.3105 + * Green: 0.2587, 0.6679 + * Blue : 0.1445, 0.0585 + * White: 0.3134, 0.3291 + * Established Timings I & II: + * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz + * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz + * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz + * Standard Timings: + * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz + * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz + * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz + * Detailed Timing Descriptors: + * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm) + * Hfront 48 Hsync 32 Hback 240 Hpol P + * Vfront 3 Vsync 10 Vback 199 Vpol N + * Display Range Limits: + * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz + * Display Product Name: 'LG SDQHD' + * Display Product Serial Number: '207NTRLDC430' + * Extension blocks: 1 + * Checksum: 0x46 + * + * ---------------- + * + * Block 1, CTA-861 Extension Block: + * Revision: 3 + * Basic audio support + * Supports YCbCr 4:4:4 + * Supports YCbCr 4:2:2 + * Native detailed modes: 2 + * Audio Data Block: + * Linear PCM: + * Max channels: 2 + * Supported sample rates (kHz): 48 44.1 32 + * Supported sample sizes (bits): 24 20 16 + * Video Data Block: 
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz + * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz + * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz + * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native) + * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz + * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz + * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz + * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz + * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz + * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz + * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz + * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz + * Speaker Allocation Data Block: + * FL/FR - Front Left/Right + * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: + * Source physical address: 1.0.0.0 + * Supports_AI + * DC_36bit + * DC_30bit + * DC_Y444 + * Maximum TMDS clock: 300 MHz + * Extended HDMI video details: + * HDMI VICs: + * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz + * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz + * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8: + * Version: 1 + * Maximum TMDS Character Rate: 600 MHz + * SCDC Present + * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding + * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding + * YCbCr 4:2:0 Capability Map Data Block: + * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz + * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz + * Video Capability Data Block: + * YCbCr quantization: No Data + * RGB quantization: Selectable (via AVI Q) + * PT scan behavior: Always Underscanned + * IT scan behavior: Always Underscanned + * CE scan behavior: Always Underscanned + * Colorimetry Data Block: + * BT2020YCC + * BT2020RGB + * HDR Static Metadata Data Block: + * Electro optical transfer functions: + * Traditional gamma - SDR luminance range + * SMPTE ST2084 + * Supported static metadata descriptors: + * Static metadata type 1 + * Desired content max luminance: 82 (295.365 cd/m^2) + * Desired content max frame-average luminance: 82 (295.365 cd/m^2) + * Desired content min luminance: 81 (0.298 cd/m^2) + * Detailed Timing Descriptors: + * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm) + * Hfront 48 Hsync 32 Hback 80 Hpol P + * Vfront 3 Vsync 10 Vback 28 Vpol N + * Checksum: 0xc3 Unused space in Extension Block: 43 bytes + * + * ---------------- + * + * edid-decode 1.29.0-5346 + * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18 + * + * Warnings: + * + * Block 1, CTA-861 Extension Block: + * IT Video Formats are overscanned by default, but normally this should be underscanned. + * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended? + * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended? + * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues. + * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead. + * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues. + * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0. 
+ * EDID: + * Base EDID: Some timings are out of range of the Monitor Ranges: + * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz) + * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz) + * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz) + * + * Failures: + * + * Block 1, CTA-861 Extension Block: + * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned. + * EDID: + * CTA-861: Native progressive timings are a mix of several resolutions. + * + * EDID conformity: FAIL + * + * ================ + * + * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty. + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00 + * 00 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x31 + * + * AVI InfoFrame + * Version: 2 + * Length: 13 + * Y: Color Component Sample Format: RGB + * A: Active Format Information Present: Yes + * B: Bar Data Present: Bar Data not present + * S: Scan Information: Composed for an underscanned display + * C: Colorimetry: No Data + * M: Picture Aspect Ratio: 16:9 + * R: Active Portion Aspect Ratio: 8 + * ITC: IT Content: No Data + * EC: Extended Colorimetry: xvYCC601 + * Q: RGB Quantization Range: Limited Range + * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling + * YQ: YCC Quantization Range: Limited Range + * CN: IT Content Type: Graphics + * PR: Pixel Data Repetition Count: 0 + * Line Number of End of Top Bar: 0 + * Line Number of Start of Bottom Bar: 0 + * Pixel Number of End of Left Bar: 0 + * Pixel Number of Start of Right Bar: 0 + * + * ---------------- + * + * AVI InfoFrame conformity: PASS + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 81 01 05 49 03 0c 00 20 01 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x49 + * + * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 + * Version: 1 + * Length: 5 + * HDMI Video Format: HDMI_VIC is present + * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * + * ---------------- + * + * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS + * + * ================ + * + * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty. + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65 + * 6f 63 6f 72 65 00 00 00 00 00 00 00 09 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x93 + * + * Source Product Description InfoFrame + * Version: 1 + * Length: 25 + * Vendor Name: 'Broadcom' + * Product Description: 'Videocore' + * Source Information: PC general + * + * ---------------- + * + * Source Product Description InfoFrame conformity: PASS + * + * Testing + * ~~~~~~~ + * + * The helpers have unit testing and can be tested using kunit with: + * + * .. code-block:: bash + * + * $ ./tools/testing/kunit/kunit.py run \ + * --kunitconfig=drivers/gpu/drm/tests \ + * drm_atomic_helper_connector_hdmi_* + */ + +/** * __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources * @connector: DRM connector * @new_conn_state: connector state to reset @@ -816,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector, * @status: Connection status * * This function should be called as a part of the .detect() / .detect_ctx() - * callbacks, updating the HDMI-specific connector's data. 
+ * callbacks for all status changes. */ void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector, enum drm_connector_status status) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 9ea2611770f4..0138cf0b8b63 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state); * state). This is especially true in enable hooks because the pipeline has * changed. * + * If you don't have access to the atomic state, see + * drm_atomic_get_connector_for_encoder(). + * * Returns: The old connector connected to @encoder, or NULL if the encoder is * not connected. */ @@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder); * attached to @encoder vs ones that do (and to inspect their state). This is * especially true in disable hooks because the pipeline will change. * + * If you don't have access to the atomic state, see + * drm_atomic_get_connector_for_encoder(). + * * Returns: The new connector connected to @encoder, or NULL if the encoder is * not connected. */ @@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); /** + * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder + * @encoder: The encoder to find the connector of + * @ctx: Modeset locking context + * + * This function finds and returns the connector currently assigned to + * an @encoder. + * + * It is similar to the drm_atomic_get_old_connector_for_encoder() and + * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't + * require access to the atomic state. If you have access to it, prefer + * using these. This helper is typically useful in situations where you + * don't have access to the atomic state, like detect, link repair, + * threaded interrupt handlers, or hooks from other frameworks (ALSA, + * CEC, etc.). + * + * Returns: + * The connector connected to @encoder, or an error pointer otherwise. + * When the error is EDEADLK, a deadlock has been detected and the + * sequence must be restarted. 
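+ * + * A minimal usage sketch (hypothetical caller code, not part of this patch), assuming an already-attached @encoder, showing the EDEADLK retry dance around the lookup: + * + * .. code-block:: c + * + * struct drm_modeset_acquire_ctx ctx; + * struct drm_connector *connector; + * int ret; + * + * DRM_MODESET_LOCK_ALL_BEGIN(encoder->dev, ctx, 0, ret); + * + * connector = drm_atomic_get_connector_for_encoder(encoder, &ctx); + * if (IS_ERR(connector)) + * ret = PTR_ERR(connector); + * else + * ret = 0; + * + * DRM_MODESET_LOCK_ALL_END(encoder->dev, ctx, ret);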
+ */ +struct drm_connector * +drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_connector_list_iter conn_iter; + struct drm_connector *out_connector = ERR_PTR(-EINVAL); + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + int ret; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ERR_PTR(ret); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (!connector->state) + continue; + + if (encoder == connector->state->best_encoder) { + out_connector = connector; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return out_connector; +} +EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder); + +/** * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder * @state: Atomic state * @encoder: The encoder to fetch the crtc state for diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5302ab324898..ee64ca1b1bec 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all); * This implies a reset of all active components available between the CRTC and * connectors. * + * A variant of this function dedicated to bridges is available as + * drm_bridge_helper_reset_crtc(). + * * NOTE: This relies on resetting &drm_crtc_state.connectors_changed. * For drivers which optimize out unnecessary modesets this will result in * a no-op commit, achieving nothing. diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index fa2794217a90..b4c89ec01998 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/media-bus-format.h> #include <linux/module.h> @@ -198,10 +199,96 @@ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); +static void __drm_bridge_free(struct kref *kref) +{ + struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); + + kfree(bridge->container); +} + +/** + * drm_bridge_get - Acquire a bridge reference + * @bridge: DRM bridge + * + * This function increments the bridge's refcount. + * + * Returns: + * Pointer to @bridge. + */ +struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge) +{ + if (bridge) + kref_get(&bridge->refcount); + + return bridge; +} +EXPORT_SYMBOL(drm_bridge_get); + +/** + * drm_bridge_put - Release a bridge reference + * @bridge: DRM bridge + * + * This function decrements the bridge's reference count and frees the + * object if the reference count drops to zero. + */ +void drm_bridge_put(struct drm_bridge *bridge) +{ + if (bridge) + kref_put(&bridge->refcount, __drm_bridge_free); +} +EXPORT_SYMBOL(drm_bridge_put); + +/** + * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer + * + * @data: pointer to &struct drm_bridge, cast to a void pointer + * + * Wrapper around drm_bridge_put() to be used when a function taking a void + * pointer is needed, for example as a devm action.
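+ * + * This indirection exists because devm action callbacks take a void + * pointer; see the devm_add_action_or_reset() call in + * __devm_drm_bridge_alloc() below.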
+ */ +static void drm_bridge_put_void(void *data) +{ + struct drm_bridge *bridge = (struct drm_bridge *)data; + + drm_bridge_put(bridge); +} + +void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_bridge_funcs *funcs) +{ + void *container; + struct drm_bridge *bridge; + int err; + + if (!funcs) { + dev_warn(dev, "Missing funcs pointer\n"); + return ERR_PTR(-EINVAL); + } + + container = kzalloc(size, GFP_KERNEL); + if (!container) + return ERR_PTR(-ENOMEM); + + bridge = container + offset; + bridge->container = container; + bridge->funcs = funcs; + kref_init(&bridge->refcount); + + err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge); + if (err) + return ERR_PTR(err); + + return container; +} +EXPORT_SYMBOL(__devm_drm_bridge_alloc); + /** * drm_bridge_add - add the given bridge to the global bridge list * * @bridge: bridge control structure + * + * The bridge to be added must have been allocated by + * devm_drm_bridge_alloc(). */ void drm_bridge_add(struct drm_bridge *bridge) { @@ -280,6 +367,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, }; +static bool drm_bridge_is_atomic(struct drm_bridge *bridge) +{ + return bridge->funcs->atomic_reset != NULL; +} + /** * drm_bridge_attach - attach the bridge to an encoder's chain * @@ -327,12 +419,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, list_add(&bridge->chain_node, &encoder->bridge_chain); if (bridge->funcs->attach) { - ret = bridge->funcs->attach(bridge, flags); + ret = bridge->funcs->attach(bridge, encoder, flags); if (ret < 0) goto err_reset_bridge; } - if (bridge->funcs->atomic_reset) { + if (drm_bridge_is_atomic(bridge)) { struct drm_bridge_state *state; state = bridge->funcs->atomic_reset(bridge); @@ -377,7 +469,7 @@ void drm_bridge_detach(struct drm_bridge *bridge) if (WARN_ON(!bridge->dev)) return; - if (bridge->funcs->atomic_reset) + if (drm_bridge_is_atomic(bridge)) drm_atomic_private_obj_fini(&bridge->base); if (bridge->funcs->detach) @@ -1300,6 +1392,75 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif +static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, + struct drm_bridge *bridge, + unsigned int idx) +{ + drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); + drm_printf(p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); + + if (bridge->of_node) + drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); + + drm_printf(p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(p, " modes"); + if (bridge->ops & DRM_BRIDGE_OP_HDMI) + drm_puts(p, " hdmi"); + drm_puts(p, "\n"); +} + +static int allbridges_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + mutex_lock(&bridge_lock); + + list_for_each_entry(bridge, &bridge_list, list) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + + mutex_unlock(&bridge_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(allbridges); + +static int encoder_bridges_show(struct seq_file *m, void *data) +{ + struct drm_encoder *encoder = m->private; + struct drm_printer p = drm_seq_file_printer(m); + struct 
drm_bridge *bridge; + unsigned int idx = 0; + + drm_for_each_bridge_in_chain(encoder, bridge) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(encoder_bridges); + +void drm_bridge_debugfs_params(struct dentry *root) +{ + debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops); +} + +void drm_bridge_debugfs_encoder_params(struct dentry *root, + struct drm_encoder *encoder) +{ + /* bridges list */ + debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops); +} + MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c new file mode 100644 index 000000000000..af80d2496194 --- /dev/null +++ b/drivers/gpu/drm/drm_bridge_helper.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_helper.h> +#include <drm/drm_modeset_lock.h> + +/** + * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge + * @bridge: DRM bridge to reset + * @ctx: lock acquisition context + * + * Reset a @bridge pipeline. It will power-cycle all active components + * between the CRTC and the connector that the bridge is connected to. + * + * As it relies on drm_atomic_helper_reset_crtc(), the same limitations + * apply. + * + * Returns: + * + * 0 on success or a negative error code on failure. If the error + * returned is EDEADLK, the whole atomic sequence must be restarted. + */ +int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_connector *connector; + struct drm_encoder *encoder = bridge->encoder; + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc; + int ret; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ret; + + connector = drm_atomic_get_connector_for_encoder(encoder, ctx); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + goto out; + } + + if (!connector->state) { + ret = -EINVAL; + goto out; + } + + crtc = connector->state->crtc; + ret = drm_atomic_helper_reset_crtc(crtc, ctx); + +out: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + return ret; +} +EXPORT_SYMBOL(drm_bridge_helper_reset_crtc); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 549b28a5918c..f1de7faf9fb4 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release); static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { - drm_gem_vunmap_unlocked(buffer->gem, &buffer->map); + drm_gem_vunmap(buffer->gem, &buffer->map); drm_gem_object_put(buffer->gem); } @@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, drm_gem_lock(gem); - ret = drm_gem_vmap(gem, map); + ret = drm_gem_vmap_locked(gem, map); if (ret) goto err_drm_gem_vmap_unlocked; *map_copy = *map; @@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; - drm_gem_vunmap(gem, map); + drm_gem_vunmap_locked(gem, map); drm_gem_unlock(gem); } EXPORT_SYMBOL(drm_client_buffer_vunmap_local); @@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, ret = 
drm_gem_pin_locked(gem); if (ret) goto err_drm_gem_pin_locked; - ret = drm_gem_vmap(gem, map); + ret = drm_gem_vmap_locked(gem, map); if (ret) goto err_drm_gem_vmap; @@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) struct iosys_map *map = &buffer->map; drm_gem_lock(gem); - drm_gem_vunmap(gem, map); + drm_gem_vunmap_locked(gem, map); drm_gem_unpin_locked(gem); drm_gem_unlock(gem); } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index aca442c25209..0f9d5ba36c81 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client) unsigned int max_connector_count = 1; struct drm_mode_set *modeset; struct drm_crtc *crtc; - unsigned int i = 0; + int i = 0; /* Add terminating zero entry to enable index less iteration */ client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL); @@ -73,9 +73,10 @@ err_free: static void drm_client_modeset_release(struct drm_client_dev *client) { struct drm_mode_set *modeset; - unsigned int i; drm_client_for_each_modeset(modeset, client) { + int i; + drm_mode_destroy(client->dev, modeset->mode); modeset->mode = NULL; modeset->fb = NULL; @@ -117,10 +118,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_get_tiled_mode(struct drm_connector *connector) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay == connector->tile_h_size && @@ -130,10 +131,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay == connector->tile_h_size && @@ -144,10 +145,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_preferred_mode(struct drm_connector *connector, int width, int height) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay > width || @@ -159,16 +160,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei return NULL; } -static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector) +static const struct drm_display_mode * +drm_connector_first_mode(struct drm_connector *connector) { return list_first_entry_or_null(&connector->modes, struct drm_display_mode, head); } -static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector) +static const struct drm_display_mode * +drm_connector_pick_cmdline_mode(struct drm_connector *connector) { - struct drm_cmdline_mode *cmdline_mode; - struct drm_display_mode *mode; + const struct drm_cmdline_mode *cmdline_mode; + const struct drm_display_mode *mode; bool prefer_non_interlace; /* @@ -237,9 +240,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict) return enable; } -static void drm_client_connectors_enabled(struct drm_connector **connectors, +static void 
drm_client_connectors_enabled(struct drm_connector *connectors[], unsigned int connector_count, - bool *enabled) + bool enabled[]) { bool any_enabled = false; struct drm_connector *connector; @@ -263,16 +266,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors, enabled[i] = drm_connector_enabled(connectors[i], false); } +static void mode_replace(struct drm_device *dev, + const struct drm_display_mode **dst, + const struct drm_display_mode *src) +{ + drm_mode_destroy(dev, (struct drm_display_mode *)*dst); + + *dst = src ? drm_mode_duplicate(dev, src) : NULL; +} + +static void modes_destroy(struct drm_device *dev, + const struct drm_display_mode *modes[], + int count) +{ + int i; + + for (i = 0; i < count; i++) + mode_replace(dev, &modes[i], NULL); +} + static bool drm_client_target_cloned(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, int height) { - int count, i, j; + int count, i; bool can_clone = false; - struct drm_display_mode *dmt_mode, *mode; + struct drm_display_mode *dmt_mode; /* only contemplate cloning in the single crtc case */ if (dev->mode_config.num_crtc > 1) @@ -291,9 +313,13 @@ static bool drm_client_target_cloned(struct drm_device *dev, /* check the command line or if nothing common pick 1024x768 */ can_clone = true; for (i = 0; i < connector_count; i++) { + int j; + if (!enabled[i]) continue; - modes[i] = drm_connector_pick_cmdline_mode(connectors[i]); + + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connectors[i])); if (!modes[i]) { can_clone = false; break; @@ -323,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev, goto fail; for (i = 0; i < connector_count; i++) { + const struct drm_display_mode *mode; + if (!enabled[i]) continue; @@ -332,7 +360,7 @@ static bool drm_client_target_cloned(struct drm_device *dev, DRM_MODE_MATCH_CLOCK | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS)) - modes[i] = mode; + mode_replace(dev, &modes[i], mode); } if (!modes[i]) can_clone = false; @@ -349,19 +377,19 @@ fail: } static int drm_client_get_tile_offsets(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_display_mode **modes, - struct drm_client_offset *offsets, + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], int idx, int h_idx, int v_idx) { - struct drm_connector *connector; int i; int hoffset = 0, voffset = 0; for (i = 0; i < connector_count; i++) { - connector = connectors[i]; + struct drm_connector *connector = connectors[i]; + if (!connector->has_tile) continue; @@ -384,14 +412,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev, } static bool drm_client_target_preferred(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, int height) { const u64 mask = BIT_ULL(connector_count) - 1; - struct drm_connector *connector; u64 conn_configured = 0; int tile_pass = 0; int num_tiled_conns = 0; 
@@ -405,7 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev, retry: for (i = 0; i < connector_count; i++) { - connector = connectors[i]; + struct drm_connector *connector = connectors[i]; + const char *mode_type; + if (conn_configured & BIT_ULL(i)) continue; @@ -438,20 +467,23 @@ retry: modes, offsets, i, connector->tile_h_loc, connector->tile_v_loc); } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", - connector->base.id, connector->name); - /* got for command line mode first */ - modes[i] = drm_connector_pick_cmdline_mode(connector); + mode_type = "cmdline"; + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connector)); + if (!modes[i]) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n", - connector->base.id, connector->name, - connector->tile_group ? connector->tile_group->id : 0); - modes[i] = drm_connector_preferred_mode(connector, width, height); + mode_type = "preferred"; + mode_replace(dev, &modes[i], + drm_connector_preferred_mode(connector, width, height)); } - /* No preferred modes, pick one off the list */ - if (!modes[i]) - modes[i] = drm_connector_first_mode(connector); + + if (!modes[i]) { + mode_type = "first"; + mode_replace(dev, &modes[i], + drm_connector_first_mode(connector)); + } + /* * In case of tiled mode if all tiles not present fallback to * first available non tiled mode. @@ -466,18 +498,24 @@ retry: (connector->tile_h_loc == 0 && connector->tile_v_loc == 0 && !drm_connector_get_tiled_mode(connector))) { - drm_dbg_kms(dev, - "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", - connector->base.id, connector->name); - modes[i] = drm_connector_fallback_non_tiled_mode(connector); + mode_type = "non tiled"; + mode_replace(dev, &modes[i], + drm_connector_fallback_non_tiled_mode(connector)); } else { - modes[i] = drm_connector_get_tiled_mode(connector); + mode_type = "tiled"; + mode_replace(dev, &modes[i], + drm_connector_get_tiled_mode(connector)); } } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n", - connector->base.id, connector->name, - modes[i] ? 
modes[i]->name : "none"); + if (modes[i]) + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n", + connector->base.id, connector->name, + mode_type, modes[i]->name); + else + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n", + connector->base.id, connector->name); + conn_configured |= BIT_ULL(i); } @@ -502,18 +540,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector, } static int drm_client_pick_crtcs(struct drm_client_dev *client, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_crtc **best_crtcs, - struct drm_display_mode **modes, + struct drm_crtc *best_crtcs[], + const struct drm_display_mode *modes[], int n, int width, int height) { struct drm_device *dev = client->dev; struct drm_connector *connector; int my_score, best_score, score; - struct drm_crtc **crtcs, *crtc; + struct drm_crtc **crtcs; struct drm_mode_set *modeset; - int o; if (n == connector_count) return 0; @@ -543,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, * remaining connectors */ drm_client_for_each_modeset(modeset, client) { - crtc = modeset->crtc; + struct drm_crtc *crtc = modeset->crtc; + int o; if (!connector_has_possible_crtc(connector, crtc)) continue; @@ -577,17 +615,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, /* Try to read the BIOS display configuration and use it for the initial config */ static bool drm_client_firmware_config(struct drm_client_dev *client, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_crtc **crtcs, - struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + struct drm_crtc *crtcs[], + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, int height) { const int count = min_t(unsigned int, connector_count, BITS_PER_LONG); unsigned long conn_configured, conn_seq, mask; struct drm_device *dev = client->dev; - int i, j; + int i; bool *save_enabled; bool fallback = true, ret = true; int num_connectors_enabled = 0; @@ -621,11 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, retry: conn_seq = conn_configured; for (i = 0; i < count; i++) { - struct drm_connector *connector; + struct drm_connector *connector = connectors[i]; struct drm_encoder *encoder; - struct drm_crtc *new_crtc; - - connector = connectors[i]; + struct drm_crtc *crtc; + const char *mode_type; + int j; if (conn_configured & BIT(i)) continue; @@ -664,7 +702,7 @@ retry: num_connectors_enabled++; - new_crtc = connector->state->crtc; + crtc = connector->state->crtc; /* * Make sure we're not trying to drive multiple connectors @@ -672,69 +710,52 @@ retry: * match the BIOS. 
*/ for (j = 0; j < count; j++) { - if (crtcs[j] == new_crtc) { - drm_dbg_kms(dev, "fallback: cloned configuration\n"); + if (crtcs[j] == crtc) { + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n", + connector->base.id, connector->name); goto bail; } } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", - connector->base.id, connector->name); - - /* go for command line mode first */ - modes[i] = drm_connector_pick_cmdline_mode(connector); + mode_type = "cmdline"; + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connector)); - /* try for preferred next */ if (!modes[i]) { - drm_dbg_kms(dev, - "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n", - connector->base.id, connector->name, - str_yes_no(connector->has_tile)); - modes[i] = drm_connector_preferred_mode(connector, width, height); + mode_type = "preferred"; + mode_replace(dev, &modes[i], + drm_connector_preferred_mode(connector, width, height)); } - /* No preferred mode marked by the EDID? Are there any modes? */ - if (!modes[i] && !list_empty(&connector->modes)) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n", - connector->base.id, connector->name); - modes[i] = drm_connector_first_mode(connector); + if (!modes[i]) { + mode_type = "first"; + mode_replace(dev, &modes[i], + drm_connector_first_mode(connector)); } /* last resort: use current mode */ if (!modes[i]) { - /* - * IMPORTANT: We want to use the adjusted mode (i.e. - * after the panel fitter upscaling) as the initial - * config, not the input mode, which is what crtc->mode - * usually contains. But since our current - * code puts a mode derived from the post-pfit timings - * into crtc->mode this works out correctly. - * - * This is crtc->mode and not crtc->state->mode for the - * fastboot check to work correctly. - */ - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n", - connector->base.id, connector->name); - modes[i] = &connector->state->crtc->mode; + mode_type = "current"; + mode_replace(dev, &modes[i], + &crtc->state->mode); } + /* * In case of tiled modes, if all tiles are not present * then fallback to a non tiled mode. */ if (connector->has_tile && num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", - connector->base.id, connector->name); - modes[i] = drm_connector_fallback_non_tiled_mode(connector); + mode_type = "non tiled"; + mode_replace(dev, &modes[i], + drm_connector_fallback_non_tiled_mode(connector)); } - crtcs[i] = new_crtc; + crtcs[i] = crtc; - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n", + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s] using %s mode: %s\n", connector->base.id, connector->name, - connector->state->crtc->base.id, - connector->state->crtc->name, - modes[i]->hdisplay, modes[i]->vdisplay, - modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? 
"i" : ""); + crtc->base.id, crtc->name, + mode_type, modes[i]->name); fallback = false; conn_configured |= BIT(i); @@ -799,8 +820,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int total_modes_count = 0; struct drm_client_offset *offsets; unsigned int connector_count = 0; - /* points to modes protected by mode_config.mutex */ - struct drm_display_mode **modes; + const struct drm_display_mode **modes; struct drm_crtc **crtcs; int i, ret = 0; bool *enabled; @@ -851,7 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, if (!drm_client_firmware_config(client, connectors, connector_count, crtcs, modes, offsets, enabled, width, height)) { - memset(modes, 0, connector_count * sizeof(*modes)); + modes_destroy(dev, modes, connector_count); memset(crtcs, 0, connector_count * sizeof(*crtcs)); memset(offsets, 0, connector_count * sizeof(*offsets)); @@ -868,10 +888,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, crtcs, modes, 0, width, height); } + mutex_unlock(&dev->mode_config.mutex); + drm_client_modeset_release(client); for (i = 0; i < connector_count; i++) { - struct drm_display_mode *mode = modes[i]; + const struct drm_display_mode *mode = modes[i]; struct drm_crtc *crtc = crtcs[i]; struct drm_client_offset *offset = &offsets[i]; @@ -902,11 +924,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, modeset->y = offset->y; } } - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&client->modeset_mutex); out: kfree(crtcs); + modes_destroy(dev, modes, connector_count); kfree(modes); kfree(offsets); kfree(enabled); @@ -938,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) struct drm_plane *plane = modeset->crtc->primary; struct drm_cmdline_mode *cmdline; u64 valid_mask = 0; - unsigned int i; + int i; if (!modeset->num_connectors) return false; @@ -1219,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp struct drm_connector *connector; struct drm_mode_set *modeset; struct drm_modeset_acquire_ctx ctx; - int j; int ret; DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); drm_client_for_each_modeset(modeset, client) { + int j; + if (!modeset->crtc->enabled) continue; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 0955f1c385dd..39497493f74c 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!encoder_funcs) continue; - encoder_funcs = encoder->helper_private; if (encoder_funcs->mode_fixup) { if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6b2178864c7e..3dfd8b34dceb 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } -static int bridges_show(struct seq_file *m, void *data) -{ - struct drm_encoder *encoder = m->private; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; - unsigned int idx = 0; - - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); - drm_printf(&p, "\ttype: [%d] %s\n", - bridge->type, - drm_get_connector_type_name(bridge->type)); - - if (bridge->of_node) - drm_printf(&p, "\tOF: %pOFfc\n", 
bridge->of_node); - - drm_printf(&p, "\tops: [0x%x]", bridge->ops); - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - drm_puts(&p, " detect"); - if (bridge->ops & DRM_BRIDGE_OP_EDID) - drm_puts(&p, " edid"); - if (bridge->ops & DRM_BRIDGE_OP_HPD) - drm_puts(&p, " hpd"); - if (bridge->ops & DRM_BRIDGE_OP_MODES) - drm_puts(&p, " modes"); - if (bridge->ops & DRM_BRIDGE_OP_HDMI) - drm_puts(&p, " hdmi"); - drm_puts(&p, "\n"); - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(bridges); - void drm_debugfs_encoder_add(struct drm_encoder *encoder) { struct drm_minor *minor = encoder->dev->primary; @@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder) encoder->debugfs_entry = root; - /* bridges list */ - debugfs_create_file("bridges", 0444, root, encoder, - &bridges_fops); + drm_bridge_debugfs_encoder_params(root, encoder); if (encoder->funcs && encoder->funcs->debugfs_init) encoder->funcs->debugfs_init(encoder, root); diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h index aee1b86a73c1..957dd0619f5c 100644 --- a/drivers/gpu/drm/drm_displayid_internal.h +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -66,6 +66,7 @@ struct drm_edid; #define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27 #define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28 #define DATA_BLOCK_2_CONTAINER_ID 0x29 +#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a #define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e #define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81 @@ -114,20 +115,32 @@ struct displayid_tiled_block { struct displayid_detailed_timings_1 { u8 pixel_clock[3]; u8 flags; - u8 hactive[2]; - u8 hblank[2]; - u8 hsync[2]; - u8 hsw[2]; - u8 vactive[2]; - u8 vblank[2]; - u8 vsync[2]; - u8 vsw[2]; + __le16 hactive; + __le16 hblank; + __le16 hsync; + __le16 hsw; + __le16 vactive; + __le16 vblank; + __le16 vsync; + __le16 vsw; } __packed; struct displayid_detailed_timing_block { struct displayid_block base; struct displayid_detailed_timings_1 timings[]; -}; +} __packed; + +struct displayid_formula_timings_9 { + u8 flags; + __le16 hactive; + __le16 vactive; + u8 vrefresh; +} __packed; + +struct displayid_formula_timing_block { + struct displayid_block base; + struct displayid_formula_timings_9 timings[]; +} __packed; #define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0) #define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5) diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c index 385eb5e10047..9dc0408fbbea 100644 --- a/drivers/gpu/drm/drm_draw.c +++ b/drivers/gpu/drm/drm_draw.c @@ -13,85 +13,7 @@ #include <drm/drm_fourcc.h> #include "drm_draw_internal.h" - -/* - * Conversions from xrgb8888 - */ - -static u16 convert_xrgb8888_to_rgb565(u32 pix) -{ - return ((pix & 0x00F80000) >> 8) | - ((pix & 0x0000FC00) >> 5) | - ((pix & 0x000000F8) >> 3); -} - -static u16 convert_xrgb8888_to_rgba5551(u32 pix) -{ - return ((pix & 0x00f80000) >> 8) | - ((pix & 0x0000f800) >> 5) | - ((pix & 0x000000f8) >> 2) | - BIT(0); /* set alpha bit */ -} - -static u16 convert_xrgb8888_to_xrgb1555(u32 pix) -{ - return ((pix & 0x00f80000) >> 9) | - ((pix & 0x0000f800) >> 6) | - ((pix & 0x000000f8) >> 3); -} - -static u16 convert_xrgb8888_to_argb1555(u32 pix) -{ - return BIT(15) | /* set alpha bit */ - ((pix & 0x00f80000) >> 9) | - ((pix & 0x0000f800) >> 6) | - ((pix & 0x000000f8) >> 3); -} - -static u32 convert_xrgb8888_to_argb8888(u32 pix) -{ - return pix | GENMASK(31, 24); /* fill alpha bits */ -} - -static u32 convert_xrgb8888_to_xbgr8888(u32 pix) -{ - return ((pix & 0x00ff0000) >> 16) << 0 | - ((pix & 0x0000ff00) >> 8) << 8 | - 
((pix & 0x000000ff) >> 0) << 16 | - ((pix & 0xff000000) >> 24) << 24; -} - -static u32 convert_xrgb8888_to_abgr8888(u32 pix) -{ - return ((pix & 0x00ff0000) >> 16) << 0 | - ((pix & 0x0000ff00) >> 8) << 8 | - ((pix & 0x000000ff) >> 0) << 16 | - GENMASK(31, 24); /* fill alpha bits */ -} - -static u32 convert_xrgb8888_to_xrgb2101010(u32 pix) -{ - pix = ((pix & 0x000000FF) << 2) | - ((pix & 0x0000FF00) << 4) | - ((pix & 0x00FF0000) << 6); - return pix | ((pix >> 8) & 0x00300C03); -} - -static u32 convert_xrgb8888_to_argb2101010(u32 pix) -{ - pix = ((pix & 0x000000FF) << 2) | - ((pix & 0x0000FF00) << 4) | - ((pix & 0x00FF0000) << 6); - return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); -} - -static u32 convert_xrgb8888_to_abgr2101010(u32 pix) -{ - pix = ((pix & 0x00FF0000) >> 14) | - ((pix & 0x0000FF00) << 4) | - ((pix & 0x000000FF) << 22); - return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03); -} +#include "drm_format_internal.h" /** * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format @@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format) { switch (format) { case DRM_FORMAT_RGB565: - return convert_xrgb8888_to_rgb565(color); + return drm_pixel_xrgb8888_to_rgb565(color); case DRM_FORMAT_RGBA5551: - return convert_xrgb8888_to_rgba5551(color); + return drm_pixel_xrgb8888_to_rgba5551(color); case DRM_FORMAT_XRGB1555: - return convert_xrgb8888_to_xrgb1555(color); + return drm_pixel_xrgb8888_to_xrgb1555(color); case DRM_FORMAT_ARGB1555: - return convert_xrgb8888_to_argb1555(color); + return drm_pixel_xrgb8888_to_argb1555(color); case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: return color; case DRM_FORMAT_ARGB8888: - return convert_xrgb8888_to_argb8888(color); + return drm_pixel_xrgb8888_to_argb8888(color); case DRM_FORMAT_XBGR8888: - return convert_xrgb8888_to_xbgr8888(color); + return drm_pixel_xrgb8888_to_xbgr8888(color); case DRM_FORMAT_ABGR8888: - return convert_xrgb8888_to_abgr8888(color); + return drm_pixel_xrgb8888_to_abgr8888(color); case DRM_FORMAT_XRGB2101010: - return convert_xrgb8888_to_xrgb2101010(color); + return drm_pixel_xrgb8888_to_xrgb2101010(color); case DRM_FORMAT_ARGB2101010: - return convert_xrgb8888_to_argb2101010(color); + return drm_pixel_xrgb8888_to_argb2101010(color); case DRM_FORMAT_ABGR2101010: - return convert_xrgb8888_to_abgr2101010(color); + return drm_pixel_xrgb8888_to_abgr2101010(color); default: WARN_ONCE(1, "Can't convert to %p4cc\n", &format); return 0; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 17fc5dc708f4..3dc7acd56b1d 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -40,6 +40,7 @@ #include <linux/xarray.h> #include <drm/drm_accel.h> +#include <drm/drm_bridge.h> #include <drm/drm_cache.h> #include <drm/drm_client_event.h> #include <drm/drm_color_mgmt.h> @@ -500,6 +501,25 @@ void drm_dev_unplug(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unplug); +/** + * drm_dev_set_dma_dev - set the DMA device for a DRM device + * @dev: DRM device + * @dma_dev: DMA device or NULL + * + * Sets the DMA device of the given DRM device. Only required if + * the DMA device is different from the DRM device's parent. After + * calling this function, the DRM device holds a reference on + * @dma_dev. Pass NULL to clear the DMA device. 
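+ * + * A minimal probe-time sketch, assuming a platform device whose scanout DMA is performed by a device other than the DRM device's parent; my_driver and my_dma_device() are hypothetical names used only for illustration: + * + * .. code-block:: c + * + * struct drm_device *drm; + * + * drm = drm_dev_alloc(&my_driver, &pdev->dev); + * if (IS_ERR(drm)) + * return PTR_ERR(drm); + * + * drm_dev_set_dma_dev(drm, my_dma_device(pdev));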
+ */ +void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev) +{ + dma_dev = get_device(dma_dev); + + put_device(dev->dma_dev); + dev->dma_dev = dma_dev; +} +EXPORT_SYMBOL(drm_dev_set_dma_dev); + /* * Available recovery methods for wedged device. To be sent along with device * wedged uevent. @@ -654,6 +674,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) { drm_fs_inode_free(dev->anon_inode); + put_device(dev->dma_dev); + dev->dma_dev = NULL; put_device(dev->dev); /* Prevent use-after-free in drm_managed_release when debugging is * enabled. Slightly awkward, but can't really be helped. */ @@ -1188,6 +1210,7 @@ static int __init drm_core_init(void) } drm_debugfs_root = debugfs_create_dir("dri", NULL); + drm_bridge_debugfs_params(drm_debugfs_root); ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); if (ret < 0) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 13bc4c290b17..eab5196140d7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -6760,23 +6760,23 @@ out: } static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev, - struct displayid_detailed_timings_1 *timings, + const struct displayid_detailed_timings_1 *timings, bool type_7) { struct drm_display_mode *mode; - unsigned pixel_clock = (timings->pixel_clock[0] | - (timings->pixel_clock[1] << 8) | - (timings->pixel_clock[2] << 16)) + 1; - unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; - unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; - unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; - unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1; - unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1; - unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1; - unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1; - unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1; - bool hsync_positive = (timings->hsync[1] >> 7) & 0x1; - bool vsync_positive = (timings->vsync[1] >> 7) & 0x1; + unsigned int pixel_clock = (timings->pixel_clock[0] | + (timings->pixel_clock[1] << 8) | + (timings->pixel_clock[2] << 16)) + 1; + unsigned int hactive = le16_to_cpu(timings->hactive) + 1; + unsigned int hblank = le16_to_cpu(timings->hblank) + 1; + unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1; + unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1; + unsigned int vactive = le16_to_cpu(timings->vactive) + 1; + unsigned int vblank = le16_to_cpu(timings->vblank) + 1; + unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1; + unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1; + bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15); + bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15); mode = drm_mode_create(dev); if (!mode) @@ -6833,6 +6833,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector, return num_modes; } +static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev, + const struct displayid_formula_timings_9 *timings, + bool type_10) +{ + struct drm_display_mode *mode; + u16 hactive = le16_to_cpu(timings->hactive) + 1; + u16 vactive = le16_to_cpu(timings->vactive) + 1; + u8 timing_formula = timings->flags & 0x7; + + /* TODO: support RB-v2 & RB-v3 */ + if (timing_formula > 1) + return NULL; + + /* TODO: support video-optimized refresh rate */ + if (timings->flags & (1 << 4)) + 
drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate"); + + mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false); + if (!mode) + return NULL; + + /* TODO: interpret S3D flags */ + + mode->type = DRM_MODE_TYPE_DRIVER; + drm_mode_set_name(mode); + + return mode; +} + +static int add_displayid_formula_modes(struct drm_connector *connector, + const struct displayid_block *block) +{ + const struct displayid_formula_timing_block *formula_block = (struct displayid_formula_timing_block *)block; + int num_timings; + struct drm_display_mode *newmode; + int num_modes = 0; + bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING; + int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4); + + /* extended blocks are not supported yet */ + if (timing_size != 6) + return 0; + + if (block->num_bytes % timing_size) + return 0; + + num_timings = block->num_bytes / timing_size; + for (int i = 0; i < num_timings; i++) { + const struct displayid_formula_timings_9 *timings = &formula_block->timings[i]; + + newmode = drm_mode_displayid_formula(connector->dev, timings, type_10); + if (!newmode) + continue; + + drm_mode_probed_add(connector, newmode); + num_modes++; + } + return num_modes; +} + static int add_displayid_detailed_modes(struct drm_connector *connector, const struct drm_edid *drm_edid) { @@ -6845,6 +6905,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector, if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING || block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING) num_modes += add_displayid_detailed_1_modes(connector, block); + else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING || + block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING) + num_modes += add_displayid_formula_modes(connector, block); } displayid_iter_end(&iter); @@ -7099,18 +7162,12 @@ EXPORT_SYMBOL(drm_add_edid_modes); * Return: The number of modes added or 0 if we couldn't find any. 
*/ int drm_add_modes_noedid(struct drm_connector *connector, - int hdisplay, int vdisplay) + unsigned int hdisplay, unsigned int vdisplay) { - int i, count, num_modes = 0; + int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0; struct drm_display_mode *mode; struct drm_device *dev = connector->dev; - count = ARRAY_SIZE(drm_dmt_modes); - if (hdisplay < 0) - hdisplay = 0; - if (vdisplay < 0) - vdisplay = 0; - for (i = 0; i < count; i++) { const struct drm_display_mode *ptr = &drm_dmt_modes[i]; diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 01d3ab307ac3..d36e6cacc575 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -20,6 +20,8 @@ #include <drm/drm_print.h> #include <drm/drm_rect.h> +#include "drm_format_internal.h" + /** * drm_format_conv_state_init - Initialize format-conversion state * @state: The state to initialize @@ -244,6 +246,152 @@ static int drm_fb_xfrm(struct iosys_map *dst, xfrm_line); } +#define ALIGN_DOWN_PIXELS(end, n, a) \ + ((end) - ((n) & ((a) - 1))) + +static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf, + unsigned int pixels, + u32 (*xfrm_pixel)(u32)) +{ + __le32 *dbuf32 = dbuf; + u8 *dbuf8; + const __le32 *sbuf32 = sbuf; + const __le32 *send32 = sbuf32 + pixels; + + /* write 4 pixels at once */ + while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) { + u32 pix[4] = { + le32_to_cpup(sbuf32), + le32_to_cpup(sbuf32 + 1), + le32_to_cpup(sbuf32 + 2), + le32_to_cpup(sbuf32 + 3), + }; + /* write output bytes in reverse order for little endianness */ + u32 val32 = xfrm_pixel(pix[0]) | + (xfrm_pixel(pix[1]) << 8) | + (xfrm_pixel(pix[2]) << 16) | + (xfrm_pixel(pix[3]) << 24); + *dbuf32++ = cpu_to_le32(val32); + sbuf32 += ARRAY_SIZE(pix); + } + + /* write trailing pixels */ + dbuf8 = (u8 __force *)dbuf32; + while (sbuf32 < send32) + *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++)); +} + +static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf, + unsigned int pixels, + u32 (*xfrm_pixel)(u32)) +{ + __le64 *dbuf64 = dbuf; + __le32 *dbuf32; + __le16 *dbuf16; + const __le32 *sbuf32 = sbuf; + const __le32 *send32 = sbuf32 + pixels; + +#if defined(CONFIG_64BIT) + /* write 4 pixels at once */ + while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) { + u32 pix[4] = { + le32_to_cpup(sbuf32), + le32_to_cpup(sbuf32 + 1), + le32_to_cpup(sbuf32 + 2), + le32_to_cpup(sbuf32 + 3), + }; + /* write output bytes in reverse order for little endianness */ + u64 val64 = ((u64)xfrm_pixel(pix[0])) | + ((u64)xfrm_pixel(pix[1]) << 16) | + ((u64)xfrm_pixel(pix[2]) << 32) | + ((u64)xfrm_pixel(pix[3]) << 48); + *dbuf64++ = cpu_to_le64(val64); + sbuf32 += ARRAY_SIZE(pix); + } +#endif + + /* write 2 pixels at once */ + dbuf32 = (__le32 __force *)dbuf64; + while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) { + u32 pix[2] = { + le32_to_cpup(sbuf32), + le32_to_cpup(sbuf32 + 1), + }; + /* write output bytes in reverse order for little endianness */ + u32 val32 = xfrm_pixel(pix[0]) | + (xfrm_pixel(pix[1]) << 16); + *dbuf32++ = cpu_to_le32(val32); + sbuf32 += ARRAY_SIZE(pix); + } + + /* write trailing pixel */ + dbuf16 = (__le16 __force *)dbuf32; + while (sbuf32 < send32) + *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++))); +} + +static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf, + unsigned int pixels, + u32 (*xfrm_pixel)(u32)) +{ + __le32 *dbuf32 = dbuf; + u8 *dbuf8; + const __le32 *sbuf32 = sbuf; + const __le32 *send32 = sbuf32 + 
pixels; + + /* write pixels in chunks of 4 */ + while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) { + u32 val24[4] = { + xfrm_pixel(le32_to_cpup(sbuf32)), + xfrm_pixel(le32_to_cpup(sbuf32 + 1)), + xfrm_pixel(le32_to_cpup(sbuf32 + 2)), + xfrm_pixel(le32_to_cpup(sbuf32 + 3)), + }; + u32 out32[3] = { + /* write output bytes in reverse order for little endianness */ + ((val24[0] & 0x000000ff)) | + ((val24[0] & 0x0000ff00)) | + ((val24[0] & 0x00ff0000)) | + ((val24[1] & 0x000000ff) << 24), + ((val24[1] & 0x0000ff00) >> 8) | + ((val24[1] & 0x00ff0000) >> 8) | + ((val24[2] & 0x000000ff) << 16) | + ((val24[2] & 0x0000ff00) << 16), + ((val24[2] & 0x00ff0000) >> 16) | + ((val24[3] & 0x000000ff) << 8) | + ((val24[3] & 0x0000ff00) << 8) | + ((val24[3] & 0x00ff0000) << 8), + }; + + *dbuf32++ = cpu_to_le32(out32[0]); + *dbuf32++ = cpu_to_le32(out32[1]); + *dbuf32++ = cpu_to_le32(out32[2]); + sbuf32 += ARRAY_SIZE(val24); + } + + /* write trailing pixel */ + dbuf8 = (u8 __force *)dbuf32; + while (sbuf32 < send32) { + u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++)); + /* write output in reverse order for little endianness */ + *dbuf8++ = (val24 & 0x000000ff); + *dbuf8++ = (val24 & 0x0000ff00) >> 8; + *dbuf8++ = (val24 & 0x00ff0000) >> 16; + } +} + +static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf, + unsigned int pixels, + u32 (*xfrm_pixel)(u32)) +{ + __le32 *dbuf32 = dbuf; + const __le32 *sbuf32 = sbuf; + const __le32 *send32 = sbuf32 + pixels; + + while (sbuf32 < send32) + *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++))); +} + /** * drm_fb_memcpy - Copy clip buffer * @dst: Array of destination buffers @@ -368,17 +516,7 @@ EXPORT_SYMBOL(drm_fb_swab); static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels) { - u8 *dbuf8 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - dbuf8[x] = ((pix & 0x00e00000) >> 16) | - ((pix & 0x0000e000) >> 11) | - ((pix & 0x000000c0) >> 6); - } + drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332); } /** @@ -417,38 +555,19 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332); static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le16 *dbuf16 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u16 val16; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val16 = ((pix & 0x00F80000) >> 8) | - ((pix & 0x0000FC00) >> 5) | - ((pix & 0x000000F8) >> 3); - dbuf16[x] = cpu_to_le16(val16); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565); +} + +static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix) +{ + return swab16(drm_pixel_xrgb8888_to_rgb565(pix)); } /* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le16 *dbuf16 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u16 val16; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val16 = ((pix & 0x00F80000) >> 8) | - ((pix & 0x0000FC00) >> 5) | - ((pix & 0x000000F8) >> 3); - dbuf16[x] = cpu_to_le16(swab16(val16)); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab); } /** @@ -495,19 +614,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le16 *dbuf16 = dbuf; - 
const __le32 *sbuf32 = sbuf; - unsigned int x; - u16 val16; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val16 = ((pix & 0x00f80000) >> 9) | - ((pix & 0x0000f800) >> 6) | - ((pix & 0x000000f8) >> 3); - dbuf16[x] = cpu_to_le16(val16); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555); } /** @@ -547,20 +654,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555); static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le16 *dbuf16 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u16 val16; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val16 = BIT(15) | /* set alpha bit */ - ((pix & 0x00f80000) >> 9) | - ((pix & 0x0000f800) >> 6) | - ((pix & 0x000000f8) >> 3); - dbuf16[x] = cpu_to_le16(val16); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555); } /** @@ -600,20 +694,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555); static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le16 *dbuf16 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u16 val16; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val16 = ((pix & 0x00f80000) >> 8) | - ((pix & 0x0000f800) >> 5) | - ((pix & 0x000000f8) >> 2) | - BIT(0); /* set alpha bit */ - dbuf16[x] = cpu_to_le16(val16); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551); } /** @@ -653,18 +734,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551); static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels) { - u8 *dbuf8 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - /* write blue-green-red to output in little endianness */ - *dbuf8++ = (pix & 0x000000FF) >> 0; - *dbuf8++ = (pix & 0x0000FF00) >> 8; - *dbuf8++ = (pix & 0x00FF0000) >> 16; - } + drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888); } /** @@ -704,18 +774,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels) { - u8 *dbuf8 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - /* write red-green-blue to output in little endianness */ - *dbuf8++ = (pix & 0x00ff0000) >> 16; - *dbuf8++ = (pix & 0x0000ff00) >> 8; - *dbuf8++ = (pix & 0x000000ff) >> 0; - } + drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888); } /** @@ -755,16 +814,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888); static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le32 *dbuf32 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - pix |= GENMASK(31, 24); /* fill alpha bits */ - dbuf32[x] = cpu_to_le32(pix); - } + drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888); } /** @@ -804,19 +854,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888); static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le32 *dbuf32 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - pix = ((pix & 0x00ff0000) >> 16) << 0 | - ((pix & 0x0000ff00) >> 8) << 8 | - ((pix & 0x000000ff) >> 
0) << 16 | - GENMASK(31, 24); /* fill alpha bits */ - *dbuf32++ = cpu_to_le32(pix); - } + drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888); } static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, @@ -835,19 +873,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le32 *dbuf32 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - pix = ((pix & 0x00ff0000) >> 16) << 0 | - ((pix & 0x0000ff00) >> 8) << 8 | - ((pix & 0x000000ff) >> 0) << 16 | - ((pix & 0xff000000) >> 24) << 24; - *dbuf32++ = cpu_to_le32(pix); - } + drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888); } static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, @@ -866,20 +892,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le32 *dbuf32 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 val32; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val32 = ((pix & 0x000000FF) << 2) | - ((pix & 0x0000FF00) << 4) | - ((pix & 0x00FF0000) << 6); - pix = val32 | ((val32 >> 8) & 0x00300C03); - *dbuf32++ = cpu_to_le32(pix); - } + drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010); } /** @@ -920,21 +933,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010); static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels) { - __le32 *dbuf32 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 val32; - u32 pix; - - for (x = 0; x < pixels; x++) { - pix = le32_to_cpu(sbuf32[x]); - val32 = ((pix & 0x000000ff) << 2) | - ((pix & 0x0000ff00) << 4) | - ((pix & 0x00ff0000) << 6); - pix = GENMASK(31, 30) | /* set alpha bits */ - val32 | ((val32 >> 8) & 0x00300c03); - *dbuf32++ = cpu_to_le32(pix); - } + drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010); } /** @@ -975,19 +974,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010); static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels) { - u8 *dbuf8 = dbuf; - const __le32 *sbuf32 = sbuf; - unsigned int x; - - for (x = 0; x < pixels; x++) { - u32 pix = le32_to_cpu(sbuf32[x]); - u8 r = (pix & 0x00ff0000) >> 16; - u8 g = (pix & 0x0000ff00) >> 8; - u8 b = pix & 0x000000ff; - - /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ - *dbuf8++ = (3 * r + 6 * g + b) / 10; - } + drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601); } /** @@ -1031,36 +1018,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels) { - unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0); - __le32 *dbuf32 = dbuf; - __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16); - const __le32 *sbuf32 = sbuf; - unsigned int x; - u32 val32; - u16 val16; - u32 pix[2]; - - for (x = 0; x < pixels2; x += 2, ++dbuf32) { - pix[0] = le32_to_cpu(sbuf32[x]); - pix[1] = le32_to_cpu(sbuf32[x + 1]); - val32 = ((pix[0] & 0xf0000000) >> 16) | - ((pix[0] & 0x00f00000) >> 12) | - ((pix[0] & 0x0000f000) >> 8) | - ((pix[0] & 0x000000f0) >> 4) | - ((pix[1] & 0xf0000000) >> 0) | - ((pix[1] & 0x00f00000) << 4) | - ((pix[1] 
& 0x0000f000) << 8) | - ((pix[1] & 0x000000f0) << 12); - *dbuf32 = cpu_to_le32(val32); - } - for (; x < pixels; x++) { - pix[0] = le32_to_cpu(sbuf32[x]); - val16 = ((pix[0] & 0xf0000000) >> 16) | - ((pix[0] & 0x00f00000) >> 12) | - ((pix[0] & 0x0000f000) >> 8) | - ((pix[0] & 0x000000f0) >> 4); - dbuf16[x] = cpu_to_le16(val16); - } + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444); } /** diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h new file mode 100644 index 000000000000..9f857bfa368d --- /dev/null +++ b/drivers/gpu/drm/drm_format_internal.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +#ifndef DRM_FORMAT_INTERNAL_H +#define DRM_FORMAT_INTERNAL_H + +#include <linux/bits.h> +#include <linux/types.h> + +/* + * Each pixel-format conversion helper takes a raw pixel in a + * specific input format and returns a raw pixel in a specific + * output format. All pixels are in little-endian byte order. + * + * Function names are + * + * drm_pixel_<input>_to_<output>_<algorithm>() + * + * where <input> and <output> refer to pixel formats. The + * <algorithm> is optional and hints to the method used for the + * conversion. Helpers with no algorithm given apply pixel-bit + * shifting. + * + * The argument type is u32. We expect this to be wide enough to + * hold all conversion input from 32-bit RGB to any output format. + * The Linux kernel should avoid format conversion for anything + * but XRGB8888 input data. Converting from other formats can still + * be acceptable in some cases. + * + * The return type is u32. It is wide enough to hold all conversion + * output from XRGB8888. For output formats wider than 32 bit, a + * return type of u64 would be acceptable. + */ + +/* + * Conversions from XRGB8888 + */ + +static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix) +{ + u32 r = (pix & 0x00ff0000) >> 16; + u32 g = (pix & 0x0000ff00) >> 8; + u32 b = pix & 0x000000ff; + + /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ + return (3 * r + 6 * g + b) / 10; +} + +static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix) +{ + return ((pix & 0x00e00000) >> 16) | + ((pix & 0x0000e000) >> 11) | + ((pix & 0x000000c0) >> 6); +} + +static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix) +{ + return ((pix & 0x00f80000) >> 8) | + ((pix & 0x0000fc00) >> 5) | + ((pix & 0x000000f8) >> 3); +} + +static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix) +{ + return ((pix & 0x00f80000) >> 8) | + ((pix & 0x0000f800) >> 5) | + ((pix & 0x000000f8) >> 2); +} + +static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix) +{ + return drm_pixel_xrgb8888_to_rgbx5551(pix) | + BIT(0); /* set alpha bit */ +} + +static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix) +{ + return ((pix & 0x00f80000) >> 9) | + ((pix & 0x0000f800) >> 6) | + ((pix & 0x000000f8) >> 3); +} + +static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix) +{ + return BIT(15) | /* set alpha bit */ + drm_pixel_xrgb8888_to_xrgb1555(pix); +} + +static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix) +{ + return pix & GENMASK(23, 0); +} + +static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix) +{ + return ((pix & 0x00ff0000) >> 16) | + ((pix & 0x0000ff00)) | + ((pix & 0x000000ff) << 16); +} + +static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix) +{ + return GENMASK(31, 24) | /* fill alpha bits */ + pix; +} + +static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix) +{ + return ((pix & 0xff000000)) | /* also copy filler bits */ + ((pix & 0x00ff0000) >> 16) | + ((pix &
0x0000ff00)) | + ((pix & 0x000000ff) << 16); +} + +static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix) +{ + return GENMASK(31, 24) | /* fill alpha bits */ + drm_pixel_xrgb8888_to_xbgr8888(pix); +} + +static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix) +{ + pix = ((pix & 0x000000ff) << 2) | + ((pix & 0x0000ff00) << 4) | + ((pix & 0x00ff0000) << 6); + return pix | ((pix >> 8) & 0x00300c03); +} + +static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix) +{ + return GENMASK(31, 30) | /* set alpha bits */ + drm_pixel_xrgb8888_to_xrgb2101010(pix); +} + +static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix) +{ + pix = ((pix & 0x00ff0000) >> 14) | + ((pix & 0x0000ff00) << 4) | + ((pix & 0x000000ff) << 22); + return pix | ((pix >> 8) & 0x00300c03); +} + +static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix) +{ + return GENMASK(31, 30) | /* set alpha bits */ + drm_pixel_xrgb8888_to_xbgr2101010(pix); +} + +/* + * Conversion from ARGB8888 + */ + +static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix) +{ + return ((pix & 0xf0000000) >> 16) | + ((pix & 0x00f00000) >> 12) | + ((pix & 0x0000f000) >> 8) | + ((pix & 0x000000f0) >> 4); +} + +#endif diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c6240bab3fa5..1e659d2660f7 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj) dma_resv_unlock(obj->resv); } -int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) +int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map) { int ret; @@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) return 0; } -EXPORT_SYMBOL(drm_gem_vmap); +EXPORT_SYMBOL(drm_gem_vmap_locked); -void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_assert_held(obj->resv); @@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) /* Always set the mapping to NULL. Callers may rely on this. 
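The 10-bit conversions above widen each 8-bit channel by shifting it to the top of its 10-bit slot and then OR-ing in (pix >> 8) & 0x00300c03, which replicates each channel's two most significant bits into the two newly created low bits. That keeps both endpoints of the range exact: 0x00 maps to 0x000 and 0xff maps to 0x3ff. A self-contained userspace C sketch (not kernel code) that demonstrates the property:

#include <assert.h>
#include <stdint.h>

/* Same math as drm_pixel_xrgb8888_to_xrgb2101010() above. */
static uint32_t xrgb8888_to_xrgb2101010(uint32_t pix)
{
	pix = ((pix & 0x000000ff) << 2) |	/* B into the top of bits 9:0 */
	      ((pix & 0x0000ff00) << 4) |	/* G into the top of bits 19:10 */
	      ((pix & 0x00ff0000) << 6);	/* R into the top of bits 29:20 */
	/* replicate each channel's top 2 bits into its low 2 bits */
	return pix | ((pix >> 8) & 0x00300c03);
}

int main(void)
{
	assert(xrgb8888_to_xrgb2101010(0x00000000) == 0x00000000);
	assert(xrgb8888_to_xrgb2101010(0x00ffffff) == 0x3fffffff);	/* full scale */
	assert(xrgb8888_to_xrgb2101010(0x00800000) == 0x20200000);	/* R 0x80 -> 0x202 */
	return 0;
}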
*/ iosys_map_clear(map); } -EXPORT_SYMBOL(drm_gem_vunmap); +EXPORT_SYMBOL(drm_gem_vunmap_locked); void drm_gem_lock(struct drm_gem_object *obj) { @@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_unlock); -int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; dma_resv_lock(obj->resv, NULL); - ret = drm_gem_vmap(obj, map); + ret = drm_gem_vmap_locked(obj, map); dma_resv_unlock(obj->resv); return ret; } -EXPORT_SYMBOL(drm_gem_vmap_unlocked); +EXPORT_SYMBOL(drm_gem_vmap); -void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_lock(obj->resv, NULL); - drm_gem_vunmap(obj, map); + drm_gem_vunmap_locked(obj, map); dma_resv_unlock(obj->resv); } -EXPORT_SYMBOL(drm_gem_vunmap_unlocked); +EXPORT_SYMBOL(drm_gem_vunmap); /** * drm_gem_lock_reservations - Sets up the ww context and acquires @@ -1543,10 +1543,10 @@ tail: EXPORT_SYMBOL(drm_gem_lru_scan); /** - * drm_gem_evict - helper to evict backing pages for a GEM object + * drm_gem_evict_locked - helper to evict backing pages for a GEM object * @obj: obj in question */ -int drm_gem_evict(struct drm_gem_object *obj) +int drm_gem_evict_locked(struct drm_gem_object *obj) { dma_resv_assert_held(obj->resv); @@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj) return 0; } -EXPORT_SYMBOL(drm_gem_evict); +EXPORT_SYMBOL(drm_gem_evict_locked); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 0fbeb686e561..6f72e7a0f427 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, ret = -EINVAL; goto err_drm_gem_vunmap; } - ret = drm_gem_vmap_unlocked(obj, &map[i]); + ret = drm_gem_vmap(obj, &map[i]); if (ret) goto err_drm_gem_vunmap; } @@ -384,7 +384,7 @@ err_drm_gem_vunmap: obj = drm_gem_fb_get_obj(fb, i); if (!obj) continue; - drm_gem_vunmap_unlocked(obj, &map[i]); + drm_gem_vunmap(obj, &map[i]); } return ret; } @@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map) continue; if (iosys_map_is_null(&map[i])) continue; - drm_gem_vunmap_unlocked(obj, &map[i]); + drm_gem_vunmap(obj, &map[i]); } } EXPORT_SYMBOL(drm_gem_fb_vunmap); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index d99dee67353a..aa43265f4f4f 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } else { dma_resv_lock(shmem->base.resv, NULL); - drm_WARN_ON(obj->dev, shmem->vmap_use_count); + drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count)); if (shmem->sgt) { dma_unmap_sgtable(obj->dev->dev, shmem->sgt, @@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); } if (shmem->pages) - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); - drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count)); + drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count)); dma_resv_unlock(shmem->base.resv); } @@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } 
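The rename above flips the locking convention: the plain drm_gem_vmap()/drm_gem_vunmap() entry points now take the object's reservation lock internally, and the new _locked variants are for paths that already hold obj->resv (the dma-buf callbacks in drm_prime.c are converted accordingly further down). A sketch of the resulting calling convention from core code; my_object_dump() and the 64-byte dump length are illustrative only:

static int my_object_dump(struct drm_gem_object *obj)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_vmap(obj, &map);	/* acquires and drops obj->resv itself */
	if (ret)
		return ret;

	if (!map.is_iomem)	/* only hex-dump plain system memory */
		print_hex_dump(KERN_DEBUG, "obj: ", DUMP_PREFIX_OFFSET,
			       16, 1, map.vaddr, 64, false);

	drm_gem_vunmap(obj, &map);

	return 0;
}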
EXPORT_SYMBOL_GPL(drm_gem_shmem_free); -static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct page **pages; dma_resv_assert_held(shmem->base.resv); - if (shmem->pages_use_count++ > 0) + if (refcount_inc_not_zero(&shmem->pages_use_count)) return 0; pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n", PTR_ERR(pages)); - shmem->pages_use_count = 0; return PTR_ERR(pages); } @@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) shmem->pages = pages; + refcount_set(&shmem->pages_use_count, 1); + return 0; } /* - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object * @shmem: shmem GEM object * * This function decreases the use count and puts the backing pages when use drops to zero. */ -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; dma_resv_assert_held(shmem->base.resv); - if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) - return; - - if (--shmem->pages_use_count > 0) - return; - + if (refcount_dec_and_test(&shmem->pages_use_count)) { #ifdef CONFIG_X86 - if (shmem->map_wc) - set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); + if (shmem->map_wc) + set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); #endif - drm_gem_put_pages(obj, shmem->pages, - shmem->pages_mark_dirty_on_put, - shmem->pages_mark_accessed_on_put); - shmem->pages = NULL; + drm_gem_put_pages(obj, shmem->pages, + shmem->pages_mark_dirty_on_put, + shmem->pages_mark_accessed_on_put); + shmem->pages = NULL; + } } -EXPORT_SYMBOL(drm_gem_shmem_put_pages); +EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked); int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { @@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base)); - ret = drm_gem_shmem_get_pages(shmem); + if (refcount_inc_not_zero(&shmem->pages_pin_count)) + return 0; + + ret = drm_gem_shmem_get_pages_locked(shmem); + if (!ret) + refcount_set(&shmem->pages_pin_count, 1); return ret; } @@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) { dma_resv_assert_held(shmem->base.resv); - drm_gem_shmem_put_pages(shmem); + if (refcount_dec_and_test(&shmem->pages_pin_count)) + drm_gem_shmem_put_pages_locked(shmem); } EXPORT_SYMBOL(drm_gem_shmem_unpin_locked); @@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); + if (refcount_inc_not_zero(&shmem->pages_pin_count)) + return 0; + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); if (ret) return ret; @@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) return ret; } -EXPORT_SYMBOL(drm_gem_shmem_pin); +EXPORT_SYMBOL_GPL(drm_gem_shmem_pin); /** * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object @@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); + if (refcount_dec_not_one(&shmem->pages_pin_count)) + return; + dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_unpin_locked(shmem); 
dma_resv_unlock(shmem->base.resv); } -EXPORT_SYMBOL(drm_gem_shmem_unpin); +EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin); /* - * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object * @shmem: shmem GEM object * @map: Returns the kernel virtual address of the SHMEM GEM object's backing * store. @@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin); * exists for the buffer backing the shmem GEM object. It hides the differences * between dma-buf imported and natively allocated objects. * - * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked(). * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; if (drm_gem_is_imported(obj)) { ret = dma_buf_vmap(obj->dma_buf, map); - if (!ret) { - if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->dma_buf, map); - return -EIO; - } - } } else { pgprot_t prot = PAGE_KERNEL; dma_resv_assert_held(shmem->base.resv); - if (shmem->vmap_use_count++ > 0) { + if (refcount_inc_not_zero(&shmem->vmap_use_count)) { iosys_map_set_vaddr(map, shmem->vaddr); return 0; } - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_pin_locked(shmem); if (ret) - goto err_zero_use; + return ret; if (shmem->map_wc) prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot); - if (!shmem->vaddr) + if (!shmem->vaddr) { ret = -ENOMEM; - else + } else { iosys_map_set_vaddr(map, shmem->vaddr); + refcount_set(&shmem->vmap_use_count, 1); + } } if (ret) { @@ -379,28 +385,26 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, err_put_pages: if (!drm_gem_is_imported(obj)) - drm_gem_shmem_put_pages(shmem); -err_zero_use: - shmem->vmap_use_count = 0; + drm_gem_shmem_unpin_locked(shmem); return ret; } -EXPORT_SYMBOL(drm_gem_shmem_vmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked); /* - * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object + * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object * @shmem: shmem GEM object * @map: Kernel virtual address where the SHMEM GEM object was mapped * * This function cleans up a kernel virtual address mapping acquired by - * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to - * zero. + * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count + * drops to zero. * * This function hides the differences between dma-buf imported and natively * allocated objects. 
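The use counters above move from plain unsigned ints to refcount_t, which saturates instead of wrapping on over- and underflow. The idiom is the same in every helper: refcount_inc_not_zero() as the fast path when the resource already exists, refcount_set(..., 1) for the first user, and refcount_dec_and_test() to tear down on the last put; the object's reservation lock keeps the zero-to-one transition race-free. Distilled into a minimal sketch around a hypothetical structure (my_res_acquire()/my_res_release() stand in for drm_gem_get_pages()/drm_gem_put_pages()):

struct my_res {
	struct dma_resv *resv;
	refcount_t use_count;
	void *payload;
};

static int my_res_get(struct my_res *res)
{
	dma_resv_assert_held(res->resv);

	if (refcount_inc_not_zero(&res->use_count))
		return 0;	/* already set up, fast path */

	res->payload = my_res_acquire();	/* hypothetical */
	if (!res->payload)
		return -ENOMEM;

	refcount_set(&res->use_count, 1);	/* first user */

	return 0;
}

static void my_res_put(struct my_res *res)
{
	dma_resv_assert_held(res->resv);

	if (refcount_dec_and_test(&res->use_count)) {
		my_res_release(res->payload);	/* hypothetical */
		res->payload = NULL;
	}
}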
*/ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; @@ -409,19 +413,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, } else { dma_resv_assert_held(shmem->base.resv); - if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) - return; - - if (--shmem->vmap_use_count > 0) - return; + if (refcount_dec_and_test(&shmem->vmap_use_count)) { + vunmap(shmem->vaddr); + shmem->vaddr = NULL; - vunmap(shmem->vaddr); - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_unpin_locked(shmem); + } } - - shmem->vaddr = NULL; } -EXPORT_SYMBOL(drm_gem_shmem_vunmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked); static int drm_gem_shmem_create_with_handle(struct drm_file *file_priv, @@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, /* Update madvise status, returns true if not purged, else * false or -errno. */ -int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) +int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv) { dma_resv_assert_held(shmem->base.resv); @@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) return (madv >= 0); } -EXPORT_SYMBOL(drm_gem_shmem_madvise); +EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked); -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; @@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); shmem->sgt = NULL; - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); shmem->madv = -1; @@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } -EXPORT_SYMBOL(drm_gem_shmem_purge); +EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked); /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object @@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) * mmap'd, vm_open() just grabs an additional reference for the new * mm the vma is getting copied into (ie. on fork()). 
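drm_gem_shmem_madvise_locked() and drm_gem_shmem_purge_locked() are the building blocks for driver shrinkers: userspace marks idle buffers purgeable, and a later memory-pressure callback drops their backing store. A loose sketch of the reclaim side, modelled on how drivers such as panfrost use these helpers (my_shrinker_scan_object() is hypothetical, and a real shrinker also checks that the buffer is unpinned and idle before purging):

static unsigned long my_shrinker_scan_object(struct drm_gem_shmem_object *shmem)
{
	unsigned long freed = 0;

	/* Shrinkers run asynchronously; never block on the resv lock. */
	if (!dma_resv_trylock(shmem->base.resv))
		return 0;

	if (shmem->madv > 0) {	/* userspace marked the buffer DONTNEED */
		drm_gem_shmem_purge_locked(shmem);
		freed = shmem->base.size >> PAGE_SHIFT;
	}

	dma_resv_unlock(shmem->base.resv);

	return freed;
}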
*/ - if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) - shmem->pages_use_count++; + drm_WARN_ON_ONCE(obj->dev, + !refcount_inc_not_zero(&shmem->pages_use_count)); dma_resv_unlock(shmem->base.resv); @@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); dma_resv_lock(shmem->base.resv, NULL); - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); dma_resv_unlock(shmem->base.resv); drm_gem_vm_close(vma); @@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return -EINVAL; dma_resv_lock(shmem->base.resv, NULL); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); dma_resv_unlock(shmem->base.resv); if (ret) @@ -666,11 +666,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, if (drm_gem_is_imported(&shmem->base)) return; - drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); - drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); + drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count)); + drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count)); + drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count)); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); } -EXPORT_SYMBOL(drm_gem_shmem_print_info); +EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info); /** * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned @@ -707,7 +708,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); if (ret) return ERR_PTR(ret); @@ -729,7 +730,7 @@ err_free_sgt: sg_free_table(sgt); kfree(sgt); err_put_pages: - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b2b6a8e49dda..e44f28fd81d3 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj); void drm_gem_unpin_locked(struct drm_gem_object *obj); int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); -void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); +int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map); /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index dfa595556320..e5184a0c2465 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -36,6 +36,8 @@ #include <drm/drm_mipi_dsi.h> #include <drm/drm_print.h> +#include <linux/media-bus-format.h> + #include <video/mipi_display.h> /** @@ -871,6 +873,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, EXPORT_SYMBOL(mipi_dsi_generic_read); /** + * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_* based + * input pixel format for a given DSI output + * pixel format + * @dsi_format: pixel format that a DSI host needs to output + * + * Various DSI 
hosts can use this function during their + * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain + * the MEDIA_BUS_FMT_* pixel format required as input. + * + * RETURNS: + * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure. + */ +u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format) +{ + switch (dsi_format) { + case MIPI_DSI_FMT_RGB888: + return MEDIA_BUS_FMT_RGB888_1X24; + + case MIPI_DSI_FMT_RGB666: + return MEDIA_BUS_FMT_RGB666_1X24_CPADHI; + + case MIPI_DSI_FMT_RGB666_PACKED: + return MEDIA_BUS_FMT_RGB666_1X18; + + case MIPI_DSI_FMT_RGB565: + return MEDIA_BUS_FMT_RGB565_1X16; + + default: + /* Unsupported DSI Format */ + return 0; + } +} +EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt); + +/** * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload * @dsi: DSI peripheral device * @data: buffer containing data to be transmitted diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index c627e42a7ce7..650de4da0853 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -74,8 +74,9 @@ EXPORT_SYMBOL(drm_panel_init); * drm_panel_add - add a panel to the global registry * @panel: panel to add * - * Add a panel to the global registry so that it can be looked up by display - * drivers. + * Add a panel to the global registry so that it can be looked + * up by display drivers. The panel to be added must have been + * allocated by devm_drm_panel_alloc(). */ void drm_panel_add(struct drm_panel *panel) { @@ -105,21 +106,21 @@ EXPORT_SYMBOL(drm_panel_remove); * * Calling this function will enable power and deassert any reset signals to * the panel. After this has completed it is possible to communicate with any - * integrated circuitry via a command bus. - * - * Return: 0 on success or a negative error code on failure. + * integrated circuitry via a command bus. This function cannot fail (as it is + * called from the pre_enable call chain). There will always be a call to + * drm_panel_unprepare() afterwards. */ -int drm_panel_prepare(struct drm_panel *panel) +void drm_panel_prepare(struct drm_panel *panel) { struct drm_panel_follower *follower; int ret; if (!panel) - return -EINVAL; + return; if (panel->prepared) { dev_warn(panel->dev, "Skipping prepare of already prepared panel\n"); - return 0; + return; } mutex_lock(&panel->follower_lock); @@ -138,11 +139,8 @@ int drm_panel_prepare(struct drm_panel *panel) follower->funcs->panel_prepared, ret); } - ret = 0; exit: mutex_unlock(&panel->follower_lock); - - return ret; } EXPORT_SYMBOL(drm_panel_prepare); @@ -154,16 +152,14 @@ EXPORT_SYMBOL(drm_panel_prepare); * reset, turn off power supplies, ...). After this function has completed, it * is usually no longer possible to communicate with the panel until another * call to drm_panel_prepare(). - * - * Return: 0 on success or a negative error code on failure. 
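A typical consumer of drm_mipi_dsi_get_input_bus_fmt() is a DSI bridge reporting a single supported input format from its &drm_bridge_funcs.atomic_get_input_bus_fmts hook. A hedged sketch; struct my_bridge, to_my_bridge() and the stored dsi pointer are hypothetical:

static u32 *my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
					 struct drm_bridge_state *bridge_state,
					 struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state,
					 u32 output_fmt,
					 unsigned int *num_input_fmts)
{
	struct my_bridge *ctx = to_my_bridge(bridge);
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(ctx->dsi->format);
	if (!input_fmts[0]) {	/* unsupported DSI pixel format */
		kfree(input_fmts);
		return NULL;
	}

	*num_input_fmts = 1;

	return input_fmts;
}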
*/ -int drm_panel_unprepare(struct drm_panel *panel) +void drm_panel_unprepare(struct drm_panel *panel) { struct drm_panel_follower *follower; int ret; if (!panel) - return -EINVAL; + return; /* * If you are seeing the warning below it likely means one of two things: @@ -176,7 +172,7 @@ int drm_panel_unprepare(struct drm_panel *panel) */ if (!panel->prepared) { dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n"); - return 0; + return; } mutex_lock(&panel->follower_lock); @@ -195,11 +191,8 @@ int drm_panel_unprepare(struct drm_panel *panel) } panel->prepared = false; - ret = 0; exit: mutex_unlock(&panel->follower_lock); - - return ret; } EXPORT_SYMBOL(drm_panel_unprepare); @@ -209,26 +202,26 @@ EXPORT_SYMBOL(drm_panel_unprepare); * * Calling this function will cause the panel display drivers to be turned on * and the backlight to be enabled. Content will be visible on screen after - * this call completes. - * - * Return: 0 on success or a negative error code on failure. + * this call completes. This function cannot fail (as it is called from the + * enable call chain). There will always be a call to drm_panel_disable() + * afterwards. */ -int drm_panel_enable(struct drm_panel *panel) +void drm_panel_enable(struct drm_panel *panel) { int ret; if (!panel) - return -EINVAL; + return; if (panel->enabled) { dev_warn(panel->dev, "Skipping enable of already enabled panel\n"); - return 0; + return; } if (panel->funcs && panel->funcs->enable) { ret = panel->funcs->enable(panel); if (ret < 0) - return ret; + return; } panel->enabled = true; @@ -236,8 +229,6 @@ int drm_panel_enable(struct drm_panel *panel) if (ret < 0) DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n", ret); - - return 0; } EXPORT_SYMBOL(drm_panel_enable); @@ -248,15 +239,13 @@ EXPORT_SYMBOL(drm_panel_enable); * This will typically turn off the panel's backlight or disable the display * drivers. For smart panels it should still be possible to communicate with * the integrated circuitry via any command bus after this call. - * - * Return: 0 on success or a negative error code on failure. */ -int drm_panel_disable(struct drm_panel *panel) +void drm_panel_disable(struct drm_panel *panel) { int ret; if (!panel) - return -EINVAL; + return; /* * If you are seeing the warning below it likely means one of two things: @@ -269,7 +258,7 @@ int drm_panel_disable(struct drm_panel *panel) */ if (!panel->enabled) { dev_warn(panel->dev, "Skipping disable of already disabled panel\n"); - return 0; + return; } ret = backlight_disable(panel->backlight); @@ -280,11 +269,9 @@ int drm_panel_disable(struct drm_panel *panel) if (panel->funcs && panel->funcs->disable) { ret = panel->funcs->disable(panel); if (ret < 0) - return ret; + return; } panel->enabled = false; - - return 0; } EXPORT_SYMBOL(drm_panel_disable); @@ -317,6 +304,93 @@ int drm_panel_get_modes(struct drm_panel *panel, } EXPORT_SYMBOL(drm_panel_get_modes); +static void __drm_panel_free(struct kref *kref) +{ + struct drm_panel *panel = container_of(kref, struct drm_panel, refcount); + + kfree(panel->container); +} + +/** + * drm_panel_get - Acquire a panel reference + * @panel: DRM panel + * + * This function increments the panel's refcount. 
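Drivers are not expected to call __devm_drm_panel_alloc() directly; the same series adds a devm_drm_panel_alloc(dev, type, member, funcs, connector_type) wrapper macro that fills in the container size and member offset. The returned panel is refcounted, so it stays valid until the devm action and every drm_panel_get() reference have dropped. A hypothetical probe sketch assuming that wrapper (struct my_panel and my_panel_funcs are illustrative):

struct my_panel {
	struct drm_panel panel;
	struct regulator *supply;
};

static int my_panel_probe(struct mipi_dsi_device *dsi)
{
	struct my_panel *ctx;

	ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
				   &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->supply = devm_regulator_get(&dsi->dev, "power");
	if (IS_ERR(ctx->supply))
		return PTR_ERR(ctx->supply);

	drm_panel_add(&ctx->panel);

	return 0;
}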
+ * Returns: + * Pointer to @panel + */ +struct drm_panel *drm_panel_get(struct drm_panel *panel) +{ + if (!panel) + return panel; + + kref_get(&panel->refcount); + + return panel; +} +EXPORT_SYMBOL(drm_panel_get); + +/** + * drm_panel_put - Release a panel reference + * @panel: DRM panel + * + * This function decrements the panel's reference count and frees the + * object if the reference count drops to zero. + */ +void drm_panel_put(struct drm_panel *panel) +{ + if (panel) + kref_put(&panel->refcount, __drm_panel_free); +} +EXPORT_SYMBOL(drm_panel_put); + +/** + * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer + * + * @data: pointer to @struct drm_panel, cast to a void pointer + * + * Wrapper of drm_panel_put() to be used when a function taking a void + * pointer is needed, for example as a devm action. + */ +static void drm_panel_put_void(void *data) +{ + struct drm_panel *panel = (struct drm_panel *)data; + + drm_panel_put(panel); +} + +void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_panel_funcs *funcs, + int connector_type) +{ + void *container; + struct drm_panel *panel; + int err; + + if (!funcs) { + dev_warn(dev, "Missing funcs pointer\n"); + return ERR_PTR(-EINVAL); + } + + container = kzalloc(size, GFP_KERNEL); + if (!container) + return ERR_PTR(-ENOMEM); + + panel = container + offset; + panel->container = container; + panel->funcs = funcs; + kref_init(&panel->refcount); + + err = devm_add_action_or_reset(dev, drm_panel_put_void, panel); + if (err) + return ERR_PTR(err); + + drm_panel_init(panel, dev, funcs, connector_type); + + return container; +} +EXPORT_SYMBOL(__devm_drm_panel_alloc); + #ifdef CONFIG_OF /** * of_drm_find_panel - look up a panel using a device tree node diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index c554ad8f246b..7ac0fd5391fe 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* ZOTAC Gaming Zone */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* One Mix 2S (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index b47ea25fdfaa..b4de79583805 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -7,6 +7,7 @@ */ #include <linux/font.h> +#include <linux/highmem.h> #include <linux/init.h> #include <linux/iosys-map.h> #include <linux/kdebug.h> @@ -154,6 +155,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color); } +static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color) +{ + u16 *p = vaddr + offset; + + *p = color; +} + +static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color) +{ + u8 *p = vaddr + offset; + + *p++ = color & 0xff; + color >>= 8; + *p++ = color & 0xff; + color >>= 8; + *p = color & 0xff; +} + +static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color) +{ + u32 *p = vaddr + offset; + + *p = color; +} + +static void drm_panic_write_pixel(void *vaddr, 
unsigned int offset, u32 color, unsigned int cpp) +{ + switch (cpp) { + case 2: + drm_panic_write_pixel16(vaddr, offset, color); + break; + case 3: + drm_panic_write_pixel24(vaddr, offset, color); + break; + case 4: + drm_panic_write_pixel32(vaddr, offset, color); + break; + default: + pr_debug_once("Can't blit with pixel width %u\n", cpp); + } +} + +/* + * The scanout buffer pages are not mapped, so for each pixel, + * use kmap_local_page_try_from_panic() to map the page, and write the pixel. + * Try to keep the map from the previous pixel, to avoid too much map/unmap. + */ +static void drm_panic_blit_page(struct page **pages, unsigned int dpitch, + unsigned int cpp, const u8 *sbuf8, + unsigned int spitch, struct drm_rect *clip, + unsigned int scale, u32 fg32) +{ + unsigned int y, x; + unsigned int page = ~0; + unsigned int height = drm_rect_height(clip); + unsigned int width = drm_rect_width(clip); + void *vaddr = NULL; + + for (y = 0; y < height; y++) { + for (x = 0; x < width; x++) { + if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) { + unsigned int new_page; + unsigned int offset; + + offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp; + new_page = offset >> PAGE_SHIFT; + offset = offset % PAGE_SIZE; + if (new_page != page) { + if (!pages[new_page]) + continue; + if (vaddr) + kunmap_local(vaddr); + page = new_page; + vaddr = kmap_local_page_try_from_panic(pages[page]); + } + if (vaddr) + drm_panic_write_pixel(vaddr, offset, fg32, cpp); + } + } + } + if (vaddr) + kunmap_local(vaddr); +} + /* * drm_panic_blit - convert a monochrome image to a linear framebuffer * @sb: destination scanout buffer @@ -177,6 +262,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip, if (sb->set_pixel) return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color); + if (sb->pages) + return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0], + sbuf8, spitch, clip, scale, fg_color); + map = sb->map[0]; iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]); @@ -209,6 +298,36 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb, sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color); } +static void drm_panic_fill_page(struct page **pages, unsigned int dpitch, + unsigned int cpp, struct drm_rect *clip, + u32 color) +{ + unsigned int y, x; + unsigned int page = ~0; + void *vaddr = NULL; + + for (y = clip->y1; y < clip->y2; y++) { + for (x = clip->x1; x < clip->x2; x++) { + unsigned int new_page; + unsigned int offset; + + offset = y * dpitch + x * cpp; + new_page = offset >> PAGE_SHIFT; + offset = offset % PAGE_SIZE; + if (new_page != page) { + if (vaddr) + kunmap_local(vaddr); + page = new_page; + vaddr = kmap_local_page_try_from_panic(pages[page]); + } + if (vaddr) + drm_panic_write_pixel(vaddr, offset, color, cpp); + } + } + if (vaddr) + kunmap_local(vaddr); +} + /* * drm_panic_fill - Fill a rectangle with a color * @sb: destination scanout buffer @@ -225,6 +344,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip, if (sb->set_pixel) return drm_panic_fill_pixel(sb, clip, color); + if (sb->pages) + return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0], + clip, color); + map = sb->map[0]; iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]); @@ -709,16 +832,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description) if (!drm_panic_trylock(plane->dev, flags)) return; + ret =
plane->helper_private->get_scanout_buffer(plane, &sb); + + if (ret || !drm_panic_is_format_supported(sb.format)) + goto unlock; + + /* One of these should be set, or it can't draw pixels */ + if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0])) + goto unlock; + drm_panic_set_description(description); - ret = plane->helper_private->get_scanout_buffer(plane, &sb); + draw_panic_dispatch(&sb); + if (plane->helper_private->panic_flush) + plane->helper_private->panic_flush(plane); - if (!ret && drm_panic_is_format_supported(sb.format)) { - draw_panic_dispatch(&sb); - if (plane->helper_private->panic_flush) - plane->helper_private->panic_flush(plane); - } drm_panic_clear_description(); + +unlock: drm_panic_unlock(plane->dev, flags); } diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index f2a99681b998..6025a705530e 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -5,7 +5,7 @@ //! It is called from a panic handler, so it should't allocate memory and //! does all the work on the stack or on the provided buffers. For //! simplification, it only supports low error correction, and applies the -//! first mask (checkerboard). It will draw the smallest QRcode that can +//! first mask (checkerboard). It will draw the smallest QR code that can //! contain the string passed as parameter. To get the most compact //! QR code, the start of the URL is encoded as binary, and the //! compressed kmsg is encoded as numeric. @@ -315,7 +315,7 @@ impl Segment<'_> { } } - // Returns the size of the length field in bits, depending on QR Version. + /// Returns the size of the length field in bits, depending on QR Version. fn length_bits_count(&self, version: Version) -> usize { let Version(v) = version; match self { @@ -331,7 +331,7 @@ impl Segment<'_> { } } - // Number of characters in the segment. + /// Number of characters in the segment. fn character_count(&self) -> usize { match self { Segment::Binary(data) => data.len(), @@ -415,7 +415,7 @@ impl Iterator for SegmentIterator<'_> { self.carry_len -= out_len; let pow = u64::pow(10, self.carry_len as u32); let out = (self.carry / pow) as u16; - self.carry = self.carry % pow; + self.carry %= pow; Some((out, NUM_CHARS_BITS[out_len])) } } @@ -569,8 +569,8 @@ struct EncodedMsgIterator<'a> { impl Iterator for EncodedMsgIterator<'_> { type Item = u8; - // Send the bytes in interleaved mode, first byte of first block of group1, - // then first byte of second block of group1, ... + /// Send the bytes in interleaved mode, first byte of first block of group1, + /// then first byte of second block of group1, ... fn next(&mut self) -> Option<Self::Item> { let em = self.em; let blocks = em.g1_blocks + em.g2_blocks; @@ -638,7 +638,7 @@ impl QrImage<'_> { self.data.fill(0); } - // Set pixel to light color. + /// Set pixel to light color. fn set(&mut self, x: u8, y: u8) { let off = y as usize * self.stride as usize + x as usize / 8; let mut v = self.data[off]; @@ -646,13 +646,13 @@ impl QrImage<'_> { self.data[off] = v; } - // Invert a module color. + /// Invert a module color. fn xor(&mut self, x: u8, y: u8) { let off = y as usize * self.stride as usize + x as usize / 8; self.data[off] ^= 0x80 >> (x % 8); } - // Draw a light square at (x, y) top left corner. + /// Draw a light square at (x, y) top left corner. 
fn draw_square(&mut self, x: u8, y: u8, size: u8) { for k in 0..size { self.set(x + k, y); @@ -784,7 +784,7 @@ impl QrImage<'_> { vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6)) } - // Returns true if the module is reserved (Not usable for data and EC). + /// Returns true if the module is reserved (Not usable for data and EC). fn is_reserved(&self, x: u8, y: u8) -> bool { self.is_alignment(x, y) || self.is_finder(x, y) @@ -793,13 +793,14 @@ impl QrImage<'_> { || self.is_version_info(x, y) } - // Last module to draw, at bottom left corner. + /// Last module to draw, at bottom left corner. fn is_last(&self, x: u8, y: u8) -> bool { x == 0 && y == self.width - 1 } - // Move to the next module according to QR code order. - // From bottom right corner, to bottom left corner. + /// Move to the next module according to QR code order. + /// + /// From bottom right corner, to bottom left corner. fn next(&self, x: u8, y: u8) -> (u8, u8) { let x_adj = if x <= 6 { x + 1 } else { x }; let column_type = (self.width - x_adj) % 4; @@ -812,7 +813,7 @@ impl QrImage<'_> { } } - // Find next module that can hold data. + /// Find next module that can hold data. fn next_available(&self, x: u8, y: u8) -> (u8, u8) { let (mut x, mut y) = self.next(x, y); while self.is_reserved(x, y) && !self.is_last(x, y) { @@ -841,7 +842,7 @@ impl QrImage<'_> { } } - // Apply checkerboard mask to all non-reserved modules. + /// Apply checkerboard mask to all non-reserved modules. fn apply_mask(&mut self) { for x in 0..self.width { for y in 0..self.width { @@ -852,7 +853,7 @@ impl QrImage<'_> { } } - // Draw the QR code with the provided data iterator. + /// Draw the QR code with the provided data iterator. fn draw_all(&mut self, data: impl Iterator<Item = u8>) { // First clear the table, as it may have already some data. self.clear(); @@ -876,7 +877,7 @@ impl QrImage<'_> { /// will be encoded as binary segment, otherwise it will be encoded /// efficiently as a numeric segment, and appended to the URL. /// * `data_len`: Length of the data, that needs to be encoded, must be less -/// than data_size. +/// than `data_size`. /// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold /// a V40 QR code. It will then be overwritten with the QR code image. 
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index bdb51c8f262e..d828502268b8 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; - return drm_gem_vmap(obj, map); + return drm_gem_vmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); @@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; - drm_gem_vunmap(obj, map); + drm_gem_vunmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); @@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) EXPORT_SYMBOL(drm_gem_dmabuf_mmap); static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { - .cache_sgt_mapping = true, .attach = drm_gem_map_attach, .detach = drm_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, @@ -998,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { - return drm_gem_prime_import_dev(dev, dma_buf, dev->dev); + return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev)); } EXPORT_SYMBOL(drm_gem_prime_import); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 7ba16323e7c2..6b3541159c0f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res) * cleaned up when the DRM device goes away. * * See drm_kms_helper_poll_init() for more information. - * - * Returns: - * 0 on success, or a negative errno code otherwise. 
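With drm_gem_prime_import() now resolving the importer through drm_dev_dma_dev(), drivers whose drm_device hangs off a DMA-incapable device (USB display adapters, typically) no longer need a custom ->gem_prime_import hook: they register the DMA-capable controller once at probe time, as the gud driver does further below. A sketch of that probe-time step; my_usb_setup_dma() is hypothetical, and the core holding its own reference to the DMA device is inferred from the immediate put_device() in the gud conversion:

static void my_usb_setup_dma(struct usb_interface *intf, struct drm_device *drm)
{
	struct device *dma_dev = usb_intf_get_dma_device(intf);

	if (!dma_dev) {
		dev_warn(&intf->dev, "buffer sharing not supported\n"); /* not fatal */
		return;
	}

	drm_dev_set_dma_dev(drm, dma_dev);
	put_device(dma_dev);	/* the DRM device keeps its own reference */
}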
*/ -int drmm_kms_helper_poll_init(struct drm_device *dev) +void drmm_kms_helper_poll_init(struct drm_device *dev) { + int ret; + drm_kms_helper_poll_init(dev); - return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev); + ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev); + if (ret) + drm_warn(dev, "Connector status will not be updated, error %d\n", ret); } EXPORT_SYMBOL(drmm_kms_helper_poll_init); diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 4f2ab8a7b50f..636cd83ca29e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -741,7 +741,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, } static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, - int fd, int handle) + int fd, int handle, u64 point) { struct dma_fence *fence = sync_file_get_fence(fd); struct drm_syncobj *syncobj; @@ -755,14 +755,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, return -ENOENT; } - drm_syncobj_replace_fence(syncobj, fence); + if (point) { + struct dma_fence_chain *chain = dma_fence_chain_alloc(); + + if (!chain) + return -ENOMEM; + + drm_syncobj_add_point(syncobj, chain, fence, point); + } else { + drm_syncobj_replace_fence(syncobj, fence); + } + dma_fence_put(fence); drm_syncobj_put(syncobj); return 0; } static int drm_syncobj_export_sync_file(struct drm_file *file_private, - int handle, int *p_fd) + int handle, u64 point, int *p_fd) { int ret; struct dma_fence *fence; @@ -772,7 +782,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private, if (fd < 0) return fd; - ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence); + ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence); if (ret) goto err_put_fd; @@ -869,6 +879,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private) { struct drm_syncobj_handle *args = data; + unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE | + DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE; + u64 point = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -EOPNOTSUPP; @@ -876,13 +889,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; - if (args->flags != 0 && - args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE) + if (args->flags & ~valid_flags) return -EINVAL; + if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE) + point = args->point; + if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE) return drm_syncobj_export_sync_file(file_private, args->handle, - &args->fd); + point, &args->fd); + + if (args->point) + return -EINVAL; return drm_syncobj_handle_to_fd(file_private, args->handle, &args->fd); @@ -893,6 +911,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private) { struct drm_syncobj_handle *args = data; + unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE | + DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE; + u64 point = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -EOPNOTSUPP; @@ -900,14 +921,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; - if (args->flags != 0 && - args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE) + if (args->flags & ~valid_flags) return -EINVAL; + if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE) + point = args->point; + if 
(args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE) return drm_syncobj_import_sync_file_fence(file_private, args->fd, - args->handle); + args->handle, + point); + + if (args->point) + return -EINVAL; return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 42e57d142554..917ad527c961 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) int etnaviv_gem_prime_pin(struct drm_gem_object *obj) { - if (!obj->import_attach) { + if (!drm_gem_is_imported(obj)) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); mutex_lock(&etnaviv_obj->lock); @@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj) void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) { - if (!obj->import_attach) { + if (!drm_gem_is_imported(obj)) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); mutex_lock(&etnaviv_obj->lock); @@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) - dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map); + dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: @@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) lockdep_assert_held(&etnaviv_obj->lock); - ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); + ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map); if (ret) return NULL; return map.vaddr; diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index b34ec6728337..29a8366513fa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev) struct resource res; int ret, i; - mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL); - if (!mic) { + mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs); + if (IS_ERR(mic)) { DRM_DEV_ERROR(dev, "mic: Failed to allocate memory for MIC object\n"); - ret = -ENOMEM; + ret = PTR_ERR(mic); goto err; } @@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mic); - mic->bridge.funcs = &mic_bridge_funcs; mic->bridge.of_node = dev->of_node; drm_bridge_add(&mic->bridge); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 03b076db9381..3bbfc1b56a65 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) struct fsl_dcu_drm_device *fsl_dev; struct drm_device *drm; struct device *dev = &pdev->dev; - struct resource *res; void __iomem *base; struct clk *pix_clk_in; char pix_clk_name[32]; @@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) return -ENODEV; fsl_dev->soc = id->data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { ret = PTR_ERR(base); return ret; diff --git 
a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index 4d78b33eaa82..e6753282e70e 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -730,44 +730,3 @@ out: return ret; } - -int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, - unsigned long *pfn) -{ - int ret; - struct psb_mmu_pt *pt; - uint32_t tmp; - spinlock_t *lock = &pd->driver->lock; - - down_read(&pd->driver->sem); - pt = psb_mmu_pt_map_lock(pd, virtual); - if (!pt) { - uint32_t *v; - - spin_lock(lock); - v = kmap_atomic(pd->p); - tmp = v[psb_mmu_pd_index(virtual)]; - kunmap_atomic(v); - spin_unlock(lock); - - if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || - !(pd->invalid_pte & PSB_PTE_VALID)) { - ret = -EINVAL; - goto out; - } - ret = 0; - *pfn = pd->invalid_pte >> PAGE_SHIFT; - goto out; - } - tmp = pt->v[psb_mmu_pt_index(virtual)]; - if (!(tmp & PSB_PTE_VALID)) { - ret = -EINVAL; - } else { - ret = 0; - *pfn = tmp >> PAGE_SHIFT; - } - psb_mmu_pt_unmap_unlock(pt); -out: - up_read(&pd->driver->sem); - return ret; -} diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h index d4b5720ef08e..e6d39703718c 100644 --- a/drivers/gpu/drm/gma500/mmu.h +++ b/drivers/gpu/drm/gma500/mmu.h @@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type); -extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, - unsigned long *pfn); extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context); extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index de8ccfe9890f..ea9b41af0867 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { .prepare = gma_crtc_prepare, .commit = gma_crtc_commit, }; - -/* Not used yet */ -const struct gma_clock_funcs mrst_clock_funcs = { - .clock = mrst_lvds_clock, - .limit = mrst_limit, - .pll_is_valid = gma_pll_is_valid, -}; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 9dc9dcd1b09f..979ea8ecf0d5 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg, void gma_i2c_destroy(struct gma_i2c_chan *chan); int psb_intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter); -extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter); extern void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct psb_intel_mode_device *mode_dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c index 8be0ec340de5..45b10f30a2a9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_modes.c +++ b/drivers/gpu/drm/gma500/psb_intel_modes.c @@ -12,37 +12,6 @@ #include "psb_intel_drv.h" /** - * psb_intel_ddc_probe - * @adapter: Associated I2C adaptor - */ -bool psb_intel_ddc_probe(struct i2c_adapter *adapter) -{ - u8 out_buf[] = { 0x0, 0x0 }; - u8 buf[2]; - int ret; - struct i2c_msg msgs[] = { - { - .addr = 0x50, - .flags = 0, - .len = 1, - .buf = out_buf, - }, - { - .addr = 0x50, - .flags = I2C_M_RD, - .len = 1, - .buf = buf, - } - }; - - ret = i2c_transfer(adapter, msgs, 2); - if (ret == 2) - return true; - - 
return false; -} - -/** * psb_intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use * @adapter: Associated I2C adaptor diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index cb405771d6e2..5385a2126e45 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -309,21 +309,6 @@ out: return ret; } -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. - */ -static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf) -{ - struct gud_device *gdrm = to_gud_device(drm); - - if (!gdrm->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev); -} - static int gud_stats_debugfs(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; @@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = gud_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = "gud", @@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) size_t max_buffer_size = 0; struct gud_device *gdrm; struct drm_device *drm; + struct device *dma_dev; u8 *formats_dev; u32 *formats; int ret, i; @@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) usb_set_intfdata(intf, gdrm); - gdrm->dmadev = usb_intf_get_dma_device(intf); - if (!gdrm->dmadev) - dev_warn(dev, "buffer sharing not supported"); + dma_dev = usb_intf_get_dma_device(intf); + if (dma_dev) { + drm_dev_set_dma_dev(drm, dma_dev); + put_device(dma_dev); + } else { + dev_warn(dev, "buffer sharing not supported"); /* not an error */ + } drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL); ret = drm_dev_register(drm, 0); - if (ret) { - put_device(gdrm->dmadev); + if (ret) return ret; - } drm_kms_helper_poll_init(drm); @@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface) drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - put_device(gdrm->dmadev); - gdrm->dmadev = NULL; } static int gud_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h index 0d148a6f27aa..d6fb25388722 100644 --- a/drivers/gpu/drm/gud/gud_internal.h +++ b/drivers/gpu/drm/gud/gud_internal.h @@ -16,7 +16,6 @@ struct gud_device { struct drm_device drm; struct drm_simple_display_pipe pipe; - struct device *dmadev; struct work_struct work; u32 flags; const struct drm_format_info *xrgb8888_emulation_format; diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile index 95a4ed599d98..1f65c683282f 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Makefile +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \ - dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o + dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \ + hibmc_drm_debugfs.o obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c index 0a903cce1fa9..8732cd1d8cb6 100644 
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c @@ -8,6 +8,7 @@ #include <drm/drm_print.h> #include "dp_comm.h" #include "dp_reg.h" +#include "dp_hw.h" #define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4) #define HIBMC_AUX_CMD_ADDR GENMASK(27, 8) @@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms /* ret >= 0 ,ret is size; ret < 0, ret is err code */ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { - struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux); + struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux); + struct hibmc_dp_dev *dp = dp_priv->dp_dev; u32 aux_cmd; int ret; u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */ @@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg * return hibmc_dp_aux_parse_xfer(dp, msg); } -void hibmc_dp_aux_init(struct hibmc_dp_dev *dp) +void hibmc_dp_aux_init(struct hibmc_dp *dp) { - hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0); - hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1); - hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, + hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0); + hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1); + hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, HIBMC_DP_MIN_PULSE_NUM); dp->aux.transfer = hibmc_dp_aux_xfer; - dp->aux.is_remote = 0; + dp->aux.name = "HIBMC DRM dp aux"; + dp->aux.drm_dev = dp->drm_dev; drm_dp_aux_init(&dp->aux); + dp->dp_dev->aux = &dp->aux; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h index 2c52a4476c4d..4add05c7f161 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h @@ -13,6 +13,8 @@ #include <linux/io.h> #include <drm/display/drm_dp_helper.h> +#include "dp_hw.h" + #define HIBMC_DP_LANE_NUM_MAX 2 struct hibmc_link_status { @@ -32,12 +34,13 @@ struct hibmc_dp_link { }; struct hibmc_dp_dev { - struct drm_dp_aux aux; + struct drm_dp_aux *aux; struct drm_device *dev; void __iomem *base; struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */ struct hibmc_dp_link link; u8 dpcd[DP_RECEIVER_CAP_SIZE]; + void __iomem *serdes_base; }; #define dp_field_modify(reg_value, mask, val) \ @@ -57,7 +60,10 @@ struct hibmc_dp_dev { mutex_unlock(&_dp->lock); \ } while (0) -void hibmc_dp_aux_init(struct hibmc_dp_dev *dp); +void hibmc_dp_aux_init(struct hibmc_dp *dp); int hibmc_dp_link_training(struct hibmc_dp_dev *dp); +int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp); +int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp); +int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]); #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h index 74dd9956144e..08f9e1caf7fc 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h @@ -15,5 +15,7 @@ #define HIBMC_DP_CLK_EN 0x7 #define HIBMC_DP_SYNC_EN_MASK 0x3 #define HIBMC_DP_LINK_RATE_CAL 27 +#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 
86 : 46) +#define HIBMC_DP_INT_ENABLE 0xc #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c index a8d543881c09..8f0daec7d174 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c @@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size); hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE, HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size); + hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET, + HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION, + HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes)); } static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode) @@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp) { struct drm_device *drm_dev = dp->drm_dev; struct hibmc_dp_dev *dp_dev; + int ret; dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL); if (!dp_dev) @@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp) dp_dev->dev = drm_dev; dp_dev->base = dp->mmio + HIBMC_DP_OFFSET; - hibmc_dp_aux_init(dp_dev); + hibmc_dp_aux_init(dp); + + ret = hibmc_dp_serdes_init(dp_dev); + if (ret) + return ret; dp_dev->link.cap.lanes = 0x2; - dp_dev->link.cap.link_rate = DP_LINK_BW_2_7; + dp_dev->link.cap.link_rate = DP_LINK_BW_8_1; /* hdcp data */ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG); @@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp) return 0; } +void hibmc_dp_enable_int(struct hibmc_dp *dp) +{ + struct hibmc_dp_dev *dp_dev = dp->dp_dev; + + writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE); +} + +void hibmc_dp_disable_int(struct hibmc_dp *dp) +{ + struct hibmc_dp_dev *dp_dev = dp->dp_dev; + + writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE); + writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS); +} + +void hibmc_dp_hpd_cfg(struct hibmc_dp *dp) +{ + struct hibmc_dp_dev *dp_dev = dp->dp_dev; + + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1); + hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9); + writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG); + writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE); + writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS); + writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE); + writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL); + writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL); +} + void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable) { struct hibmc_dp_dev *dp_dev = dp->dp_dev; @@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode) return 0; } + +void hibmc_dp_reset_link(struct hibmc_dp *dp) +{ + dp->dp_dev->link.status.clock_recovered = false; + dp->dp_dev->link.status.channel_equalized = false; +} + +static const struct hibmc_dp_color_raw g_rgb_raw[] = { + {CBAR_COLOR_BAR, 0x000, 0x000, 0x000}, + {CBAR_WHITE, 0xfff, 0xfff, 0xfff}, + {CBAR_RED, 0xfff, 0x000, 0x000}, + {CBAR_ORANGE, 0xfff, 0x800, 0x000}, + {CBAR_YELLOW, 0xfff, 0xfff, 0x000}, + {CBAR_GREEN, 0x000, 0xfff, 0x000}, + {CBAR_CYAN, 0x000, 0x800, 0x800}, + {CBAR_BLUE, 0x000, 0x000, 0xfff}, + {CBAR_PURPLE, 0x800, 0x000, 0x800}, + {CBAR_BLACK, 0x000, 0x000, 0x000}, +}; + +void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct 
hibmc_dp_cbar_cfg *cfg) +{ + struct hibmc_dp_dev *dp_dev = dp->dp_dev; + struct hibmc_dp_color_raw raw_data; + + if (cfg->enable) { + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9), + cfg->self_timing); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1), + cfg->dynamic_rate); + if (cfg->pattern == CBAR_COLOR_BAR) { + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0); + } else { + raw_data = g_rgb_raw[cfg->pattern]; + drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value, + raw_data.g_value, raw_data.b_value); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12), + raw_data.r_value); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12), + raw_data.g_value); + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0), + raw_data.b_value); + } + } + + hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable); + writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h index 4dc13b3d9875..665f5b166dfb 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h @@ -10,19 +10,55 @@ #include <drm/drm_encoder.h> #include <drm/drm_connector.h> #include <drm/drm_print.h> +#include <drm/display/drm_dp_helper.h> struct hibmc_dp_dev; +enum hibmc_dp_cbar_pattern { + CBAR_COLOR_BAR, + CBAR_WHITE, + CBAR_RED, + CBAR_ORANGE, + CBAR_YELLOW, + CBAR_GREEN, + CBAR_CYAN, + CBAR_BLUE, + CBAR_PURPLE, + CBAR_BLACK, +}; + +struct hibmc_dp_color_raw { + enum hibmc_dp_cbar_pattern pattern; + u32 r_value; + u32 g_value; + u32 b_value; +}; + +struct hibmc_dp_cbar_cfg { + u8 enable; + u8 self_timing; + u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */ + enum hibmc_dp_cbar_pattern pattern; +}; + struct hibmc_dp { struct hibmc_dp_dev *dp_dev; struct drm_device *drm_dev; struct drm_encoder encoder; struct drm_connector connector; void __iomem *mmio; + struct drm_dp_aux aux; + struct hibmc_dp_cbar_cfg cfg; + u32 irq_status; }; int hibmc_dp_hw_init(struct hibmc_dp *dp); int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode); void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable); +void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg); +void hibmc_dp_reset_link(struct hibmc_dp *dp); +void hibmc_dp_hpd_cfg(struct hibmc_dp *dp); +void hibmc_dp_enable_int(struct hibmc_dp *dp); +void hibmc_dp_disable_int(struct hibmc_dp *dp); #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c index f6355c16cc0a..74f7832ea53e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c @@ -9,6 +9,22 @@ #define HIBMC_EQ_MAX_RETRY 5 +static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp) +{ + switch (dp->link.cap.link_rate) { + case DP_LINK_BW_1_62: + return DP_SERDES_BW_1_62; + case DP_LINK_BW_2_7: + return DP_SERDES_BW_2_7; + case DP_LINK_BW_5_4: + return DP_SERDES_BW_5_4; + case DP_LINK_BW_8_1: + return DP_SERDES_BW_8_1; + default: + return -EINVAL; + } +} + static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp) { u8 buf[2]; @@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp) /* set rate and lane count */ buf[0] = dp->link.cap.link_rate; 
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes; - ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf)); + ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf)); if (ret != sizeof(buf)) { drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret); return ret >= 0 ? -EIO : ret; @@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp) /* set 8b/10b and downspread */ buf[0] = DP_SPREAD_AMP_0_5; buf[1] = DP_SET_ANSI_8B10B; - ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf)); + ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf)); if (ret != sizeof(buf)) { drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret); return ret >= 0 ? -EIO : ret; } - ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd); - if (ret) - drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret); - - return ret; + return 0; } static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern) @@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern) hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val); - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf)); + ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf)); if (ret != sizeof(buf)) { drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n"); return ret >= 0 ? -EIO : ret; @@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp) return ret; for (i = 0; i < dp->link.cap.lanes; i++) - train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + + ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set); + if (ret) + return ret; - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes); + ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes); if (ret != dp->link.cap.lanes) { drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n"); return ret >= 0 ? 
-EIO : ret; @@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp, return false; } -static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp) +static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp) { + int ret; + switch (dp->link.cap.link_rate) { case DP_LINK_BW_2_7: dp->link.cap.link_rate = DP_LINK_BW_1_62; - return 0; + break; case DP_LINK_BW_5_4: dp->link.cap.link_rate = DP_LINK_BW_2_7; - return 0; + break; case DP_LINK_BW_8_1: dp->link.cap.link_rate = DP_LINK_BW_5_4; - return 0; + break; default: return -EINVAL; } + + ret = hibmc_dp_get_serdes_rate_cfg(dp); + if (ret < 0) + return ret; + + return hibmc_dp_serdes_rate_switch(ret, dp); } static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp) @@ -159,6 +183,7 @@ switch (dp->link.cap.lanes) { case 0x2: dp->link.cap.lanes--; + drm_dbg_dp(dp->dev, "dp link training reduced to 1 lane\n"); break; case 0x1: drm_err(dp->dev, "dp link training lane reduction failed, already at minimum\n"); @@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp) voltage_tries = 1; for (cr_tries = 0; cr_tries < 80; cr_tries++) { - drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd); + drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd); - ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status); - if (ret != DP_LINK_STATUS_SIZE) { + ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status); + if (ret) { drm_err(dp->dev, "Get lane status failed\n"); return ret; } @@ -206,7 +231,12 @@ } level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status); - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set, + + ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set); + if (ret) + return ret; + + ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set, dp->link.cap.lanes); if (ret != dp->link.cap.lanes) { drm_dbg_dp(dp->dev, "Update link training failed\n"); @@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp) return ret; for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) { - drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd); + drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd); - ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status); - if (ret != DP_LINK_STATUS_SIZE) { + ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status); + if (ret) { drm_err(dp->dev, "get lane status failed\n"); break; } @@ -255,7 +285,12 @@ } hibmc_dp_link_get_adjust_train(dp, lane_status); - ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, + + ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set); + if (ret) + return ret; + + ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set, dp->link.cap.lanes); if (ret != dp->link.cap.lanes) { drm_dbg_dp(dp->dev, "Update link training failed\n"); @@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp) struct hibmc_dp_link *link = &dp->link; int ret; + ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd); + if (ret) + drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret); + + dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; + dp->link.cap.lanes = 0x2; + + ret = hibmc_dp_get_serdes_rate_cfg(dp); + if (ret < 0) + return ret; + + ret = hibmc_dp_serdes_rate_switch(ret, dp); + if (ret) + return ret; + 
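+ /* + * hibmc_dp_get_serdes_rate_cfg() translates the DPCD link-rate code + * (DP_LINK_BW_1_62/2_7/5_4/8_1) into the serdes' own 0-3 rate encoding, + * and hibmc_dp_serdes_rate_switch() programs it on both lanes, so the + * training loop below starts at the sink's maximum advertised rate. + */ 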
while (true) { ret = hibmc_dp_link_training_cr_pre(dp); if (ret) diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h index 4a515c726d52..394b1e933c3a 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h @@ -5,72 +5,128 @@ #define DP_REG_H #define HIBMC_DP_AUX_CMD_ADDR 0x50 + #define HIBMC_DP_AUX_WR_DATA0 0x54 #define HIBMC_DP_AUX_WR_DATA1 0x58 #define HIBMC_DP_AUX_WR_DATA2 0x5c #define HIBMC_DP_AUX_WR_DATA3 0x60 #define HIBMC_DP_AUX_RD_DATA0 0x64 + #define HIBMC_DP_AUX_REQ 0x74 +#define HIBMC_DP_CFG_AUX_REQ BIT(0) +#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1) +#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2) +#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9) + #define HIBMC_DP_AUX_STATUS 0x78 +#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0) +#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4) +#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12) +#define HIBMC_DP_CFG_AUX GENMASK(24, 17) + #define HIBMC_DP_PHYIF_CTRL0 0xa0 +#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0) +#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4) +#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8) + #define HIBMC_DP_VIDEO_CTRL 0x100 +#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1) +#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2) +#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6) +#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7) +#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8) + #define HIBMC_DP_VIDEO_CONFIG0 0x104 +#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16) +#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0) + #define HIBMC_DP_VIDEO_CONFIG1 0x108 +#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16) +#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0) + #define HIBMC_DP_VIDEO_CONFIG2 0x10c +#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0) + #define HIBMC_DP_VIDEO_CONFIG3 0x110 +#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0) +#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16) + #define HIBMC_DP_VIDEO_PACKET 0x114 +#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0) +#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6) +#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20) + #define HIBMC_DP_VIDEO_MSA0 0x118 +#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16) +#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0) + #define HIBMC_DP_VIDEO_MSA1 0x11c #define HIBMC_DP_VIDEO_MSA2 0x120 + #define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124 +#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16) +#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0) + +#define HIBMC_DP_COLOR_BAR_CTRL 0x260 +#define HIBMC_DP_COLOR_BAR_CTRL1 0x264 + #define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c +#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16) +#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0) + #define HIBMC_DP_TIMING_GEN_CONFIG2 0x274 +#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16) +#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0) + #define HIBMC_DP_TIMING_GEN_CONFIG3 0x278 +#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16) + #define HIBMC_DP_HDCP_CFG 0x600 + #define HIBMC_DP_DPTX_RST_CTRL 0x700 +#define HIBMC_DP_CFG_AUX_RST_N BIT(4) + #define HIBMC_DP_DPTX_CLK_CTRL 0x704 + #define HIBMC_DP_DPTX_GCTL0 0x708 +#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1) + #define HIBMC_DP_INTR_ENABLE 0x720 #define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728 + #define HIBMC_DP_TIMING_MODEL_CTRL 0x884 +#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16) + #define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0 
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1) -#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2) -#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6) -#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9) -#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8) -#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1) -#define HIBMC_DP_CFG_AUX_REQ BIT(0) -#define HIBMC_DP_CFG_AUX_RST_N BIT(4) -#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0) -#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12) -#define HIBMC_DP_CFG_AUX GENMASK(24, 17) -#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4) -#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0) -#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4) -#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16) -#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0) -#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16) -#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0) -#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0) -#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0) -#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0) -#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0) -#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0) -#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8) -#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7) -#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1) -#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2) -#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0) -#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6) -#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16) -#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0) +#define HIBMC_DP_INTSTAT 0x1e0724 +#define HIBMC_DP_INTCLR 0x1e0728 + +/* dp serdes reg */ +#define HIBMC_DP_HOST_OFFSET 0x10000 +#define HIBMC_DP_LANE0_RATE_OFFSET 0x4 +#define HIBMC_DP_LANE1_RATE_OFFSET 0xc +#define HIBMC_DP_LANE_STATUS_OFFSET 0x10 +#define HIBMC_DP_PMA_LANE0_OFFSET 0x18 +#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c +#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c +#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1) +#define DP_SERDES_DONE 0x3 + +/* dp serdes TX de-emphasis configuration */ +#define DP_SERDES_VOL0_PRE0 0x280 +#define DP_SERDES_VOL0_PRE1 0x2300 +#define DP_SERDES_VOL0_PRE2 0x53c0 +#define DP_SERDES_VOL0_PRE3 0x8400 +#define DP_SERDES_VOL1_PRE0 0x380 +#define DP_SERDES_VOL1_PRE1 0x3440 +#define DP_SERDES_VOL1_PRE2 0x6480 +#define DP_SERDES_VOL2_PRE0 0x4c1 +#define DP_SERDES_VOL2_PRE1 0x4500 +#define DP_SERDES_VOL3_PRE0 0x600 +#define DP_SERDES_BW_8_1 0x3 +#define DP_SERDES_BW_5_4 0x2 +#define DP_SERDES_BW_2_7 0x1 +#define DP_SERDES_BW_1_62 0x0 #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c new file mode 100644 index 000000000000..676059d4c1e6 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2025 Hisilicon Limited. 
+ +#include <linux/delay.h> +#include <drm/drm_device.h> +#include <drm/drm_print.h> +#include "dp_comm.h" +#include "dp_config.h" +#include "dp_reg.h" + +int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]) +{ + static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1, + DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3}, + {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1, + DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0, + DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}}; + int cfg[2]; + int i; + + for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) { + cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])] + [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])]; + if (!cfg[i]) + return -EINVAL; + + /* lane1 offset is 4 */ + writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]), + dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4); + } + + usleep_range(300, 500); + + if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) { + drm_dbg_dp(dp->dev, "dp serdes cfg failed\n"); + return -EAGAIN; + } + + return 0; +} + +int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp) +{ + writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET); + writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET); + + usleep_range(300, 500); + + if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) { + drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n"); + return -EAGAIN; + } + + if (rate < DP_SERDES_BW_8_1) + drm_dbg_dp(dp->dev, "reducing serdes rate to: %d\n", + rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162); + + return 0; +} + +int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp) +{ + dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET; + + writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0), + dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET); + writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0), + dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET); + + return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c new file mode 100644 index 000000000000..f585387c3a49 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2024 Hisilicon Limited. 
+ +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/pci.h> + +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_edid.h> + +#include "hibmc_drm_drv.h" + +#define MAX_BUF_SIZE 12 + +static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hibmc_drm_private *priv = file_inode(file)->i_private; + struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg; + int ret, idx; + u8 buf[MAX_BUF_SIZE]; + + if (count >= MAX_BUF_SIZE) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = '\0'; + + /* Only 4 parameters are allowed; their ranges are as follows:
 * [0] enable/disable the colorbar feature
 1: enable colorbar, 0: disable colorbar
 * [1] the timing source of the colorbar display
 0: timing follows XDP, 1: internal self timing
 * [2] the movement of the colorbar display
 0: static colorbar image,
 * 1~255: shift right by one color every (1~255) frames
 * [3] the color type of the colorbar display
 0~9: color bar, white, red, orange,
 * yellow, green, cyan, blue, purple, black
 */ + if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing, + &cfg->dynamic_rate, &cfg->pattern) != 4) { + return -EINVAL; + } + + if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1) + return -EINVAL; + + ret = drm_dev_enter(&priv->dev, &idx); + if (!ret) + return -ENODEV; + + hibmc_dp_set_cbar(&priv->dp, cfg); + + drm_dev_exit(idx); + + return count; +} + +static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg) +{ + struct hibmc_drm_private *priv = m->private; + struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg; + int idx; + + if (!drm_dev_enter(&priv->dev, &idx)) + return -ENODEV; + + seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing, + cfg->dynamic_rate, cfg->pattern); + + drm_dev_exit(idx); + + return 0; +} + +static int hibmc_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private); +} + +static const struct file_operations hibmc_dbg_fops = { + .owner = THIS_MODULE, + .write = hibmc_control_write, + .read = seq_read, + .open = hibmc_open, + .llseek = seq_lseek, + .release = single_release, +}; + +void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root) +{ + struct drm_device *dev = connector->dev; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); + + /* create the file in the drm debugfs directory so it does not need to be removed manually */ + debugfs_create_file("colorbar-cfg", 0200, + root, priv, &hibmc_dbg_fops); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c index 603d6b198a54..d06832e62e96 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c @@ -13,27 +13,64 @@ #include "hibmc_drm_drv.h" #include "dp/dp_hw.h" +#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2) + static int hibmc_dp_connector_get_modes(struct drm_connector *connector) { + const struct drm_edid *drm_edid; int count; - count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width, - connector->dev->mode_config.max_height); - drm_set_preferred_mode(connector, 1024, 768); // temporary implementation + drm_edid = drm_edid_read(connector); + + drm_edid_connector_update(connector, drm_edid); + + count = drm_edid_connector_add_modes(connector); + + drm_edid_free(drm_edid); return count; }
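The colorbar-cfg debugfs file added above consumes the four space-separated fields parsed by hibmc_control_write(). A minimal user-space sketch of driving it follows; the debugfs path is an assumption, since the file is created in the connector's debugfs directory and both the DRI minor and the connector name vary per system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* enable=1, internal self timing, static image, pattern 4 = yellow */
	const char *cfg = "1 1 0 4";
	/* hypothetical path; adjust the DRI minor and connector name */
	int fd = open("/sys/kernel/debug/dri/0/DP-1/colorbar-cfg", O_WRONLY);

	if (fd < 0) {
		perror("open colorbar-cfg");
		return 1;
	}
	if (write(fd, cfg, strlen(cfg)) != (ssize_t)strlen(cfg))
		perror("write colorbar-cfg");
	close(fd);
	return 0;
}

Reading the same file back goes through hibmc_dp_dbgfs_show() and prints the currently programmed four-tuple.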
+static int hibmc_dp_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + /* give the sink time to settle after a hot-plug before probing the DDC */ + mdelay(200); + + return drm_connector_helper_detect_from_ddc(connector, ctx, force); +} + static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = { .get_modes = hibmc_dp_connector_get_modes, + .detect_ctx = hibmc_dp_detect, }; +static int hibmc_dp_late_register(struct drm_connector *connector) +{ + struct hibmc_dp *dp = to_hibmc_dp(connector); + + hibmc_dp_enable_int(dp); + + return drm_dp_aux_register(&dp->aux); +} + +static void hibmc_dp_early_unregister(struct drm_connector *connector) +{ + struct hibmc_dp *dp = to_hibmc_dp(connector); + + drm_dp_aux_unregister(&dp->aux); + + hibmc_dp_disable_int(dp); +} + static const struct drm_connector_funcs hibmc_dp_conn_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = hibmc_dp_late_register, + .early_unregister = hibmc_dp_early_unregister, + .debugfs_init = hibmc_debugfs_init, }; static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode) @@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = { .atomic_disable = hibmc_dp_encoder_disable, }; +irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); + int idx; + + if (!drm_dev_enter(dev, &idx)) + return IRQ_NONE; + + if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) { + drm_dbg_dp(&priv->dev, "HPD IN interrupt\n"); + hibmc_dp_hpd_cfg(&priv->dp); + } else { + drm_dbg_dp(&priv->dev, "HPD OUT interrupt\n"); + hibmc_dp_reset_link(&priv->dp); + } + + if (dev->registered) + drm_connector_helper_hpd_irq_event(&priv->dp.connector); + + drm_dev_exit(idx); + + return IRQ_HANDLED; +} + int hibmc_dp_init(struct hibmc_drm_private *priv) { struct drm_device *dev = &priv->dev; @@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv) drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs); - ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs, - DRM_MODE_CONNECTOR_DisplayPort); + ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs, + DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc); if (ret) { drm_err(dev, "init dp connector failed: %d\n", ret); return ret; @@ -114,5 +176,7 @@ drm_connector_attach_encoder(connector, encoder); + connector->polled = DRM_CONNECTOR_POLL_HPD; + return 0; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index e6de6d5edf6b..768b97f9e74a 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -28,12 +28,12 @@ #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" -#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c -#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00 -#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff +#include "dp/dp_reg.h" DEFINE_DRM_GEM_FOPS(hibmc_fops); +static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" }; + static irqreturn_t hibmc_interrupt(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; @@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void 
*arg) return IRQ_HANDLED; } +static irqreturn_t hibmc_dp_interrupt(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); + u32 status; + + status = readl(priv->mmio + HIBMC_DP_INTSTAT); + if (status) { + priv->dp.irq_status = status; + writel(status, priv->mmio + HIBMC_DP_INTCLR); + return IRQ_WAKE_THREAD; + } + + return IRQ_HANDLED; +} + static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { @@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) return ret; } - /* if DP existed, init DP */ - if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) & - HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) { + /* + * If the serdes register is readable and not equal to 0, the + * DP block exists, so initialize it. + */ + ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL); + if (ret) { ret = hibmc_dp_init(priv); if (ret) drm_err(dev, "failed to init dp: %d\n", ret); @@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv) return 0; } -static int hibmc_unload(struct drm_device *dev) +static void hibmc_unload(struct drm_device *dev) { - struct pci_dev *pdev = to_pci_dev(dev->dev); - drm_atomic_helper_shutdown(dev); +} - free_irq(pdev->irq, dev); +static int hibmc_msi_init(struct drm_device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + char name[32] = {0}; + int valid_irq_num; + int irq; + int ret; - pci_disable_msi(to_pci_dev(dev->dev)); + ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS, + HIBMC_MAX_VECTORS, PCI_IRQ_MSI); + if (ret < 0) { + drm_err(dev, "enabling MSI failed: %d\n", ret); + return ret; + } + + valid_irq_num = ret; + + for (int i = 0; i < valid_irq_num; i++) { + snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s", + dev->driver->name, pci_name(pdev), g_irqs_names_map[i]); + + irq = pci_irq_vector(pdev, i); + + if (i) + /* PCI devices require shared interrupts. 
*/ - ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED, - dev->driver->name, dev); - if (ret) - drm_warn(dev, "install irq failed: %d\n", ret); + drm_err(dev, "hibmc msi init failed, ret:%d\n", ret); + goto err; } /* reset all the states of crtc/plane/encoder/connector */ @@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev) static void hibmc_pci_shutdown(struct pci_dev *pdev) { - drm_atomic_helper_shutdown(pci_get_drvdata(pdev)); + hibmc_pci_remove(pdev); } static const struct pci_device_id hibmc_pci_table[] = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index d982f1e4b958..274feabe7df0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -22,6 +22,9 @@ #include "dp/dp_hw.h" +#define HIBMC_MIN_VECTORS 1 +#define HIBMC_MAX_VECTORS 2 + struct hibmc_vdac { struct drm_device *dev; struct drm_encoder encoder; @@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector) return container_of(connector, struct hibmc_vdac, connector); } +static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector) +{ + return container_of(connector, struct hibmc_dp, connector); +} + static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev) { return container_of(dev, struct hibmc_drm_private, dev); @@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector); int hibmc_dp_init(struct hibmc_drm_private *priv); +void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root); + +irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg); + #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index 05e19ea4c9f9..e8a527ede854 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs hibmc_connector_helper_funcs = { .get_modes = hibmc_connector_get_modes, + .detect_ctx = drm_connector_helper_detect_from_ddc, }; static const struct drm_connector_funcs hibmc_connector_funcs = { @@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) drm_connector_attach_encoder(connector, encoder); + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + return 0; } diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 2eea9fb0e76b..e80debdc4176 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = { static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) { struct dsi_hw_ctx *ctx = dsi->ctx; - struct resource *res; ctx->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(ctx->pclk)) { @@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) return PTR_ERR(ctx->pclk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(&pdev->dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) { DRM_ERROR("failed to remap dsi io region\n"); return PTR_ERR(ctx->base); diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 
2eb49177ac42..45c4eb008ad5 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = { static void *ade_hw_ctx_alloc(struct platform_device *pdev, struct drm_crtc *crtc) { - struct resource *res; struct device *dev = &pdev->dev; struct device_node *np = pdev->dev.of_node; struct ade_hw_ctx *ctx = NULL; @@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev, return ERR_PTR(-ENOMEM); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) { DRM_ERROR("failed to remap ade io base\n"); return ERR_PTR(-EIO); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ed05b131ed3a..c8fc271b33b7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -247,6 +247,7 @@ i915-y += \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ display/intel_display_reset.o \ + display/intel_display_rpm.o \ display/intel_display_rps.o \ display/intel_display_snapshot.o \ display/intel_display_wa.o \ diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c index 206818f9ad49..f10c0fb8d2c8 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c @@ -25,6 +25,8 @@ * */ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c index 10ab3cc73e58..49f02aca818b 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c @@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c index d9c3152d4338..0713b2709412 100644 --- a/drivers/gpu/drm/i915/display/dvo_ivch.c +++ b/drivers/gpu/drm/i915/display/dvo_ivch.c @@ -29,6 +29,8 @@ * */ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c index 92d32d6b5bce..80b71bd6a837 100644 --- a/drivers/gpu/drm/i915/display/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c @@ -26,6 +26,8 @@ * */ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c index b42c717085f3..017b617a8069 100644 --- a/drivers/gpu/drm/i915/display/dvo_sil164.c +++ b/drivers/gpu/drm/i915/display/dvo_sil164.c @@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************/ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c index 280699438526..ed560e3438db 100644 --- a/drivers/gpu/drm/i915/display/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c @@ -25,6 +25,8 @@ * */ +#include <drm/drm_print.h> + #include "intel_display_types.h" #include "intel_dvo_dev.h" diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 55b9e9bfcc4d..b39aae9165df 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -519,7 +519,7 @@ static void intel_disable_dp(struct intel_atomic_state *state, { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - intel_dp->link_trained = false; + intel_dp->link.active = false; /* * Make sure the panel is off before trying to change the mode. diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 674a0e5f0858..4307e2ed03d9 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -10,6 +10,7 @@ #include "i915_reg.h" #include "intel_color_regs.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_pcode.h" @@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); seq_printf(m, "Enabled by kernel parameter: %s\n", str_yes_no(display->params.enable_ips)); @@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 013295f66d56..5e8344fdfc28 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -630,84 +630,92 @@ vlv_primary_async_flip(struct intel_dsb *dsb, static void bdw_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void bdw_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_enable_display_irq(i915, 
DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); + ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); + ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); + ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); + ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); + i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); + i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 7c80e37c1c5f..40751f1547b7 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -9,6 +9,7 @@ #include "i9xx_wm_regs.h" #include "intel_atomic.h" #include "intel_bo.h" +#include "intel_de.h" #include "intel_display.h" #include "intel_display_trace.h" #include "intel_fb.h" @@ -81,13 +82,14 @@ static const struct cxsr_latency cxsr_latency_table[] = { {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ }; -static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915) +static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); int i; for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { const struct cxsr_latency *latency = &cxsr_latency_table[i]; - bool is_desktop = !IS_MOBILE(i915); + bool is_desktop = !display->platform.mobile; if (is_desktop == latency->is_desktop && i915->is_ddr3 == latency->is_ddr3 && @@ -96,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private * return latency; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n", 
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq); return NULL; } -static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) +static void chv_set_memory_dvfs(struct intel_display *display, bool enable) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val; vlv_punit_get(dev_priv); @@ -120,14 +123,15 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "timed out waiting for Punit DDR DVFS request\n"); vlv_punit_put(dev_priv); } -static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) +static void chv_set_memory_pm5(struct intel_display *display, bool enable) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val; vlv_punit_get(dev_priv); @@ -145,53 +149,52 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) #define FW_WM(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) -static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable) { - struct intel_display *display = &dev_priv->display; bool was_enabled; u32 val; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); - } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_PINEVIEW(dev_priv)) { - val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv)); + if (display->platform.valleyview || display->platform.cherryview) { + was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + intel_de_posting_read(display, FW_BLC_SELF_VLV); + } else if (display->platform.g4x || display->platform.i965gm) { + was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN; + intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); + intel_de_posting_read(display, FW_BLC_SELF); + } else if (display->platform.pineview) { + val = intel_de_read(display, DSPFW3(display)); was_enabled = val & PINEVIEW_SELF_REFRESH_EN; if (enable) val |= PINEVIEW_SELF_REFRESH_EN; else val &= ~PINEVIEW_SELF_REFRESH_EN; - intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val); - intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv)); - } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + intel_de_write(display, DSPFW3(display), val); + intel_de_posting_read(display, DSPFW3(display)); + } else if (display->platform.i945g || display->platform.i945gm) { + was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN; val = enable ? 
_MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_I915GM(dev_priv)) { + intel_de_write(display, FW_BLC_SELF, val); + intel_de_posting_read(display, FW_BLC_SELF); + } else if (display->platform.i915gm) { /* * FIXME can't find a bit like this for 915G, and * yet it does have the related watermark in * FW_BLC_SELF. What's going on? */ - was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; + was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN; val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : _MASKED_BIT_DISABLE(INSTPM_SELF_EN); - intel_uncore_write(&dev_priv->uncore, INSTPM, val); - intel_uncore_posting_read(&dev_priv->uncore, INSTPM); + intel_de_write(display, INSTPM, val); + intel_de_posting_read(display, INSTPM); } else { return false; } trace_intel_memory_cxsr(display, was_enabled, enable); - drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", + drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n", str_enabled_disabled(enable), str_enabled_disabled(was_enabled)); @@ -200,7 +203,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl /** * intel_set_memory_cxsr - Configure CxSR state - * @dev_priv: i915 device + * @display: display device * @enable: Allow vs. disallow CxSR * * Allow or disallow the system to enter a special CxSR @@ -235,17 +238,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl * the hardware w.r.t. HPLL SR when writing to plane registers. * Disallowing just CxSR is sufficient. */ -bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +bool intel_set_memory_cxsr(struct intel_display *display, bool enable) { bool ret; - mutex_lock(&dev_priv->display.wm.wm_mutex); - ret = _intel_set_memory_cxsr(dev_priv, enable); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->display.wm.vlv.cxsr = enable; - else if (IS_G4X(dev_priv)) - dev_priv->display.wm.g4x.cxsr = enable; - mutex_unlock(&dev_priv->display.wm.wm_mutex); + mutex_lock(&display->wm.wm_mutex); + ret = _intel_set_memory_cxsr(display, enable); + if (display->platform.valleyview || display->platform.cherryview) + display->wm.vlv.cxsr = enable; + else if (display->platform.g4x) + display->wm.g4x.cxsr = enable; + mutex_unlock(&display->wm.wm_mutex); return ret; } @@ -271,8 +274,8 @@ static const int pessimal_latency_ns = 5000; static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; enum pipe pipe = crtc->pipe; int sprite0_start, sprite1_start; @@ -280,22 +283,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) switch (pipe) { case PIPE_A: - dsparb = intel_uncore_read(&dev_priv->uncore, - DSPARB(dev_priv)); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + dsparb = intel_de_read(display, DSPARB(display)); + dsparb2 = intel_de_read(display, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); break; case PIPE_B: - dsparb = intel_uncore_read(&dev_priv->uncore, - DSPARB(dev_priv)); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + 
dsparb = intel_de_read(display, DSPARB(display)); + dsparb2 = intel_de_read(display, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); break; case PIPE_C: - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); + dsparb2 = intel_de_read(display, DSPARB2); + dsparb3 = intel_de_read(display, DSPARB3); sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); break; @@ -310,26 +311,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) fifo_state->plane[PLANE_CURSOR] = 63; } -static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, +static int i9xx_get_fifo_size(struct intel_display *display, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv)); + u32 dsparb = intel_de_read(display, DSPARB(display)); int size; size = dsparb & 0x7f; if (i9xx_plane == PLANE_B) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; } -static int i830_get_fifo_size(struct drm_i915_private *dev_priv, +static int i830_get_fifo_size(struct intel_display *display, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv)); + u32 dsparb = intel_de_read(display, DSPARB(display)); int size; size = dsparb & 0x1ff; @@ -337,22 +338,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; } -static int i845_get_fifo_size(struct drm_i915_private *dev_priv, +static int i845_get_fifo_size(struct intel_display *display, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv)); + u32 dsparb = intel_de_read(display, DSPARB(display)); int size; size = dsparb & 0x7f; size >>= 2; /* Convert to cachelines */ - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n", dsparb, plane_name(i9xx_plane), size); return size; @@ -537,7 +538,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate, /** * intel_calculate_wm - calculate watermark level - * @i915: the device + * @display: display device * @pixel_rate: pixel clock * @wm: chip FIFO params * @fifo_size: size of the FIFO buffer @@ -555,7 +556,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate, * past the watermark point. If the FIFO drains completely, a FIFO underrun * will occur, and a display engine hang could result. 
*/ -static unsigned int intel_calculate_wm(struct drm_i915_private *i915, +static unsigned int intel_calculate_wm(struct intel_display *display, int pixel_rate, const struct intel_watermark_params *wm, int fifo_size, int cpp, @@ -573,10 +574,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915, latency_ns / 100); entries = DIV_ROUND_UP(entries, wm->cacheline_size) + wm->guard_size; - drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries); + drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries); wm_size = fifo_size - entries; - drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size); + drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size); /* Don't promote wm_size to unsigned... */ if (wm_size > wm->max_wm) @@ -626,11 +627,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc) crtc->config->hw.adjusted_mode.crtc_clock; } -static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) +static struct intel_crtc *single_enabled_crtc(struct intel_display *display) { struct intel_crtc *crtc, *enabled = NULL; - for_each_intel_crtc(&dev_priv->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { if (intel_crtc_active(crtc)) { if (enabled) return NULL; @@ -641,21 +642,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) return enabled; } -static void pnv_update_wm(struct drm_i915_private *dev_priv) +static void pnv_update_wm(struct intel_display *display) { struct intel_crtc *crtc; const struct cxsr_latency *latency; u32 reg; unsigned int wm; - latency = pnv_get_cxsr_latency(dev_priv); + latency = pnv_get_cxsr_latency(display); if (!latency) { - drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n"); - intel_set_memory_cxsr(dev_priv, false); + drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n"); + intel_set_memory_cxsr(display, false); return; } - crtc = single_enabled_crtc(dev_priv); + crtc = single_enabled_crtc(display); if (crtc) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; @@ -663,47 +664,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) int cpp = fb->format->cpp[0]; /* Display SR */ - wm = intel_calculate_wm(dev_priv, pixel_rate, + wm = intel_calculate_wm(display, pixel_rate, &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv)); + reg = intel_de_read(display, DSPFW1(display)); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); - intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); + intel_de_write(display, DSPFW1(display), reg); + drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(dev_priv, pixel_rate, + wm = intel_calculate_wm(display, pixel_rate, &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); - intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv), - DSPFW_CURSOR_SR_MASK, - FW_WM(wm, CURSOR_SR)); + intel_de_rmw(display, DSPFW3(display), + DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR)); /* Display HPLL off SR */ - wm = intel_calculate_wm(dev_priv, pixel_rate, + wm = intel_calculate_wm(display, pixel_rate, &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); - intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv), - DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); + intel_de_rmw(display, DSPFW3(display), + DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); /* cursor 
HPLL off SR */ - wm = intel_calculate_wm(dev_priv, pixel_rate, + wm = intel_calculate_wm(display, pixel_rate, &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv)); + reg = intel_de_read(display, DSPFW3(display)); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); - intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); + intel_de_write(display, DSPFW3(display), reg); + drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg); - intel_set_memory_cxsr(dev_priv, true); + intel_set_memory_cxsr(display, true); } else { - intel_set_memory_cxsr(dev_priv, false); + intel_set_memory_cxsr(display, false); } } @@ -794,53 +794,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) return max(0, tlb_miss); } -static void g4x_write_wm_values(struct drm_i915_private *dev_priv, +static void g4x_write_wm_values(struct intel_display *display, const struct g4x_wm_values *wm) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm); - intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv), - (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | - FW_WM(wm->sr.fbc, FBC_SR) | - FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), - (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | - FW_WM(wm->sr.cursor, CURSOR_SR) | - FW_WM(wm->hpll.cursor, HPLL_CURSOR) | - FW_WM(wm->hpll.plane, HPLL_SR)); - - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv)); + intel_de_write(display, DSPFW1(display), + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_de_write(display, DSPFW2(display), + (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | + FW_WM(wm->sr.fbc, FBC_SR) | + FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_de_write(display, DSPFW3(display), + (wm->hpll_en ? 
DSPFW_HPLL_SR_EN : 0) | + FW_WM(wm->sr.cursor, CURSOR_SR) | + FW_WM(wm->hpll.cursor, HPLL_CURSOR) | + FW_WM(wm->hpll.plane, HPLL_SR)); + + intel_de_posting_read(display, DSPFW1(display)); } #define FW_WM_VLV(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) -static void vlv_write_wm_values(struct drm_i915_private *dev_priv, +static void vlv_write_wm_values(struct intel_display *display, const struct vlv_wm_values *wm) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm); - intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), - (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | - (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | - (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | - (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + intel_de_write(display, VLV_DDL(pipe), + (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); } /* @@ -848,72 +846,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, * high order bits so that there are no out of bounds values * present in the registers during the reprogramming. */ - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); - intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); - - intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv), - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), - FW_WM(wm->sr.cursor, CURSOR_SR)); - - if (IS_CHERRYVIEW(dev_priv)) { - intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); - intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + 
intel_de_write(display, DSPHOWM, 0); + intel_de_write(display, DSPHOWM1, 0); + intel_de_write(display, DSPFW4, 0); + intel_de_write(display, DSPFW5, 0); + intel_de_write(display, DSPFW6, 0); + + intel_de_write(display, DSPFW1(display), + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_de_write(display, DSPFW2(display), + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_de_write(display, DSPFW3(display), + FW_WM(wm->sr.cursor, CURSOR_SR)); + + if (display->platform.cherryview) { + intel_de_write(display, DSPFW7_CHV, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_de_write(display, DSPFW8_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); + intel_de_write(display, DSPFW9_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); + intel_de_write(display, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } else { - intel_uncore_write(&dev_priv->uncore, DSPFW7, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + intel_de_write(display, DSPFW7, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_de_write(display, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv)); + intel_de_posting_read(display, DSPFW1(display)); } #undef FW_WM_VLV -static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) +static void g4x_setup_wm_latency(struct intel_display *display) { /* all latencies in usec */ - 
dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; + display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; + display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12; + display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; - dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1; + display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1; } static int g4x_plane_fifo_size(enum plane_id plane_id, int level) @@ -962,11 +960,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { + struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; + unsigned int latency = display->wm.pri_latency[level] * 10; unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) @@ -1017,10 +1015,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); bool dirty = false; - for (; level < dev_priv->display.wm.num_levels; level++) { + for (; level < display->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->plane[plane_id] != value; @@ -1033,13 +1031,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, int level, u16 value) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); bool dirty = false; /* NORMAL level doesn't have an FBC watermark */ level = max(level, G4X_WM_LEVEL_SR); - for (; level < dev_priv->display.wm.num_levels; level++) { + for (; level < display->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; dirty |= raw->fbc != value; @@ -1056,8 +1054,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; bool dirty = false; int level; @@ -1069,7 +1067,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, goto out; } - for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + for (level = 0; level < display->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; int wm, max_wm; @@ -1109,7 +1107,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", plane->base.name, crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], @@ -1117,7 +1115,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, 
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); if (plane_id == PLANE_PRIMARY) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "FBC watermarks: SR=%d, HPLL=%d\n", crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); @@ -1137,9 +1135,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (level >= dev_priv->display.wm.num_levels) + if (level >= display->wm.num_levels) return false; return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && @@ -1281,7 +1279,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state, static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_crtc_state *old_crtc_state = @@ -1311,7 +1309,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, max(optimal->wm.plane[plane_id], active->wm.plane[plane_id]); - drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > + drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] > g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); } @@ -1329,23 +1327,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, intermediate->hpll.fbc = max(optimal->hpll.fbc, active->hpll.fbc); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, (intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, (intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, intermediate->sr.fbc > g4x_fbc_fifo_size(1) && intermediate->fbc_en && intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && intermediate->fbc_en && intermediate->hpll_en); @@ -1376,7 +1374,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state, return 0; } -static void g4x_merge_wm(struct drm_i915_private *dev_priv, +static void g4x_merge_wm(struct intel_display *display, struct g4x_wm_values *wm) { struct intel_crtc *crtc; @@ -1386,7 +1384,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, wm->hpll_en = true; wm->fbc_en = true; - for_each_intel_crtc(&dev_priv->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; if (!crtc->active) @@ -1408,7 +1406,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, wm->fbc_en = false; } - for_each_intel_crtc(&dev_priv->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; enum pipe pipe = crtc->pipe; @@ -1420,23 +1418,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, } } -static void g4x_program_watermarks(struct drm_i915_private *dev_priv) +static void 
g4x_program_watermarks(struct intel_display *display) { - struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; + struct g4x_wm_values *old_wm = &display->wm.g4x; struct g4x_wm_values new_wm = {}; - g4x_merge_wm(dev_priv, &new_wm); + g4x_merge_wm(display, &new_wm); if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) return; if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); + _intel_set_memory_cxsr(display, false); - g4x_write_wm_values(dev_priv, &new_wm); + g4x_write_wm_values(display, &new_wm); if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); + _intel_set_memory_cxsr(display, true); *old_wm = new_wm; } @@ -1444,30 +1442,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv) static void g4x_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->display.wm.wm_mutex); + mutex_lock(&display->wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); + g4x_program_watermarks(display); + mutex_unlock(&display->wm.wm_mutex); } static void g4x_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->display.wm.wm_mutex); + mutex_lock(&display->wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); + g4x_program_watermarks(display); + mutex_unlock(&display->wm.wm_mutex); } /* latency must be in 0.1us units. 
*/ @@ -1486,18 +1484,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate, return ret; } -static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) +static void vlv_setup_wm_latency(struct intel_display *display) { /* all latencies in usec */ - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; + display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; - dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1; + display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1; - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; + if (display->platform.cherryview) { + display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; + display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; - dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; + display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; } } @@ -1505,13 +1503,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { + struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; unsigned int pixel_rate, htotal, cpp, width, wm; - if (dev_priv->display.wm.pri_latency[level] == 0) + if (display->wm.pri_latency[level] == 0) return USHRT_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) @@ -1532,7 +1530,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, wm = 63; } else { wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, - dev_priv->display.wm.pri_latency[level] * 10); + display->wm.pri_latency[level] * 10); } return min_t(unsigned int, wm, USHRT_MAX); @@ -1546,8 +1544,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; @@ -1616,11 +1614,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) fifo_left -= plane_extra; } - drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); + drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0); /* give it all to the first plane if none are active */ if (active_planes == 0) { - drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); + drm_WARN_ON(display->drm, fifo_left != fifo_size); fifo_state->plane[PLANE_PRIMARY] = fifo_left; } @@ -1631,9 +1629,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) static void vlv_invalidate_wms(struct intel_crtc *crtc, struct vlv_wm_state *wm_state, int level) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - for (; level < dev_priv->display.wm.num_levels; level++) { + for (; level < display->wm.num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) @@ -1659,10 +1657,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { - struct drm_i915_private *dev_priv = 
to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); bool dirty = false; - for (; level < dev_priv->display.wm.num_levels; level++) { + for (; level < display->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; dirty |= raw->plane[plane_id] != value; @@ -1675,8 +1673,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum plane_id plane_id = plane->id; int level; bool dirty = false; @@ -1686,7 +1684,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, goto out; } - for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + for (level = 0; level < display->wm.num_levels; level++) { struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; int wm = vlv_compute_wm_level(crtc_state, plane_state, level); int max_wm = plane_id == PLANE_CURSOR ? 63 : 511; @@ -1703,7 +1701,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, out: if (dirty) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", plane->base.name, crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], @@ -1734,8 +1732,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; @@ -1745,7 +1743,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) int level; /* initially allow all levels */ - wm_state->num_levels = dev_priv->display.wm.num_levels; + wm_state->num_levels = display->wm.num_levels; /* * Note that enabling cxsr with no primary/sprite planes * enabled can wedge the pipe. 
Hence we only allow cxsr @@ -1755,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) for (level = 0; level < wm_state->num_levels; level++) { const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; + const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1; if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) break; @@ -1855,6 +1853,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state, static void vlv_atomic_update_fifo(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_uncore *uncore = &dev_priv->uncore; const struct intel_crtc_state *crtc_state = @@ -1871,8 +1870,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; - drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); - drm_WARN_ON(&dev_priv->drm, fifo_size != 511); + drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63); + drm_WARN_ON(display->drm, fifo_size != 511); trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); @@ -1889,8 +1888,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, switch (crtc->pipe) { case PIPE_A: - dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv)); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + dsparb = intel_de_read_fw(display, DSPARB(display)); + dsparb2 = intel_de_read_fw(display, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | VLV_FIFO(SPRITEB, 0xff)); @@ -1902,12 +1901,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); - intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + intel_de_write_fw(display, DSPARB(display), dsparb); + intel_de_write_fw(display, DSPARB2, dsparb2); break; case PIPE_B: - dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv)); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + dsparb = intel_de_read_fw(display, DSPARB(display)); + dsparb2 = intel_de_read_fw(display, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | VLV_FIFO(SPRITED, 0xff)); @@ -1919,12 +1918,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); - intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + intel_de_write_fw(display, DSPARB(display), dsparb); + intel_de_write_fw(display, DSPARB2, dsparb2); break; case PIPE_C: - dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + dsparb3 = intel_de_read_fw(display, DSPARB3); + dsparb2 = intel_de_read_fw(display, DSPARB2); dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | VLV_FIFO(SPRITEF, 0xff)); @@ -1936,14 +1935,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); - intel_uncore_write_fw(uncore, DSPARB3, dsparb3); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + intel_de_write_fw(display, DSPARB3, dsparb3); + intel_de_write_fw(display, DSPARB2, dsparb2); break; default: break; } - 
intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv)); + intel_de_read_fw(display, DSPARB(display)); spin_unlock(&uncore->lock); } @@ -2018,16 +2017,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state, return 0; } -static void vlv_merge_wm(struct drm_i915_private *dev_priv, +static void vlv_merge_wm(struct intel_display *display, struct vlv_wm_values *wm) { struct intel_crtc *crtc; int num_active_pipes = 0; - wm->level = dev_priv->display.wm.num_levels - 1; + wm->level = display->wm.num_levels - 1; wm->cxsr = true; - for_each_intel_crtc(&dev_priv->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; if (!crtc->active) @@ -2046,7 +2045,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, if (num_active_pipes > 1) wm->level = VLV_WM_LEVEL_PM2; - for_each_intel_crtc(&dev_priv->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; enum pipe pipe = crtc->pipe; @@ -2061,35 +2060,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, } } -static void vlv_program_watermarks(struct drm_i915_private *dev_priv) +static void vlv_program_watermarks(struct intel_display *display) { - struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; + struct vlv_wm_values *old_wm = &display->wm.vlv; struct vlv_wm_values new_wm = {}; - vlv_merge_wm(dev_priv, &new_wm); + vlv_merge_wm(display, &new_wm); if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) return; if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, false); + chv_set_memory_dvfs(display, false); if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, false); + chv_set_memory_pm5(display, false); if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); + _intel_set_memory_cxsr(display, false); - vlv_write_wm_values(dev_priv, &new_wm); + vlv_write_wm_values(display, &new_wm); if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); + _intel_set_memory_cxsr(display, true); if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, true); + chv_set_memory_pm5(display, true); if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, true); + chv_set_memory_dvfs(display, true); *old_wm = new_wm; } @@ -2097,33 +2096,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv) static void vlv_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->display.wm.wm_mutex); + mutex_lock(&display->wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); + vlv_program_watermarks(display); + mutex_unlock(&display->wm.wm_mutex); } static void vlv_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; - 
mutex_lock(&dev_priv->display.wm.wm_mutex); + mutex_lock(&display->wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); + vlv_program_watermarks(display); + mutex_unlock(&display->wm.wm_mutex); } -static void i965_update_wm(struct drm_i915_private *dev_priv) +static void i965_update_wm(struct intel_display *display) { struct intel_crtc *crtc; int srwm = 1; @@ -2131,7 +2130,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) bool cxsr_enabled; /* Calc sr entries for one plane configs */ - crtc = single_enabled_crtc(dev_priv); + crtc = single_enabled_crtc(display); if (crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 12000; @@ -2152,7 +2151,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) if (srwm < 0) srwm = 1; srwm &= 0x1ff; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "self-refresh entries: %d, wm: %d\n", entries, srwm); @@ -2167,7 +2166,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "self-refresh watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); @@ -2175,39 +2174,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) } else { cxsr_enabled = false; /* Turn off self refresh if both pipes are enabled */ - intel_set_memory_cxsr(dev_priv, false); + intel_set_memory_cxsr(display, false); } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", srwm); /* 965 has limitations... */ - intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), - FW_WM(srwm, SR) | - FW_WM(8, CURSORB) | - FW_WM(8, PLANEB) | - FW_WM(8, PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv), - FW_WM(8, CURSORA) | - FW_WM(8, PLANEC_OLD)); + intel_de_write(display, DSPFW1(display), + FW_WM(srwm, SR) | + FW_WM(8, CURSORB) | + FW_WM(8, PLANEB) | + FW_WM(8, PLANEA)); + intel_de_write(display, DSPFW2(display), + FW_WM(8, CURSORA) | + FW_WM(8, PLANEC_OLD)); /* update cursor SR watermark */ - intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), - FW_WM(cursor_sr, CURSOR_SR)); + intel_de_write(display, DSPFW3(display), + FW_WM(cursor_sr, CURSOR_SR)); if (cxsr_enabled) - intel_set_memory_cxsr(dev_priv, true); + intel_set_memory_cxsr(display, true); } #undef FW_WM -static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, +static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display, enum i9xx_plane_id i9xx_plane) { - struct intel_display *display = &i915->display; struct intel_plane *plane; - for_each_intel_plane(&i915->drm, plane) { + for_each_intel_plane(display->drm, plane) { if (plane->id == PLANE_PRIMARY && plane->i9xx_plane == i9xx_plane) return intel_crtc_for_pipe(display, plane->pipe); @@ -2216,7 +2214,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, return NULL; } -static void i9xx_update_wm(struct drm_i915_private *dev_priv) +static void i9xx_update_wm(struct intel_display *display) { const struct intel_watermark_params *wm_info; u32 fwater_lo; @@ -2226,29 +2224,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) int planea_wm, planeb_wm; struct intel_crtc *crtc; - if (IS_I945GM(dev_priv)) + if (display->platform.i945gm) wm_info = &i945_wm_info; - else if (DISPLAY_VER(dev_priv) != 2) + else if 
(DISPLAY_VER(display) != 2) wm_info = &i915_wm_info; else wm_info = &i830_a_wm_info; - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); + if (DISPLAY_VER(display) == 2) + fifo_size = i830_get_fifo_size(display, PLANE_A); else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); - crtc = intel_crtc_for_plane(dev_priv, PLANE_A); + fifo_size = i9xx_get_fifo_size(display, PLANE_A); + crtc = intel_crtc_for_plane(display, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(display) == 2) cpp = 4; else cpp = fb->format->cpp[0]; - planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, + planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { @@ -2257,25 +2255,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) planea_wm = wm_info->max_wm; } - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(display) == 2) wm_info = &i830_bc_wm_info; - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); + if (DISPLAY_VER(display) == 2) + fifo_size = i830_get_fifo_size(display, PLANE_B); else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); - crtc = intel_crtc_for_plane(dev_priv, PLANE_B); + fifo_size = i9xx_get_fifo_size(display, PLANE_B); + crtc = intel_crtc_for_plane(display, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(display) == 2) cpp = 4; else cpp = fb->format->cpp[0]; - planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, + planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { @@ -2284,11 +2282,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) planeb_wm = wm_info->max_wm; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - crtc = single_enabled_crtc(dev_priv); - if (IS_I915GM(dev_priv) && crtc) { + crtc = single_enabled_crtc(display); + if (display->platform.i915gm && crtc) { struct drm_gem_object *obj; obj = intel_fb_bo(crtc->base.primary->state->fb); @@ -2304,10 +2302,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) cwm = 2; /* Play safe and disable self-refresh before adjusting watermarks. 
*/ - intel_set_memory_cxsr(dev_priv, false); + intel_set_memory_cxsr(display, false); /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev_priv) && crtc) { + if (HAS_FW_BLC(display) && crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; const struct drm_display_mode *pipe_mode = @@ -2320,7 +2318,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) int cpp; int entries; - if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) + if (display->platform.i915gm || display->platform.i945gm) cpp = 4; else cpp = fb->format->cpp[0]; @@ -2328,20 +2326,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) entries = intel_wm_method2(pixel_rate, htotal, width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; if (srwm < 0) srwm = 1; - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + if (display->platform.i945g || display->platform.i945gm) + intel_de_write(display, FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); else - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); + intel_de_write(display, FW_BLC_SELF, srwm & 0x3f); } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", planea_wm, planeb_wm, cwm, srwm); @@ -2352,34 +2350,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fwater_lo = fwater_lo | (1 << 24) | (1 << 8); fwater_hi = fwater_hi | (1 << 8); - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); - intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); + intel_de_write(display, FW_BLC, fwater_lo); + intel_de_write(display, FW_BLC2, fwater_hi); if (crtc) - intel_set_memory_cxsr(dev_priv, true); + intel_set_memory_cxsr(display, true); } -static void i845_update_wm(struct drm_i915_private *dev_priv) +static void i845_update_wm(struct intel_display *display) { struct intel_crtc *crtc; u32 fwater_lo; int planea_wm; - crtc = single_enabled_crtc(dev_priv); + crtc = single_enabled_crtc(display); if (crtc == NULL) return; - planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, + planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate, &i845_wm_info, - i845_get_fifo_size(dev_priv, PLANE_A), + i845_get_fifo_size(display, PLANE_A), 4, pessimal_latency_ns); - fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; + fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Setting FIFO watermarks - A: %d\n", planea_wm); - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); + intel_de_write(display, FW_BLC, fwater_lo); } /* latency must be in 0.1us units. 
*/ @@ -2534,24 +2532,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, } static unsigned int -ilk_display_fifo_size(const struct drm_i915_private *dev_priv) +ilk_display_fifo_size(struct intel_display *display) { - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) return 3072; - else if (DISPLAY_VER(dev_priv) >= 7) + else if (DISPLAY_VER(display) >= 7) return 768; else return 512; } static unsigned int -ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, +ilk_plane_wm_reg_max(struct intel_display *display, int level, bool is_sprite) { - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) /* BDW primary/sprite plane watermarks */ return level == 0 ? 255 : 2047; - else if (DISPLAY_VER(dev_priv) >= 7) + else if (DISPLAY_VER(display) >= 7) /* IVB/HSW primary/sprite plane watermarks */ return level == 0 ? 127 : 1023; else if (!is_sprite) @@ -2563,30 +2561,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, } static unsigned int -ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) +ilk_cursor_wm_reg_max(struct intel_display *display, int level) { - if (DISPLAY_VER(dev_priv) >= 7) + if (DISPLAY_VER(display) >= 7) return level == 0 ? 63 : 255; else return level == 0 ? 31 : 63; } -static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) +static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display) { - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) return 31; else return 15; } /* Calculate the maximum primary/sprite plane watermark */ -static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, +static unsigned int ilk_plane_wm_max(struct intel_display *display, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, bool is_sprite) { - unsigned int fifo_size = ilk_display_fifo_size(dev_priv); + unsigned int fifo_size = ilk_display_fifo_size(display); /* if sprites aren't enabled, sprites get nothing */ if (is_sprite && !config->sprites_enabled) @@ -2594,14 +2592,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, /* HSW allows LP1+ watermarks even with multiple pipes */ if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_NUM_PIPES(dev_priv); + fifo_size /= INTEL_NUM_PIPES(display); /* * For some reason the non self refresh * FIFO size is only half of the self * refresh FIFO size on ILK/SNB. 
*/ - if (DISPLAY_VER(dev_priv) < 7) + if (DISPLAY_VER(display) < 7) fifo_size /= 2; } @@ -2617,11 +2615,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, } /* clamp to max that the registers can hold */ - return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); + return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite)); } /* Calculate the maximum cursor plane watermark */ -static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, +static unsigned int ilk_cursor_wm_max(struct intel_display *display, int level, const struct intel_wm_config *config) { @@ -2630,32 +2628,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, return 64; /* otherwise just report max that registers can hold */ - return ilk_cursor_wm_reg_max(dev_priv, level); + return ilk_cursor_wm_reg_max(display, level); } -static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, +static void ilk_compute_wm_maximums(struct intel_display *display, int level, const struct intel_wm_config *config, enum intel_ddb_partitioning ddb_partitioning, struct ilk_wm_maximums *max) { - max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); - max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); - max->cur = ilk_cursor_wm_max(dev_priv, level, config); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); + max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(display, level, config); + max->fbc = ilk_fbc_wm_reg_max(display); } -static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, +static void ilk_compute_wm_reg_maximums(struct intel_display *display, int level, struct ilk_wm_maximums *max) { - max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); - max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); - max->cur = ilk_cursor_wm_reg_max(dev_priv, level); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); + max->pri = ilk_plane_wm_reg_max(display, level, false); + max->spr = ilk_plane_wm_reg_max(display, level, true); + max->cur = ilk_cursor_wm_reg_max(display, level); + max->fbc = ilk_fbc_wm_reg_max(display); } -static bool ilk_validate_wm_level(struct drm_i915_private *i915, +static bool ilk_validate_wm_level(struct intel_display *display, int level, const struct ilk_wm_maximums *max, struct intel_wm_level *result) @@ -2679,15 +2677,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915, */ if (level == 0 && !result->enable) { if (result->pri_val > max->pri) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Primary WM%d too large %u (max %u)\n", level, result->pri_val, max->pri); if (result->spr_val > max->spr) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Sprite WM%d too large %u (max %u)\n", level, result->spr_val, max->spr); if (result->cur_val > max->cur) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Cursor WM%d too large %u (max %u)\n", level, result->cur_val, max->cur); @@ -2700,7 +2698,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915, return ret; } -static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, +static void ilk_compute_wm_level(struct intel_display *display, const struct intel_crtc *crtc, int level, struct intel_crtc_state *crtc_state, @@ -2709,9 +2707,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private 
*dev_priv, const struct intel_plane_state *curstate, struct intel_wm_level *result) { - u16 pri_latency = dev_priv->display.wm.pri_latency[level]; - u16 spr_latency = dev_priv->display.wm.spr_latency[level]; - u16 cur_latency = dev_priv->display.wm.cur_latency[level]; + u16 pri_latency = display->wm.pri_latency[level]; + u16 spr_latency = display->wm.spr_latency[level]; + u16 cur_latency = display->wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units */ if (level > 0) { @@ -2735,11 +2733,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, result->enable = true; } -static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +static void hsw_read_wm_latency(struct intel_display *display, u16 wm[]) { + struct drm_i915_private *i915 = to_i915(display->drm); u64 sskpd; - i915->display.wm.num_levels = 5; + display->wm.num_levels = 5; sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); @@ -2752,11 +2751,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); } -static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +static void snb_read_wm_latency(struct intel_display *display, u16 wm[]) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 sskpd; - i915->display.wm.num_levels = 4; + display->wm.num_levels = 4; sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); @@ -2766,11 +2766,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); } -static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +static void ilk_read_wm_latency(struct intel_display *display, u16 wm[]) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 mltr; - i915->display.wm.num_levels = 3; + display->wm.num_levels = 3; mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); @@ -2780,24 +2781,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); } -static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) +static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) + if (DISPLAY_VER(display) == 5) wm[0] = 13; } -static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) +static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) + if (DISPLAY_VER(display) == 5) wm[0] = 13; } -static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5], u16 min) +static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min) { int level; @@ -2805,13 +2803,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, return false; wm[0] = max(wm[0], min); - for (level = 1; level < dev_priv->display.wm.num_levels; level++) + for (level = 1; level < display->wm.num_levels; level++) wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); return true; } -static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) +static void snb_wm_latency_quirk(struct intel_display *display) { bool changed; @@ -2819,21 +2817,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. 
*/ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); + changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12); if (!changed) return; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "WM latency values increased to avoid potential underruns\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); + intel_print_wm_latency(display, "Primary", display->wm.pri_latency); + intel_print_wm_latency(display, "Sprite", display->wm.spr_latency); + intel_print_wm_latency(display, "Cursor", display->wm.cur_latency); } -static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +static void snb_wm_lp3_irq_quirk(struct intel_display *display) { /* * On some SNB machines (Thinkpad X220 Tablet at least) @@ -2846,50 +2844,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) * interrupts only. To play it safe we disable LP3 * watermarks entirely. */ - if (dev_priv->display.wm.pri_latency[3] == 0 && - dev_priv->display.wm.spr_latency[3] == 0 && - dev_priv->display.wm.cur_latency[3] == 0) + if (display->wm.pri_latency[3] == 0 && + display->wm.spr_latency[3] == 0 && + display->wm.cur_latency[3] == 0) return; - dev_priv->display.wm.pri_latency[3] = 0; - dev_priv->display.wm.spr_latency[3] = 0; - dev_priv->display.wm.cur_latency[3] = 0; + display->wm.pri_latency[3] = 0; + display->wm.spr_latency[3] = 0; + display->wm.cur_latency[3] = 0; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "LP3 watermarks disabled due to potential for lost interrupts\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); + intel_print_wm_latency(display, "Primary", display->wm.pri_latency); + intel_print_wm_latency(display, "Sprite", display->wm.spr_latency); + intel_print_wm_latency(display, "Cursor", display->wm.cur_latency); } -static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) +static void ilk_setup_wm_latency(struct intel_display *display) { - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - else if (DISPLAY_VER(dev_priv) >= 6) - snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + if (display->platform.broadwell || display->platform.haswell) + hsw_read_wm_latency(display, display->wm.pri_latency); + else if (DISPLAY_VER(display) >= 6) + snb_read_wm_latency(display, display->wm.pri_latency); else - ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + ilk_read_wm_latency(display, display->wm.pri_latency); - memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); - memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); + memcpy(display->wm.spr_latency, display->wm.pri_latency, + 
sizeof(display->wm.pri_latency));
+	memcpy(display->wm.cur_latency, display->wm.pri_latency,
+	       sizeof(display->wm.pri_latency));
 
-	intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
-	intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+	intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+	intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
 
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+	intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+	intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+	intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
 
-	if (DISPLAY_VER(dev_priv) == 6) {
-		snb_wm_latency_quirk(dev_priv);
-		snb_wm_lp3_irq_quirk(dev_priv);
+	if (DISPLAY_VER(display) == 6) {
+		snb_wm_latency_quirk(display);
+		snb_wm_lp3_irq_quirk(display);
 	}
 }
 
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
 				 struct intel_pipe_wm *pipe_wm)
 {
 	/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2899,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
 	struct ilk_wm_maximums max;
 
 	/* LP0 watermarks always use 1/2 DDB partitioning */
-	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
 
 	/* At least LP0 must be valid */
-	if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
-		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+	if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+		drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
 		return false;
 	}
 
@@ -2916,7 +2914,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 			       struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_display *display = to_intel_display(state);
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2941,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
 	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
 
-	usable_level = dev_priv->display.wm.num_levels - 1;
+	usable_level = display->wm.num_levels - 1;
 
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
-	if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+	if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
 		usable_level = 1;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2952,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 		usable_level = 0;
 
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+	ilk_compute_wm_level(display, crtc, 0, crtc_state,
 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
-	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+	if (!ilk_validate_pipe_wm(display, pipe_wm))
 		return -EINVAL;
 
-	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+	ilk_compute_wm_reg_maximums(display, 1, &max);
 
 	for (level = 1; level <= usable_level; level++) {
 		struct intel_wm_level *wm = &pipe_wm->wm[level];
 
-		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+		ilk_compute_wm_level(display, crtc, level, crtc_state,
 				     pristate, sprstate, curstate, wm);
 
 		/*
@@ -2973,7 +2971,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+		if (!ilk_validate_wm_level(display, level, &max, wm)) {
 			memset(wm, 0, sizeof(*wm));
 			break;
 		}
@@ -2990,7 +2988,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
 				       struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 	const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3013,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
 	intermediate->sprites_enabled |= active->sprites_enabled;
 	intermediate->sprites_scaled |= active->sprites_scaled;
 
-	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+	for (level = 0; level < display->wm.num_levels; level++) {
 		struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
 		const struct intel_wm_level *active_wm = &active->wm[level];
 
@@ -3036,7 +3034,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
 	 * there's no safe way to transition from the old state to
 	 * the new state, so we need to fail the atomic transaction.
 	 */
-	if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+	if (!ilk_validate_pipe_wm(display, intermediate))
 		return -EINVAL;
 
 	/*
@@ -3068,7 +3066,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
 /*
  * Merge the watermarks from all active pipes for a specific level.
  */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
 			       int level,
 			       struct intel_wm_level *ret_wm)
 {
@@ -3076,7 +3074,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
 
 	ret_wm->enable = true;
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
 		const struct intel_wm_level *wm = &active->wm[level];
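[The hunks above touch ilk_merge_wm_level(), which folds the per-pipe watermarks into one worst-case level: to the best of my reading of this code, the merged level stays enabled only if every active pipe enables it, and each value is the maximum across pipes. A minimal standalone sketch of that merge rule, with illustrative types and names rather than the kernel's:]

	/* Worst-case merge across pipes, in the spirit of ilk_merge_wm_level(). */
	struct wm_level {
		int enable;	/* boolean */
		unsigned int pri_val, spr_val, cur_val;
	};

	static void merge_wm_level(struct wm_level *ret,
				   const struct wm_level *pipe_wm, int num_pipes)
	{
		int i;

		ret->enable = 1;
		for (i = 0; i < num_pipes; i++) {
			if (!pipe_wm[i].enable)
				ret->enable = 0; /* one disabled pipe disables the level */
			if (pipe_wm[i].pri_val > ret->pri_val)
				ret->pri_val = pipe_wm[i].pri_val; /* keep the worst case */
			if (pipe_wm[i].spr_val > ret->spr_val)
				ret->spr_val = pipe_wm[i].spr_val;
			if (pipe_wm[i].cur_val > ret->cur_val)
				ret->cur_val = pipe_wm[i].cur_val;
		}
	}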
@@ -3101,31 +3099,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
 /*
  * Merge all low power watermarks for all active pipes.
  */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
 			 const struct intel_wm_config *config,
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
-	int level, num_levels = dev_priv->display.wm.num_levels;
+	int level, num_levels = display->wm.num_levels;
 	int last_enabled_level = num_levels - 1;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
-	if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+	if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
 	    config->num_pipes_active > 1)
 		last_enabled_level = 0;
 
 	/* ILK: FBC WM must be disabled always */
-	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+	merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
 
 	/* merge each WM1+ level */
 	for (level = 1; level < num_levels; level++) {
 		struct intel_wm_level *wm = &merged->wm[level];
 
-		ilk_merge_wm_level(dev_priv, level, wm);
+		ilk_merge_wm_level(display, level, wm);
 
 		if (level > last_enabled_level)
 			wm->enable = false;
-		else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+		else if (!ilk_validate_wm_level(display, level, max, wm))
 			/* make sure all following levels get disabled */
 			last_enabled_level = level - 1;
 
@@ -3141,8 +3139,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
 	}
 
 	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
-	if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
-	    dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+	if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+	    display->params.enable_fbc && !merged->fbc_wm_enabled) {
 		for (level = 2; level < num_levels; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
@@ -3158,16 +3156,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 }
 
 /* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
 				      int level)
 {
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+	if (display->platform.haswell || display->platform.broadwell)
 		return 2 * level;
 	else
-		return dev_priv->display.wm.pri_latency[level];
+		return display->wm.pri_latency[level];
 }
 
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
 				   const struct intel_pipe_wm *merged,
 				   enum intel_ddb_partitioning partitioning,
 				   struct ilk_wm_values *results)
@@ -3191,14 +3189,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 		 * disabled. Doing otherwise could cause underruns.
 		 */
 		results->wm_lp[wm_lp - 1] =
-			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+			WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
 			WM_LP_PRIMARY(r->pri_val) |
 			WM_LP_CURSOR(r->cur_val);
 
 		if (r->enable)
 			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
 
-		if (DISPLAY_VER(dev_priv) >= 8)
+		if (DISPLAY_VER(display) >= 8)
 			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
 		else
 			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3207,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
 		 * level is disabled. Doing otherwise could cause underruns.
 		 */
-		if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
-			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+		if (DISPLAY_VER(display) < 7 && r->spr_val) {
+			drm_WARN_ON(display->drm, wm_lp != 1);
 			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
 		}
 	}
 
 	/* LP0 register values */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		enum pipe pipe = crtc->pipe;
 		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
 		const struct intel_wm_level *r = &pipe_wm->wm[0];
 
-		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+		if (drm_WARN_ON(display->drm, !r->enable))
 			continue;
 
 		results->wm_pipe[pipe] =
@@ -3236,13 +3234,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 * case both are at the same level. Prefer r1 in case they're the same. */
 static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
 		     struct intel_pipe_wm *r1,
 		     struct intel_pipe_wm *r2)
 {
 	int level, level1 = 0, level2 = 0;
 
-	for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+	for (level = 1; level < display->wm.num_levels; level++) {
 		if (r1->wm[level].enable)
 			level1 = level;
 		if (r2->wm[level].enable)
@@ -3268,7 +3266,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
 #define WM_DIRTY_FBC (1 << 24)
 #define WM_DIRTY_DDB (1 << 25)
 
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
 					 const struct ilk_wm_values *old,
 					 const struct ilk_wm_values *new)
 {
@@ -3276,7 +3274,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
 	enum pipe pipe;
 	int wm_lp;
 
-	for_each_pipe(dev_priv, pipe) {
+	for_each_pipe(display, pipe) {
 		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
 			dirty |= WM_DIRTY_PIPE(pipe);
 			/* Must disable LP1+ watermarks too */
@@ -3314,25 +3312,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
 	return dirty;
 }
 
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
 			       unsigned int dirty)
 {
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+	struct ilk_wm_values *previous = &display->wm.hw;
 	bool changed = false;
 
 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
 		previous->wm_lp[2] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+		intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
 		changed = true;
 	}
 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
 		previous->wm_lp[1] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+		intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
 		changed = true;
 	}
 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
 		previous->wm_lp[0] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+		intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
 		changed = true;
 	}
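[ilk_compute_wm_dirty() above diffs the previously programmed values against the new ones into a bitmask, and ilk_write_wm_values() in the next hunk only touches registers whose dirty bit is set; as that hunk's comment notes, every watermark write costs power because it forces re-evaluation. A condensed, self-contained sketch of the change-detection pattern, with made-up names:]

	#define DIRTY_BIT(i)	(1u << (i))

	static unsigned int compute_dirty(const unsigned int *old,
					  const unsigned int *new_vals, int n)
	{
		unsigned int dirty = 0;
		int i;

		for (i = 0; i < n; i++)
			if (old[i] != new_vals[i])
				dirty |= DIRTY_BIT(i);

		return dirty;	/* caller skips all register writes when this is 0 */
	}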
@@ -3348,73 +3346,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
  * The spec says we shouldn't write when we don't need, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
 				struct ilk_wm_values *results)
 {
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+	struct ilk_wm_values *previous = &display->wm.hw;
 	unsigned int dirty;
 
-	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+	dirty = ilk_compute_wm_dirty(display, previous, results);
 	if (!dirty)
 		return;
 
-	_ilk_disable_lp_wm(dev_priv, dirty);
+	_ilk_disable_lp_wm(display, dirty);
 
 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+		intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
 	if (dirty & WM_DIRTY_PIPE(PIPE_B))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+		intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+		intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
 
 	if (dirty & WM_DIRTY_DDB) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
-					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
-					 WM_MISC_DATA_PARTITION_5_6);
+		if (display->platform.haswell || display->platform.broadwell)
+			intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+				     results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+				     WM_MISC_DATA_PARTITION_5_6);
 		else
-			intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
-					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
-					 DISP_DATA_PARTITION_5_6);
+			intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+				     results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+				     DISP_DATA_PARTITION_5_6);
 	}
 
 	if (dirty & WM_DIRTY_FBC)
-		intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
-				 results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+		intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+			     results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
 
 	if (dirty & WM_DIRTY_LP(1) &&
 	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
-		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+		intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
 
-	if (DISPLAY_VER(dev_priv) >= 7) {
+	if (DISPLAY_VER(display) >= 7) {
 		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+			intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
 		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+			intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
 	}
 
 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
-		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+		intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
-		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+		intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
-		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+		intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
 
-	dev_priv->display.wm.hw = *results;
+	display->wm.hw = *results;
 }
 
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
 {
-	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+	return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
 }
 
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
 				  struct intel_wm_config *config)
 {
 	struct intel_crtc *crtc;
 
 	/* Compute the currently _active_ config */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
 
 		if (!wm->pipe_enabled)
@@ -3426,7 +3424,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
 {
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
@@ -3434,18 +3432,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 	struct ilk_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
 
-	ilk_compute_wm_config(dev_priv, &config);
+	ilk_compute_wm_config(display, &config);
 
-	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+	ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
 
 	/* 5/6 split only in single pipe config on IVB+ */
-	if (DISPLAY_VER(dev_priv) >= 7 &&
+	if (DISPLAY_VER(display) >= 7 &&
 	    config.num_pipes_active == 1 && config.sprites_enabled) {
-		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+		ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
 
-		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+		best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
 		best_lp_wm = &lp_wm_1_2;
 	}
@@ -3453,50 +3451,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+	ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
 
-	ilk_write_wm_values(dev_priv, &results);
+	ilk_write_wm_values(display, &results);
 }
 
 static void ilk_initial_watermarks(struct intel_atomic_state *state,
 				   struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	const struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
+	mutex_lock(&display->wm.wm_mutex);
 	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
+	ilk_program_watermarks(display);
+	mutex_unlock(&display->wm.wm_mutex);
 }
 
 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
 				    struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_display *display = to_intel_display(crtc);
 	const struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
 
 	if (!crtc_state->wm.need_postvbl_update)
 		return;
 
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
+	mutex_lock(&display->wm.wm_mutex);
 	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
+	ilk_program_watermarks(display);
+	mutex_unlock(&display->wm.wm_mutex);
 }
 
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+	struct intel_display *display = to_intel_display(crtc);
+	struct ilk_wm_values *hw = &display->wm.hw;
 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
 	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
 	enum pipe pipe = crtc->pipe;
 
-	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+	hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
 
 	memset(active, 0, sizeof(*active));
 
@@ -3523,7 +3520,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 		 * should be marked as enabled but zeroed,
 		 * which is what we'd compute them to.
 		 */
-		for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+		for (level = 0; level < display->wm.num_levels; level++)
 			active->wm[level].enable = true;
 	}
 
@@ -3572,7 +3569,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
 {
 	struct drm_atomic_state *state;
 	struct intel_atomic_state *intel_state;
@@ -3583,14 +3580,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
 	int i;
 
 	/* Only supported on platforms that use atomic watermark design */
-	if (!dev_priv->display.funcs.wm->optimize_watermarks)
+	if (!display->funcs.wm->optimize_watermarks)
 		return;
 
-	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+	if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
 		return;
 
-	state = drm_atomic_state_alloc(&dev_priv->drm);
-	if (drm_WARN_ON(&dev_priv->drm, !state))
+	state = drm_atomic_state_alloc(display->drm);
+	if (drm_WARN_ON(display->drm, !state))
 		return;
 
 	intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3603,14 @@ retry:
 	 * intermediate watermarks (since we don't trust the current
 	 * watermarks).
	 */
-	if (!HAS_GMCH(dev_priv))
+	if (!HAS_GMCH(display))
 		intel_state->skip_intermediate_wm = true;
 
 	ret = ilk_sanitize_watermarks_add_affected(state);
 	if (ret)
 		goto fail;
 
-	ret = intel_atomic_check(&dev_priv->drm, state);
+	ret = intel_atomic_check(display->drm, state);
 	if (ret)
 		goto fail;
 
@@ -3643,7 +3640,7 @@ fail:
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
-	drm_WARN(&dev_priv->drm, ret,
+	drm_WARN(display->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");
 
	drm_atomic_state_put(state);
@@ -3657,18 +3654,18 @@ fail:
 #define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
 
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
 			       struct g4x_wm_values *wm)
 {
 	u32 tmp;
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+	tmp = intel_de_read(display, DSPFW1(display));
 	wm->sr.plane = _FW_WM(tmp, SR);
 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+	tmp = intel_de_read(display, DSPFW2(display));
 	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
 	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
 	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3673,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+	tmp = intel_de_read(display, DSPFW3(display));
 	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
 	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
 	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
 }
 
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
 			       struct vlv_wm_values *wm)
 {
 	enum pipe pipe;
 	u32 tmp;
 
-	for_each_pipe(dev_priv, pipe) {
-		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+	for_each_pipe(display, pipe) {
+		tmp = intel_de_read(display, VLV_DDL(pipe));
 
 		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3699,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
 	}
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+	tmp = intel_de_read(display, DSPFW1(display));
 	wm->sr.plane = _FW_WM(tmp, SR);
 	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
 	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
 	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+	tmp = intel_de_read(display, DSPFW2(display));
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
 	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
 	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
 
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+	tmp = intel_de_read(display, DSPFW3(display));
 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
 
-	if (IS_CHERRYVIEW(dev_priv)) {
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+	if (display->platform.cherryview) {
+		tmp = intel_de_read(display, DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
 
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+		tmp = intel_de_read(display, DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
 
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+		tmp = intel_de_read(display, DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
 
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+		tmp = intel_de_read(display, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3738,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+		tmp = intel_de_read(display, DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
 
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+		tmp = intel_de_read(display, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3756,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 #undef _FW_WM
 #undef _FW_WM_VLV
 
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
 {
-	struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+	struct g4x_wm_values *wm = &display->wm.g4x;
 	struct intel_crtc *crtc;
 
-	g4x_read_wm_values(dev_priv, wm);
+	g4x_read_wm_values(display, wm);
 
-	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+	wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
 		struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3830,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		crtc_state->wm.g4x.optimal = *active;
 		crtc_state->wm.g4x.intermediate = *active;
 
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3838,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
 	}
 
-	drm_dbg_kms(&dev_priv->drm,
+	drm_dbg_kms(display->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
-	drm_dbg_kms(&dev_priv->drm,
+	drm_dbg_kms(display->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
-	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+	drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), str_yes_no(wm->fbc_en));
 }
 
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
 {
-	struct intel_display *display = &dev_priv->display;
 	struct intel_plane *plane;
 	struct intel_crtc *crtc;
 
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
+	mutex_lock(&display->wm.wm_mutex);
 
-	for_each_intel_plane(&dev_priv->drm, plane) {
+	for_each_intel_plane(display->drm, plane) {
 		struct intel_crtc *crtc =
			intel_crtc_for_pipe(display, plane->pipe);
 		struct intel_crtc_state *crtc_state =
@@ -3873,7 +3869,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
 		if (plane_state->uapi.visible)
			continue;
 
-		for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+		for (level = 0; level < display->wm.num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];
 
@@ -3884,36 +3880,37 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
		}
 	}
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
 		int ret;
 
 		ret = _g4x_compute_pipe_wm(crtc_state);
-		drm_WARN_ON(&dev_priv->drm, ret);
+		drm_WARN_ON(display->drm, ret);
 
 		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
 		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
 	}
 
-	g4x_program_watermarks(dev_priv);
+	g4x_program_watermarks(display);
 
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
+	mutex_unlock(&display->wm.wm_mutex);
 }
 
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
 {
-	struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+	struct drm_i915_private *dev_priv = to_i915(display->drm);
+	struct vlv_wm_values *wm = &display->wm.vlv;
 	struct intel_crtc *crtc;
 	u32 val;
 
-	vlv_read_wm_values(dev_priv, wm);
+	vlv_read_wm_values(display, wm);
 
-	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 	wm->level = VLV_WM_LEVEL_PM2;
 
-	if (IS_CHERRYVIEW(dev_priv)) {
+	if (display->platform.cherryview) {
 		vlv_punit_get(dev_priv);
 
 		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -3935,10 +3932,10 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 
 		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
-			dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+			display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3948,7 +3945,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		vlv_punit_put(dev_priv);
 	}
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
 		struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3985,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		crtc_state->wm.vlv.optimal = *active;
 		crtc_state->wm.vlv.intermediate = *active;
 
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3994,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
 	}
 
-	drm_dbg_kms(&dev_priv->drm,
+	drm_dbg_kms(display->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
 }
 
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
 {
-	struct intel_display *display = &dev_priv->display;
 	struct intel_plane *plane;
 	struct intel_crtc *crtc;
 
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
+	mutex_lock(&display->wm.wm_mutex);
 
-	for_each_intel_plane(&dev_priv->drm, plane) {
+	for_each_intel_plane(display->drm, plane) {
 		struct intel_crtc *crtc =
			intel_crtc_for_pipe(display, plane->pipe);
 		struct intel_crtc_state *crtc_state =
@@ -4023,7 +4019,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
 		if (plane_state->uapi.visible)
			continue;
 
-		for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+		for (level = 0; level < display->wm.num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];
 
@@ -4031,33 +4027,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
		}
 	}
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
+	for_each_intel_crtc(display->drm, crtc) {
 		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
 		int ret;
 
 		ret = _vlv_compute_pipe_wm(crtc_state);
-		drm_WARN_ON(&dev_priv->drm, ret);
+		drm_WARN_ON(display->drm, ret);
 
 		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
 		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
 	}
 
-	vlv_program_watermarks(dev_priv);
+	vlv_program_watermarks(display);
 
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
+	mutex_unlock(&display->wm.wm_mutex);
 }
 
 /*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
 {
-	intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
-	intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
-	intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+	intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+	intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+	intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
 
	/*
	 * Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4061,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
	 */
 }
 
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
 {
-	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+	struct ilk_wm_values *hw = &display->wm.hw;
 	struct intel_crtc *crtc;
 
-	ilk_init_lp_watermarks(dev_priv);
+	ilk_init_lp_watermarks(display);
 
-	for_each_intel_crtc(&dev_priv->drm, crtc)
+	for_each_intel_crtc(display->drm, crtc)
 		ilk_pipe_wm_get_hw_state(crtc);
 
-	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
-	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
-	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+	hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+	hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+	hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
 
-	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
-	if (DISPLAY_VER(dev_priv) >= 7) {
-		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
-		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+	hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+	if (DISPLAY_VER(display) >= 7) {
+		hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+		hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
 	}
 
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+	if (display->platform.haswell || display->platform.broadwell)
+		hw->partitioning = (intel_de_read(display, WM_MISC) &
				    WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-	else if (IS_IVYBRIDGE(dev_priv))
-		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+	else if (display->platform.ivybridge)
+		hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
				    DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
 	hw->enable_fbc_wm =
-		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+		!(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 }
 
 static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4141,41 @@ static const struct intel_wm_funcs i845_wm_funcs = {
 static const struct intel_wm_funcs nop_funcs = {
 };
 
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
 {
+	struct drm_i915_private *dev_priv = to_i915(display->drm);
+
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev_priv)) {
-		ilk_setup_wm_latency(dev_priv);
-		dev_priv->display.funcs.wm = &ilk_wm_funcs;
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_setup_wm_latency(dev_priv);
-		dev_priv->display.funcs.wm = &vlv_wm_funcs;
-	} else if (IS_G4X(dev_priv)) {
-		g4x_setup_wm_latency(dev_priv);
-		dev_priv->display.funcs.wm = &g4x_wm_funcs;
-	} else if (IS_PINEVIEW(dev_priv)) {
-		if (!pnv_get_cxsr_latency(dev_priv)) {
-			drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+		ilk_setup_wm_latency(display);
+		display->funcs.wm = &ilk_wm_funcs;
+	} else if (display->platform.valleyview || display->platform.cherryview) {
+		vlv_setup_wm_latency(display);
+		display->funcs.wm = &vlv_wm_funcs;
+	} else if (display->platform.g4x) {
+		g4x_setup_wm_latency(display);
+		display->funcs.wm = &g4x_wm_funcs;
+	} else if (display->platform.pineview) {
+		if (!pnv_get_cxsr_latency(display)) {
+			drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
 			/* Disable CxSR and never update its watermark again */
-			intel_set_memory_cxsr(dev_priv, false);
-			dev_priv->display.funcs.wm = &nop_funcs;
+			intel_set_memory_cxsr(display, false);
+			display->funcs.wm = &nop_funcs;
		} else {
-			dev_priv->display.funcs.wm = &pnv_wm_funcs;
+			display->funcs.wm = &pnv_wm_funcs;
		}
-	} else if (DISPLAY_VER(dev_priv) == 4) {
-		dev_priv->display.funcs.wm = &i965_wm_funcs;
-	} else if (DISPLAY_VER(dev_priv) == 3) {
-		dev_priv->display.funcs.wm = &i9xx_wm_funcs;
-	} else if (DISPLAY_VER(dev_priv) == 2) {
-		if (INTEL_NUM_PIPES(dev_priv) == 1)
-			dev_priv->display.funcs.wm = &i845_wm_funcs;
+	} else if (DISPLAY_VER(display) == 4) {
+		display->funcs.wm = &i965_wm_funcs;
+	} else if (DISPLAY_VER(display) == 3) {
+		display->funcs.wm = &i9xx_wm_funcs;
+	} else if (DISPLAY_VER(display) == 2) {
+		if (INTEL_NUM_PIPES(display) == 1)
+			display->funcs.wm = &i845_wm_funcs;
		else
-			dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+			display->funcs.wm = &i9xx_wm_funcs;
	} else {
-		drm_err(&dev_priv->drm,
+		drm_err(display->drm,
			"unexpected fall-through in %s\n", __func__);
-		dev_priv->display.funcs.wm = &nop_funcs;
+		display->funcs.wm = &nop_funcs;
	}
 }
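[That completes i9xx_wm.c: every watermark entry point now takes struct intel_display, derives it with to_intel_display() from whatever object is at hand (CRTC, atomic state, connector), and uses the display-scoped helpers intel_de_read()/intel_de_write()/intel_de_rmw(), DISPLAY_VER() and display->platform.<name> in place of the uncore accessors and IS_<PLATFORM>() macros. A schematic before/after of the pattern, using a hypothetical foo_update(); this is not a buildable kernel excerpt:]

	/* Before: display code reaches through the whole-device struct. */
	static void foo_update(struct drm_i915_private *dev_priv)
	{
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			intel_uncore_rmw(&dev_priv->uncore, WM_MISC,
					 WM_MISC_DATA_PARTITION_5_6, 0);
	}

	/* After: only display-local state and helpers. */
	static void foo_update(struct intel_display *display)
	{
		if (display->platform.haswell || display->platform.broadwell)
			intel_de_rmw(display, WM_MISC,
				     WM_MISC_DATA_PARTITION_5_6, 0);
	}

[Where core-device data is still needed (DRAM info, punit, pcode), the converted functions fetch it explicitly via to_i915(display->drm), as several hunks below do.]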
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
 
 #include <linux/types.h>
 
-struct drm_i915_private;
 struct intel_crtc_state;
+struct intel_display;
 struct intel_plane_state;
 
 #ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
 #else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
 {
	return false;
 }
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
 {
 }
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
 {
	return false;
 }
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
 {
 }
 #endif
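[The header keeps the usual compile-out convention: when the display code is built without I915, the prototypes are replaced by empty static inline stubs so call sites need no #ifdefs of their own. A generic illustration of the same pattern, with a hypothetical function name:]

	struct intel_display;

	#ifdef I915
	bool example_wm_feature(struct intel_display *display);
	#else
	static inline bool example_wm_feature(struct intel_display *display)
	{
		return false;	/* feature compiled out; call sites stay unchanged */
	}
	#endif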
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..ca7033251e91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fixed.h>
 #include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
 
 #include "i915_reg.h"
@@ -1826,107 +1827,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
	.transfer = gen11_dsi_host_transfer,
 };
 
-#define ICL_PREPARE_CNT_MAX	0x7
-#define ICL_CLK_ZERO_CNT_MAX	0xf
-#define ICL_TRAIL_CNT_MAX	0x7
-#define ICL_TCLK_PRE_CNT_MAX	0x3
-#define ICL_TCLK_POST_CNT_MAX	0x7
-#define ICL_HS_ZERO_CNT_MAX	0xf
-#define ICL_EXIT_ZERO_CNT_MAX	0x7
-
 static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 {
-	struct intel_display *display = to_intel_display(&intel_dsi->base);
 	struct intel_connector *connector = intel_dsi->attached_connector;
 	struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
 	u32 tlpx_ns;
-	u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
-	u32 ths_prepare_ns, tclk_trail_ns;
-	u32 hs_zero_cnt;
-	u32 tclk_pre_cnt;
+	u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+	u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
 
	tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
 
-	tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
-	ths_prepare_ns = max(mipi_config->ths_prepare,
-			     mipi_config->tclk_prepare);
-
	/*
-	 * prepare cnt in escape clocks
-	 * this field represents a hexadecimal value with a precision
-	 * of 1.2 – i.e. the most significant bit is the integer
-	 * and the least significant 2 bits are fraction bits.
-	 * so, the field can represent a range of 0.25 to 1.75
+	 * The clock and data lane prepare timing parameters are expressed in
+	 * units of 1/4 escape clocks, and all the other timing parameters in
+	 * escape clocks.
	 */
-	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
-	if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
-		drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
-			    prepare_cnt);
-		prepare_cnt = ICL_PREPARE_CNT_MAX;
-	}
+	tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+	tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
 
-	/* clk zero count in escape clocks */
-	clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
-				    ths_prepare_ns, tlpx_ns);
-	if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
-		drm_dbg_kms(display->drm,
-			    "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
-		clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
-	}
+	tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
					 mipi_config->tclk_prepare, tlpx_ns);
+	tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
 
-	/* trail cnt in escape clocks*/
-	trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
-	if (trail_cnt > ICL_TRAIL_CNT_MAX) {
-		drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
-			    trail_cnt);
-		trail_cnt = ICL_TRAIL_CNT_MAX;
-	}
+	tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+	tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
 
-	/* tclk pre count in escape clocks */
-	tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
-	if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
-		drm_dbg_kms(display->drm,
-			    "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
-		tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
-	}
+	ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+	ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
 
-	/* hs zero cnt in escape clocks */
-	hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
-				   ths_prepare_ns, tlpx_ns);
-	if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
-		drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
-			    hs_zero_cnt);
-		hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
-	}
+	ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
					mipi_config->ths_prepare, tlpx_ns);
+	ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
 
-	/* hs exit zero cnt in escape clocks */
-	exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
-	if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
-		drm_dbg_kms(display->drm,
-			    "exit_zero_cnt out of range (%d)\n",
-			    exit_zero_cnt);
-		exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
-	}
+	ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+	ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
 
	/* clock lane dphy timings */
	intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
-			       CLK_PREPARE(prepare_cnt) |
+			       CLK_PREPARE(tclk_prepare_esc_clk) |
			       CLK_ZERO_OVERRIDE |
-			       CLK_ZERO(clk_zero_cnt) |
+			       CLK_ZERO(tclk_zero_esc_clk) |
			       CLK_PRE_OVERRIDE |
-			       CLK_PRE(tclk_pre_cnt) |
-			       CLK_TRAIL_OVERRIDE |
-			       CLK_TRAIL(trail_cnt));
+			       CLK_PRE(tclk_pre_esc_clk));
 
	/* data lanes dphy timings */
	intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
-					 HS_PREPARE(prepare_cnt) |
+					 HS_PREPARE(ths_prepare_esc_clk) |
					 HS_ZERO_OVERRIDE |
-					 HS_ZERO(hs_zero_cnt) |
-					 HS_TRAIL_OVERRIDE |
-					 HS_TRAIL(trail_cnt) |
+					 HS_ZERO(ths_zero_esc_clk) |
					 HS_EXIT_OVERRIDE |
-					 HS_EXIT(exit_zero_cnt));
+					 HS_EXIT(ths_exit_esc_clk));
 
	intel_dsi_log_params(intel_dsi);
 }
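[The rewritten icl_dphy_param_init() maps VBT nanosecond timings onto escape-clock counts: the two prepare values are scaled by 4 because their registers take 1/4-escape-clock units, every other value is a whole escape-clock count, and each result is clamped to its register field width (7, 15 or 3). A standalone worked example with invented panel timings:]

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int tlpx_ns = 50;		/* hypothetical escape-clock period */
		unsigned int tclk_prepare_ns = 60;	/* hypothetical VBT timing */

		/* 1/4 escape-clock units, clamped like CLK_PREPARE's 3-bit field */
		unsigned int cnt = DIV_ROUND_UP(tclk_prepare_ns * 4, tlpx_ns);

		if (cnt > 7)
			cnt = 7;

		printf("CLK_PREPARE count: %u\n", cnt);	/* ceil(240/50) = 5 */
		return 0;
	}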
#include "intel_atomic.h" #include "intel_cdclk.h" +#include "intel_display_core.h" #include "intel_display_types.h" #include "intel_dp_tunnel.h" +#include "intel_fb.h" #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" -#include "intel_fb.h" #include "skl_universal_plane.h" /** @@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, struct drm_property *property, u64 *val) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); - if (property == dev_priv->display.properties.force_audio) + if (property == display->properties.force_audio) *val = intel_conn_state->force_audio; - else if (property == dev_priv->display.properties.broadcast_rgb) + else if (property == display->properties.broadcast_rgb) *val = intel_conn_state->broadcast_rgb; else { - drm_dbg_atomic(&dev_priv->drm, + drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n", property->base.id, property->name); return -EINVAL; @@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct drm_property *property, u64 val) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); - if (property == dev_priv->display.properties.force_audio) { + if (property == display->properties.force_audio) { intel_conn_state->force_audio = val; return 0; } - if (property == dev_priv->display.properties.broadcast_rgb) { + if (property == display->properties.broadcast_rgb) { intel_conn_state->broadcast_rgb = val; return 0; } - drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n", + drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n", property->base.id, property->name); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 178dc6c8de80..4f3fa966c537 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -16,6 +16,7 @@ #include "intel_backlight_regs.h" #include "intel_connector.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" @@ -901,11 +902,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct intel_display *display = to_intel_display(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); - intel_wakeref_t wakeref; int ret = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + with_intel_display_rpm(display) { u32 hw_level; drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index a8d08d7d82b3..fabfcf2caa69 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -37,6 +37,7 @@ #include "i915_drv.h" #include "intel_display.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_gmbus.h" @@ -3115,7 +3116,6 @@ static const struct vbt_header 
*intel_bios_get_vbt(struct intel_display *display { struct drm_i915_private *i915 = to_i915(display->drm); const struct vbt_header *vbt = NULL; - intel_wakeref_t wakeref; vbt = firmware_get_vbt(display, sizep); @@ -3127,11 +3127,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display * through MMIO or PCI mapping */ if (!vbt && IS_DGFX(i915)) - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash"); if (!vbt) - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM"); return vbt; diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 98b898a1de8f..a5dd2932b852 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -39,14 +39,15 @@ struct intel_qgv_info { u8 deinterleave; }; -static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, +static int dg1_mchbar_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 dclk_ratio, dclk_reference; u32 val; - val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC); + val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC); dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val); if (val & DG1_QCLK_REFERENCE) dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ @@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); - val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); + val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) sp->dclk *= 2; if (sp->dclk == 0) return -EINVAL; - val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR); + val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR); sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val); sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val); - val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH); + val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH); sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val); sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val); @@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, return 0; } -static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, +static int icl_pcode_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 val = 0, val2 = 0; u16 dclk; int ret; - ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), &val, &val2); if (ret) return ret; dclk = val & 0xffff; - sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0), + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 
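[dg1_mchbar_read_qgv_point_info() above decodes the memory clock from MCHBAR fields: the reference is 6 or 8 units of 16.666 MHz (100 or 133 MHz), and the +500 plus DIV_ROUND_UP(..., 1000) step converts the kHz-scale product to MHz. A worked example with register values chosen purely for illustration:]

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int dclk_ratio = 10;	 /* illustrative field value */
		unsigned int dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
		unsigned int dclk;

		dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
		printf("dclk = %u MHz\n", dclk); /* 1334, i.e. ~10 x 133.3 MHz */
		return 0;
	}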
@@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
	return 0;
 }
 
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
					 struct intel_qgv_point *sp,
					 int point)
 {
+	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;
 
-	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+	ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			     &val, &val2);
	if (ret)
		return ret;
 
	dclk = val & 0xffff;
-	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
				1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
	return 0;
 }
 
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
-					     struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+					     struct intel_psf_gv_point *points)
 {
+	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val = 0;
	int ret;
	int i;
 
-	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+	ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;
@@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
	return 0;
 }
 
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
 {
-	unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
-	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 qgv_points = 0, psf_points = 0;
 
	/*
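[icl_qgv_points_mask() builds the request mask covering every QGV and PSF point; is_sagv_enabled() in the hunk below then reports SAGV as enabled whenever more than one QGV point would remain unmasked, by checking that the surviving bits are not a power of two. A worked example assuming a hypothetical 3-point platform:]

	#include <stdio.h>

	static int is_power_of_2(unsigned int n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		unsigned int all_points = 0x7;	/* 3 QGV points (illustrative) */
		unsigned int points_mask = 0x6;	/* ask pcode to disable points 1 and 2 */
		unsigned int enabled = ~points_mask & all_points;	/* 0x1 */

		/* one point left -> power of two -> SAGV treated as disabled */
		printf("sagv enabled: %d\n", !is_power_of_2(enabled));	/* 0 */
		return 0;
	}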
@@ -142,49 +145,51 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
 }
 
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
 {
-	return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
			      ICL_PCODE_REQ_QGV_PT_MASK);
 }
 
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
				  u32 points_mask)
 {
+	struct drm_i915_private *i915 = to_i915(display->drm);
	int ret;
 
-	if (DISPLAY_VER(dev_priv) >= 14)
+	if (DISPLAY_VER(display) >= 14)
		return 0;
 
	/* bspec says to keep retrying for at least 1 ms */
-	ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+	ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				1);
 
	if (ret < 0) {
-		drm_err(&dev_priv->drm,
+		drm_err(display->drm,
			"Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}
 
-	dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+	display->sagv.status = is_sagv_enabled(display, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;
 
	return 0;
 }
 
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
				   struct intel_qgv_point *sp, int point)
 {
+	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val, val2;
	u16 dclk;
 
-	val = intel_uncore_read(&dev_priv->uncore,
+	val = intel_uncore_read(&i915->uncore,
				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
-	val2 = intel_uncore_read(&dev_priv->uncore,
+	val2 = intel_uncore_read(&i915->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
 }
 
 static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
			  struct intel_qgv_point *sp,
			  int point)
 {
-	if (DISPLAY_VER(dev_priv) >= 14)
-		return mtl_read_qgv_point_info(dev_priv, sp, point);
-	else if (IS_DG1(dev_priv))
-		return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+	if (DISPLAY_VER(display) >= 14)
+		return mtl_read_qgv_point_info(display, sp, point);
+	else if (display->platform.dg1)
+		return dg1_mchbar_read_qgv_point_info(display, sp, point);
	else
-		return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+		return icl_pcode_read_qgv_point_info(display, sp, point);
 }
 
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
 {
-	const struct dram_info *dram_info = &dev_priv->dram_info;
+	struct drm_i915_private *i915 = to_i915(display->drm);
+	const struct dram_info *dram_info = &i915->dram_info;
	int i, ret;
 
	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;
 
-	if (DISPLAY_VER(dev_priv) >= 14) {
+	if (DISPLAY_VER(display) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
@@ -251,7 +257,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
-	} else if (DISPLAY_VER(dev_priv) >= 12) {
+	} else if (DISPLAY_VER(display) >= 12) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
@@ -266,7 +272,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
-			if (IS_ROCKETLAKE(dev_priv)) {
+			if (display->platform.rocketlake) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
@@ -285,39 +291,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			qi->max_numchannels = 1;
			break;
		}
-	} else if (DISPLAY_VER(dev_priv) == 11) {
-		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+	} else if (DISPLAY_VER(display) == 11) {
+		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}
 
-	if (drm_WARN_ON(&dev_priv->drm,
+	if (drm_WARN_ON(display->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);
 
	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];
 
-		ret = intel_read_qgv_point_info(dev_priv, sp, i);
+		ret = intel_read_qgv_point_info(display, sp, i);
		if (ret) {
-			drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
			return ret;
		}
 
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}
 
	if (qi->num_psf_points > 0) {
-		ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
-			drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}
 
		for (i = 0; i < qi->num_psf_points; i++)
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
				    "PSF GV %d: CLK=%d \n",
				    i, qi->psf_points[i].clk);
	}
@@ -405,20 +411,28 @@ static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	/* Other values not used by simplified algorithm */
 };
 
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_sa_info = {
+	.deburst = 32,
+	.deprogbwlimit = 65, /* GB/s */
+	.displayrtids = 256,
+	.derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
 {
+	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
-	int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+	int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
-	int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;
 
-	ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+	ret = icl_get_qgv_points(display, &qi, is_y_tile);
	if (ret) {
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}
@@ -429,7 +443,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
 
	for (i = 0; i < num_groups; i++) {
-		struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+		struct intel_bw_info *bi = &display->bw.max[i];
		int clpchgroup;
		int j;
 
@@ -456,7 +470,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
 
-			drm_dbg_kms(&dev_priv->drm,
+			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
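[The derating above scales each QGV point's raw bandwidth down by the platform's derating percentage, then caps it at maxdebw; with the xe3lpd_sa_info just added (derating = 10), a point with 50000 units of raw bandwidth derates to 45000. A minimal sketch of the computation, with illustrative numbers:]

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int maxdebw = 60000;	/* illustrative cap */
		unsigned int bw = 50000;	/* illustrative raw QGV bandwidth */
		unsigned int derating = 10;	/* percent, as in xe3lpd_sa_info */

		printf("deratedbw = %u\n", min_u(maxdebw, bw * (100 - derating) / 100));
		/* prints 45000 */
		return 0;
	}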
*/ if (qi.num_points == 1) - dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->display.sagv.status = I915_SAGV_ENABLED; + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_qgv_info qi = {}; - const struct dram_info *dram_info = &dev_priv->dram_info; + const struct dram_info *dram_info = &i915->dram_info; bool is_y_tile = true; /* assume y tile may be used */ - int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int num_channels = max_t(u8, 1, dram_info->num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw, peakbw; int clperchgroup; - int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); + int num_groups = ARRAY_SIZE(display->bw.max); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + ret = icl_get_qgv_points(display, &qi, is_y_tile); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } - if (DISPLAY_VER(dev_priv) < 14 && + if (DISPLAY_VER(display) < 14 && (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)) num_channels *= 2; qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); - if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) + if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12) qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); - if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels) - drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); + if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels) + drm_warn(display->drm, "Number of channels exceeds max number of channels."); if (qi.max_numchannels != 0) num_channels = min_t(u8, num_channels, qi.max_numchannels); @@ -521,7 +536,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; + struct intel_bw_info *bi = &display->bw.max[i]; struct intel_bw_info *bi_next; int clpchgroup; int j; @@ -529,7 +544,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; if (i < num_groups - 1) { - bi_next = &dev_priv->display.bw.max[i + 1]; + bi_next = &display->bw.max[i + 1]; if (clpchgroup < clperchgroup) bi_next->num_planes = (ipqdepth - clpchgroup) / @@ -561,7 +576,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel num_channels * qi.channel_width, 8); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n", i, j, bi->num_planes, bi->deratedbw[j], bi->peakbw[j]); @@ -572,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel bi->psf_bw[j] = adl_calc_psf_bw(sp->clk); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } @@ -584,17 +599,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const 
struct intel * as it will fail and pointless anyway. */ if (qi.num_points == 1) - dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->display.sagv.status = I915_SAGV_ENABLED; + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static void dg2_get_bw_info(struct drm_i915_private *i915) +static void dg2_get_bw_info(struct intel_display *display) { - unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000; - int num_groups = ARRAY_SIZE(i915->display.bw.max); + unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000; + int num_groups = ARRAY_SIZE(display->bw.max); int i; /* @@ -605,7 +620,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) * whereas DG2-G11 platforms have 38 GB/s. */ for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &i915->display.bw.max[i]; + struct intel_bw_info *bi = &display->bw.max[i]; bi->num_planes = 1; /* Need only one dummy QGV point per group */ @@ -613,20 +628,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) bi->deratedbw[0] = deratedbw; } - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; } -static int xe2_hpd_get_bw_info(struct drm_i915_private *i915, +static int xe2_hpd_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_qgv_info qi = {}; int num_channels = i915->dram_info.num_channels; int peakbw, maxdebw; int ret, i; - ret = icl_get_qgv_points(i915, &qi, true); + ret = icl_get_qgv_points(display, &qi, true); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } @@ -638,33 +654,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915, const struct intel_qgv_point *point = &qi.points[i]; int bw = num_channels * (qi.channel_width / 8) * point->dclk; - i915->display.bw.max[0].deratedbw[i] = + display->bw.max[0].deratedbw[i] = min(maxdebw, (100 - sa->derating) * bw / 100); - i915->display.bw.max[0].peakbw[i] = bw; + display->bw.max[0].peakbw[i] = bw; - drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n", - i, i915->display.bw.max[0].deratedbw[i], - i915->display.bw.max[0].peakbw[i]); + drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n", + i, display->bw.max[0].deratedbw[i], + display->bw.max[0].peakbw[i]); } /* Bandwidth does not depend on # of planes; set all groups the same */ - i915->display.bw.max[0].num_planes = 1; - i915->display.bw.max[0].num_qgv_points = qi.num_points; - for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++) - memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0], - sizeof(i915->display.bw.max[0])); + display->bw.max[0].num_planes = 1; + display->bw.max[0].num_qgv_points = qi.num_points; + for (i = 1; i < ARRAY_SIZE(display->bw.max); i++) + memcpy(&display->bw.max[i], &display->bw.max[0], + sizeof(display->bw.max[0])); /* * Xe2_HPD should always have exactly two QGV points representing * battery and plugged-in operation. 
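The per-point figure computed above is simply channels times bytes-per-memory-clock times dclk. A minimal sketch (qgv_point_raw_bw is hypothetical; units follow the driver's dclk representation):

        /* channel_width is in bits, hence the divide by 8 for bytes per clock */
        static unsigned int qgv_point_raw_bw(int num_channels, int channel_width,
                                             int dclk)
        {
                return num_channels * (channel_width / 8) * dclk;
        }

Because this bandwidth does not depend on the number of planes, the code then copies group 0 into every other display->bw.max entry instead of recomputing it.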
*/ - drm_WARN_ON(&i915->drm, qi.num_points != 2); - i915->display.sagv.status = I915_SAGV_ENABLED; + drm_WARN_ON(display->drm, qi.num_points != 2); + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, +static unsigned int icl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point) { int i; @@ -674,9 +690,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) { + for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[i]; + &display->bw.max[i]; /* * Pcode will not expose all QGV points when @@ -692,7 +708,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, return UINT_MAX; } -static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, +static unsigned int tgl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point) { int i; @@ -702,9 +718,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) { + for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[i]; + &display->bw.max[i]; /* * Pcode will not expose all QGV points when @@ -720,57 +736,59 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, return 0; } -static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, +static unsigned int adl_psf_bw(struct intel_display *display, int psf_gv_point) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[0]; + &display->bw.max[0]; return bi->psf_bw[psf_gv_point]; } -static unsigned int icl_qgv_bw(struct drm_i915_private *i915, +static unsigned int icl_qgv_bw(struct intel_display *display, int num_active_planes, int qgv_point) { unsigned int idx; - if (DISPLAY_VER(i915) >= 12) - idx = tgl_max_bw_index(i915, num_active_planes, qgv_point); + if (DISPLAY_VER(display) >= 12) + idx = tgl_max_bw_index(display, num_active_planes, qgv_point); else - idx = icl_max_bw_index(i915, num_active_planes, qgv_point); + idx = icl_max_bw_index(display, num_active_planes, qgv_point); - if (idx >= ARRAY_SIZE(i915->display.bw.max)) + if (idx >= ARRAY_SIZE(display->bw.max)) return 0; - return i915->display.bw.max[idx].deratedbw[qgv_point]; + return display->bw.max[idx].deratedbw[qgv_point]; } -void intel_bw_init_hw(struct drm_i915_private *dev_priv) +void intel_bw_init_hw(struct intel_display *display) { - const struct dram_info *dram_info = &dev_priv->dram_info; + const struct dram_info *dram_info = &to_i915(display->drm)->dram_info; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) && + if (DISPLAY_VER(display) >= 30) + tgl_get_bw_info(display, &xe3lpd_sa_info); + else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx && dram_info->type == INTEL_DRAM_GDDR_ECC) - xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info); - else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) - xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info); - else if (DISPLAY_VER(dev_priv) >= 14) - tgl_get_bw_info(dev_priv, &mtl_sa_info); - else if (IS_DG2(dev_priv)) - dg2_get_bw_info(dev_priv); - else if (IS_ALDERLAKE_P(dev_priv)) - tgl_get_bw_info(dev_priv, &adlp_sa_info); - else if (IS_ALDERLAKE_S(dev_priv)) - 
tgl_get_bw_info(dev_priv, &adls_sa_info); - else if (IS_ROCKETLAKE(dev_priv)) - tgl_get_bw_info(dev_priv, &rkl_sa_info); - else if (DISPLAY_VER(dev_priv) == 12) - tgl_get_bw_info(dev_priv, &tgl_sa_info); - else if (DISPLAY_VER(dev_priv) == 11) - icl_get_bw_info(dev_priv, &icl_sa_info); + xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info); + else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) + xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info); + else if (DISPLAY_VER(display) >= 14) + tgl_get_bw_info(display, &mtl_sa_info); + else if (display->platform.dg2) + dg2_get_bw_info(display); + else if (display->platform.alderlake_p) + tgl_get_bw_info(display, &adlp_sa_info); + else if (display->platform.alderlake_s) + tgl_get_bw_info(display, &adls_sa_info); + else if (display->platform.rocketlake) + tgl_get_bw_info(display, &rkl_sa_info); + else if (DISPLAY_VER(display) == 12) + tgl_get_bw_info(display, &tgl_sa_info); + else if (DISPLAY_VER(display) == 11) + icl_get_bw_info(display, &icl_sa_info); } static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) @@ -784,8 +802,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); unsigned int data_rate = 0; enum plane_id plane_id; @@ -799,7 +817,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_ data_rate += crtc_state->data_rate[plane_id]; - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) data_rate += crtc_state->data_rate_y[plane_id]; } @@ -807,39 +825,38 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_ } /* "Maximum Pipe Read Bandwidth" */ -static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state) +static int intel_bw_crtc_min_cdclk(struct intel_display *display, + unsigned int data_rate) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) return 0; - return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512); + return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512); } -static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv, +static unsigned int intel_bw_num_active_planes(struct intel_display *display, const struct intel_bw_state *bw_state) { unsigned int num_active_planes = 0; enum pipe pipe; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) num_active_planes += bw_state->num_active_planes[pipe]; return num_active_planes; } -static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, +static unsigned int intel_bw_data_rate(struct intel_display *display, const struct intel_bw_state *bw_state) { + struct drm_i915_private *i915 = to_i915(display->drm); unsigned int data_rate = 0; enum pipe pipe; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv)) + if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915)) data_rate = DIV_ROUND_UP(data_rate * 105, 100); return data_rate; @@ -848,10 +865,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, struct 
intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj); return to_intel_bw_state(bw_state); } @@ -859,10 +876,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state) struct intel_bw_state * intel_atomic_get_new_bw_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj); return to_intel_bw_state(bw_state); } @@ -870,27 +887,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state) struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); return to_intel_bw_state(bw_state); } -static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915, +static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display, int num_active_planes) { - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; unsigned int max_bw_point = 0; unsigned int max_bw = 0; int i; for (i = 0; i < num_qgv_points; i++) { unsigned int max_data_rate = - icl_qgv_bw(i915, num_active_planes, i); + icl_qgv_bw(display, num_active_planes, i); /* * We need to know which qgv point gives us @@ -909,23 +926,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915, return max_bw_point; } -static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915, +static u16 icl_prepare_qgv_points_mask(struct intel_display *display, unsigned int qgv_points, unsigned int psf_points) { return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) | - ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915); + ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display); } -static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915) +static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display) { - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points; unsigned int max_bw_point_mask = 0; unsigned int max_bw = 0; int i; for (i = 0; i < num_psf_gv_points; i++) { - unsigned int max_data_rate = adl_psf_bw(i915, i); + unsigned int max_data_rate = adl_psf_bw(display, i); if (max_data_rate > max_bw) { max_bw_point_mask = BIT(i); @@ -938,29 +955,29 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915) return max_bw_point_mask; } -static void icl_force_disable_sagv(struct drm_i915_private *i915, +static void icl_force_disable_sagv(struct intel_display *display, struct intel_bw_state *bw_state) { - unsigned int qgv_points = 
icl_max_bw_qgv_point_mask(i915, 0); - unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915); + unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0); + unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display); - bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display, qgv_points, psf_points); - drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n", + drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n", bw_state->qgv_points_mask); - icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask); + icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask); } -static int mtl_find_qgv_points(struct drm_i915_private *i915, +static int mtl_find_qgv_points(struct intel_display *display, unsigned int data_rate, unsigned int num_active_planes, struct intel_bw_state *new_bw_state) { unsigned int best_rate = UINT_MAX; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; unsigned int qgv_peak_bw = 0; int i; int ret; @@ -974,9 +991,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is * not enabled. PM Demand code will clamp the value for the register */ - if (!intel_can_enable_sagv(i915, new_bw_state)) { + if (!intel_can_enable_sagv(display, new_bw_state)) { new_bw_state->qgv_point_peakbw = U16_MAX; - drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw."); + drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw."); return 0; } @@ -986,27 +1003,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, */ for (i = 0; i < num_qgv_points; i++) { unsigned int bw_index = - tgl_max_bw_index(i915, num_active_planes, i); + tgl_max_bw_index(display, num_active_planes, i); unsigned int max_data_rate; - if (bw_index >= ARRAY_SIZE(i915->display.bw.max)) + if (bw_index >= ARRAY_SIZE(display->bw.max)) continue; - max_data_rate = i915->display.bw.max[bw_index].deratedbw[i]; + max_data_rate = display->bw.max[bw_index].deratedbw[i]; if (max_data_rate < data_rate) continue; if (max_data_rate - data_rate < best_rate) { best_rate = max_data_rate - data_rate; - qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i]; + qgv_peak_bw = display->bw.max[bw_index].peakbw[i]; } - drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n", + drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n", i, max_data_rate, data_rate, qgv_peak_bw); } - drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n", + drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n", qgv_peak_bw, data_rate); /* @@ -1014,7 +1031,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, * satisfying the required data rate is found */ if (qgv_peak_bw == 0) { - drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n", + drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } @@ -1025,14 +1042,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, return 0; } -static int icl_find_qgv_points(struct drm_i915_private *i915, +static int icl_find_qgv_points(struct intel_display *display, unsigned int data_rate, unsigned int num_active_planes, const struct intel_bw_state *old_bw_state, struct intel_bw_state 
*new_bw_state) { - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; u16 psf_points = 0; u16 qgv_points = 0; int i; @@ -1043,22 +1060,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, return ret; for (i = 0; i < num_qgv_points; i++) { - unsigned int max_data_rate = icl_qgv_bw(i915, + unsigned int max_data_rate = icl_qgv_bw(display, num_active_planes, i); if (max_data_rate >= data_rate) qgv_points |= BIT(i); - drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n", + drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n", i, max_data_rate, data_rate); } for (i = 0; i < num_psf_gv_points; i++) { - unsigned int max_data_rate = adl_psf_bw(i915, i); + unsigned int max_data_rate = adl_psf_bw(display, i); if (max_data_rate >= data_rate) psf_points |= BIT(i); - drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d" + drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d" " required %d\n", i, max_data_rate, data_rate); } @@ -1069,14 +1086,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * reasons. */ if (qgv_points == 0) { - drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory" + drm_dbg_kms(display->drm, "No QGV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } if (num_psf_gv_points > 0 && psf_points == 0) { - drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory" + drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; @@ -1087,9 +1104,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * we can't enable SAGV due to the increased memory latency it may * cause. */ - if (!intel_can_enable_sagv(i915, new_bw_state)) { - qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes); - drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n", + if (!intel_can_enable_sagv(display, new_bw_state)) { + qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes); + drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n", qgv_points); } @@ -1097,7 +1114,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter. 
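Both selection loops above reduce to the same threshold filter into a bitmask. A compact sketch (the helper and its input array are hypothetical):

        static u16 points_at_or_above(const unsigned int *max_rate, int num_points,
                                      unsigned int required)
        {
                u16 mask = 0;
                int i;

                for (i = 0; i < num_points; i++)
                        if (max_rate[i] >= required)
                                mask |= BIT(i);

                return mask;
        }

An empty mask means no point can carry the configuration, hence the -EINVAL returns above; otherwise icl_prepare_qgv_points_mask() inverts the result, since PCode takes the points to restrict rather than the points to keep.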
*/ - new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display, qgv_points, psf_points); /* @@ -1113,80 +1130,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, return 0; } -static int intel_bw_check_qgv_points(struct drm_i915_private *i915, +static int intel_bw_check_qgv_points(struct intel_display *display, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { - unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state); + unsigned int data_rate = intel_bw_data_rate(display, new_bw_state); unsigned int num_active_planes = - intel_bw_num_active_planes(i915, new_bw_state); + intel_bw_num_active_planes(display, new_bw_state); data_rate = DIV_ROUND_UP(data_rate, 1000); - if (DISPLAY_VER(i915) >= 14) - return mtl_find_qgv_points(i915, data_rate, num_active_planes, + if (DISPLAY_VER(display) >= 14) + return mtl_find_qgv_points(display, data_rate, num_active_planes, new_bw_state); else - return icl_find_qgv_points(i915, data_rate, num_active_planes, + return icl_find_qgv_points(display, data_rate, num_active_planes, old_bw_state, new_bw_state); } -static bool intel_bw_state_changed(struct drm_i915_private *i915, +static bool intel_dbuf_bw_changed(struct intel_display *display, + const struct intel_dbuf_bw *old_dbuf_bw, + const struct intel_dbuf_bw *new_dbuf_bw) +{ + enum dbuf_slice slice; + + for_each_dbuf_slice(display, slice) { + if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] || + old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice]) + return true; + } + + return false; +} + +static bool intel_bw_state_changed(struct intel_display *display, const struct intel_bw_state *old_bw_state, const struct intel_bw_state *new_bw_state) { enum pipe pipe; - for_each_pipe(i915, pipe) { - const struct intel_dbuf_bw *old_crtc_bw = + for_each_pipe(display, pipe) { + const struct intel_dbuf_bw *old_dbuf_bw = &old_bw_state->dbuf_bw[pipe]; - const struct intel_dbuf_bw *new_crtc_bw = + const struct intel_dbuf_bw *new_dbuf_bw = &new_bw_state->dbuf_bw[pipe]; - enum dbuf_slice slice; - for_each_dbuf_slice(i915, slice) { - if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] || - old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice]) - return true; - } + if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw)) + return true; - if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe]) + if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) != + intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe])) return true; } return false; } -static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state, +static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, struct intel_crtc *crtc, enum plane_id plane_id, const struct skl_ddb_entry *ddb, unsigned int data_rate) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; - unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb); + struct intel_display *display = to_intel_display(crtc); + unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb); enum dbuf_slice slice; /* * The arbiter can only really guarantee an * equal share of the total bw to each plane. 
*/ - for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) { - crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate); - crtc_bw->active_planes[slice] |= BIT(plane_id); + for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) { + dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate); + dbuf_bw->active_planes[slice] |= BIT(plane_id); } } -static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, +static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; enum plane_id plane_id; - memset(crtc_bw, 0, sizeof(*crtc_bw)); + memset(dbuf_bw, 0, sizeof(*dbuf_bw)); if (!crtc_state->hw.active) return; @@ -1199,12 +1226,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, if (plane_id == PLANE_CURSOR) continue; - skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id, + skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, &crtc_state->wm.skl.plane_ddb[plane_id], crtc_state->data_rate[plane_id]); - if (DISPLAY_VER(i915) < 11) - skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id, + if (DISPLAY_VER(display) < 11) + skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id, &crtc_state->wm.skl.plane_ddb_y[plane_id], crtc_state->data_rate[plane_id]); } @@ -1212,13 +1239,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, /* "Maximum Data Buffer Bandwidth" */ static int -intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, +intel_bw_dbuf_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state) { unsigned int total_max_bw = 0; enum dbuf_slice slice; - for_each_dbuf_slice(i915, slice) { + for_each_dbuf_slice(display, slice) { int num_active_planes = 0; unsigned int max_bw = 0; enum pipe pipe; @@ -1227,11 +1254,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, * The arbiter can only really guarantee an * equal share of the total bw to each plane. 
*/ - for_each_pipe(i915, pipe) { - const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe]; + for_each_pipe(display, pipe) { + const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe]; - max_bw = max(crtc_bw->max_bw[slice], max_bw); - num_active_planes += hweight8(crtc_bw->active_planes[slice]); + max_bw = max(dbuf_bw->max_bw[slice], max_bw); + num_active_planes += hweight8(dbuf_bw->active_planes[slice]); } max_bw *= num_active_planes; @@ -1241,16 +1268,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, return DIV_ROUND_UP(total_max_bw, 64); } -int intel_bw_min_cdclk(struct drm_i915_private *i915, +int intel_bw_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state) { enum pipe pipe; int min_cdclk; - min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state); + min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state); - for_each_pipe(i915, pipe) - min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]); + for_each_pipe(display, pipe) + min_cdclk = max(min_cdclk, + intel_bw_crtc_min_cdclk(display, + bw_state->data_rate[pipe])); return min_cdclk; } @@ -1258,42 +1287,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915, int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_bw_state *new_bw_state = NULL; const struct intel_bw_state *old_bw_state = NULL; const struct intel_cdclk_state *cdclk_state; - const struct intel_crtc_state *crtc_state; + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; int old_min_cdclk, new_min_cdclk; struct intel_crtc *crtc; int i; - if (DISPLAY_VER(dev_priv) < 9) + if (DISPLAY_VER(display) < 9) return 0; - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw; + + skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state); + skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state); + + if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw)) + continue; + new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); old_bw_state = intel_atomic_get_old_bw_state(state); - skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state); - - new_bw_state->min_cdclk[crtc->pipe] = - intel_bw_crtc_min_cdclk(crtc_state); + new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw; } if (!old_bw_state) return 0; - if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) { + if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) { int ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; } - old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state); - new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state); + old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state); + new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state); /* * No need to check against the cdclk state if @@ -1321,7 +1357,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, if (new_min_cdclk <= cdclk_state->bw_min_cdclk) return 0; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n", new_min_cdclk, cdclk_state->bw_min_cdclk); *need_cdclk_calc = true; @@ -1331,7 +1367,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, static int 
intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; int i; @@ -1365,7 +1401,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan *changed = true; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CRTC:%d:%s] data rate %u num active planes %u\n", crtc->base.base.id, crtc->base.name, new_bw_state->data_rate[crtc->pipe], @@ -1375,16 +1411,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan return 0; } -int intel_bw_atomic_check(struct intel_atomic_state *state) +static int intel_bw_modeset_checks(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + const struct intel_bw_state *old_bw_state; + struct intel_bw_state *new_bw_state; + + if (DISPLAY_VER(display) < 9) + return 0; + + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + + new_bw_state->active_pipes = + intel_calc_active_pipes(state, old_bw_state->active_pipes); + + if (new_bw_state->active_pipes != old_bw_state->active_pipes) { + int ret; + + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + return 0; +} + +static int intel_bw_check_sagv_mask(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + const struct intel_bw_state *old_bw_state = NULL; + struct intel_bw_state *new_bw_state = NULL; + struct intel_crtc *crtc; + int ret, i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (intel_crtc_can_enable_sagv(old_crtc_state) == + intel_crtc_can_enable_sagv(new_crtc_state)) + continue; + + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + + if (intel_crtc_can_enable_sagv(new_crtc_state)) + new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); + else + new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); + } + + if (!new_bw_state) + return 0; + + if (intel_can_enable_sagv(display, new_bw_state) != + intel_can_enable_sagv(display, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + return 0; +} + +int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms) +{ + struct intel_display *display = to_intel_display(state); bool changed = false; - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_bw_state *new_bw_state; const struct intel_bw_state *old_bw_state; int ret; + if (DISPLAY_VER(display) < 9) + return 0; + + if (any_ms) { + ret = intel_bw_modeset_checks(state); + if (ret) + return ret; + } + + ret = intel_bw_check_sagv_mask(state); + if (ret) + return ret; + /* FIXME earlier gens need some checks too */ - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) return 0; ret = intel_bw_check_data_rate(state, &changed); @@ -1395,9 +1518,8 @@ int intel_bw_atomic_check(struct 
intel_atomic_state *state) new_bw_state = intel_atomic_get_new_bw_state(state); if (new_bw_state && - (intel_can_enable_sagv(i915, old_bw_state) != - intel_can_enable_sagv(i915, new_bw_state) || - new_bw_state->force_check_qgv)) + intel_can_enable_sagv(display, old_bw_state) != + intel_can_enable_sagv(display, new_bw_state)) changed = true; /* @@ -1407,28 +1529,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) if (!changed) return 0; - ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state); + ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state); if (ret) return ret; - new_bw_state->force_check_qgv = false; - return 0; } static void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); bw_state->data_rate[crtc->pipe] = intel_bw_crtc_data_rate(crtc_state); bw_state->num_active_planes[crtc->pipe] = intel_bw_crtc_num_active_planes(crtc_state); - bw_state->force_check_qgv = true; - drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n", + drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), bw_state->data_rate[crtc->pipe], bw_state->num_active_planes[crtc->pipe]); @@ -1444,6 +1563,7 @@ void intel_bw_update_hw_state(struct intel_display *display) return; bw_state->active_pipes = 0; + bw_state->pipe_sagv_reject = 0; for_each_intel_crtc(display->drm, crtc) { const struct intel_crtc_state *crtc_state = @@ -1455,6 +1575,11 @@ void intel_bw_update_hw_state(struct intel_display *display) if (DISPLAY_VER(display) >= 11) intel_bw_crtc_update(bw_state, crtc_state); + + skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state); + + /* initially SAGV has been forced off */ + bw_state->pipe_sagv_reject |= BIT(pipe); } } @@ -1470,6 +1595,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc) bw_state->data_rate[pipe] = 0; bw_state->num_active_planes[pipe] = 0; + memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe])); } static struct intel_global_state * @@ -1495,9 +1621,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = { .atomic_destroy_state = intel_bw_destroy_state, }; -int intel_bw_init(struct drm_i915_private *i915) +int intel_bw_init(struct intel_display *display) { - struct intel_display *display = &i915->display; struct intel_bw_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -1511,8 +1636,8 @@ int intel_bw_init(struct drm_i915_private *i915) * Limit this only if we have SAGV. 
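pipe_sagv_reject, reworked in the hunks above, is a per-pipe veto bitmask: the readout path starts with every active pipe's bit set ("initially SAGV has been forced off"), and intel_bw_check_sagv_mask() clears or sets a bit whenever a crtc's SAGV capability changes. A simplified sketch of the resulting decision (the real intel_can_enable_sagv() applies further platform checks):

        static bool sagv_allowed(const struct intel_bw_state *bw_state)
        {
                /* SAGV stays off while any pipe vetoes it */
                return bw_state->pipe_sagv_reject == 0;
        }

When that overall decision flips between the old and new state, the global bw state is serialized; when only individual bits change, taking the global-state lock is enough.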
And for Display version 14 onwards * sagv is handled though pmdemand requests */ - if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13)) - icl_force_disable_sagv(i915, state); + if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13)) + icl_force_disable_sagv(display, state); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 3313e4eac4f0..eb2cc883e9c1 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -12,7 +12,6 @@ #include "intel_display_power.h" #include "intel_global_state.h" -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -49,13 +48,6 @@ struct intel_bw_state { */ u16 qgv_points_mask; - /* - * Flag to force the QGV comparison in atomic check right after the - * hw state readout - */ - bool force_check_qgv; - - int min_cdclk[I915_MAX_PIPES]; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; }; @@ -72,14 +64,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state); struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state); -void intel_bw_init_hw(struct drm_i915_private *dev_priv); -int intel_bw_init(struct drm_i915_private *dev_priv); -int intel_bw_atomic_check(struct intel_atomic_state *state); -int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, +void intel_bw_init_hw(struct intel_display *display); +int intel_bw_init(struct intel_display *display); +int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms); +int icl_pcode_restrict_qgv_points(struct intel_display *display, u32 points_mask); int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc); -int intel_bw_min_cdclk(struct drm_i915_private *i915, +int intel_bw_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state); void intel_bw_update_hw_state(struct intel_display *display); void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 2a8749a0213e..6830950aae3f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1972,9 +1972,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display, static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display, const struct intel_cdclk_config *cdclk_config) { - struct drm_i915_private *i915 = to_i915(display->drm); - - intel_dbuf_mdclk_cdclk_ratio_update(i915, + intel_dbuf_mdclk_cdclk_ratio_update(display, intel_mdclk_cdclk_ratio(display, cdclk_config), cdclk_config->joined_mbus); } @@ -2808,7 +2806,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat static int intel_compute_min_cdclk(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_bw_state *bw_state; @@ -2836,7 +2833,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) bw_state = intel_atomic_get_new_bw_state(state); if (bw_state) { - min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state); + min_cdclk = intel_bw_min_cdclk(display, bw_state); if (cdclk_state->bw_min_cdclk != min_cdclk) { int ret; @@ -3342,6 +3339,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) void 
intel_cdclk_update_hw_state(struct intel_display *display) { + const struct intel_bw_state *bw_state = + to_intel_bw_state(display->bw.obj.state); struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state); struct intel_crtc *crtc; @@ -3359,6 +3358,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display) cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state); cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level; } + + cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state); } void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index cfe14162231d..98dddf72c0eb 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -22,7 +22,9 @@ * */ -#include "i915_drv.h" +#include <drm/drm_print.h> + +#include "i915_utils.h" #include "i9xx_plane_regs.h" #include "intel_color.h" #include "intel_color_regs.h" @@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state) static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(display->drm); /* icl+ have dedicated output CSC */ if (DISPLAY_VER(display) >= 11) return false; /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ - if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915)) + if (DISPLAY_VER(display) < 7 || display->platform.ivybridge) return false; return crtc_state->limited_color_range; @@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(display->drm); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { @@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state) * LUT is needed but CSC is not we need to load an * identity matrix. 
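Nearly every hunk in this series applies the same mechanical idiom: take struct intel_display *, use DISPLAY_VER(display) and display->platform.* for display-scoped decisions, and reach back to drm_i915_private only where non-display state such as uncore or dram_info still lives. A minimal sketch (example_read_info is a hypothetical function):

        static int example_read_info(struct intel_display *display, int point)
        {
                /* derive i915 only for non-display state */
                struct drm_i915_private *i915 = to_i915(display->drm);
                u32 val;

                val = intel_uncore_read(&i915->uncore,
                                        MTL_MEM_SS_INFO_QGV_POINT_LOW(point));

                /* display-scoped checks and logging use display directly */
                if (DISPLAY_VER(display) >= 14)
                        drm_dbg_kms(display->drm, "QGV %d: 0x%08x\n", point, val);

                return 0;
        }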
*/ - drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915)); + drm_WARN_ON(display->drm, !display->platform.geminilake); ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity); } else { @@ -3983,12 +3983,10 @@ int intel_color_init(struct intel_display *display) void intel_color_init_hooks(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (HAS_GMCH(display)) { - if (IS_CHERRYVIEW(i915)) + if (display->platform.cherryview) display->funcs.color = &chv_color_funcs; - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) display->funcs.color = &vlv_color_funcs; else if (DISPLAY_VER(display) >= 4) display->funcs.color = &i965_color_funcs; @@ -4005,7 +4003,7 @@ void intel_color_init_hooks(struct intel_display *display) display->funcs.color = &skl_color_funcs; else if (DISPLAY_VER(display) == 8) display->funcs.color = &bdw_color_funcs; - else if (IS_HASWELL(i915)) + else if (display->platform.haswell) display->funcs.color = &hsw_color_funcs; else if (DISPLAY_VER(display) == 7) display->funcs.color = &ivb_color_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 17eea244cc83..f5cc38dbe559 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -3,6 +3,8 @@ * Copyright © 2018 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_reg.h" #include "i915_utils.h" #include "intel_combo_phy.h" diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index e42357bd9e80..6c81c9f2fd09 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -31,8 +31,10 @@ #include <drm/drm_probe_helper.h> #include "i915_drv.h" +#include "i915_utils.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_display_core.h" #include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_hdcp.h" @@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector) int intel_connector_register(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_i915_private *i915 = to_i915(connector->dev); int ret; ret = intel_backlight_device_register(intel_connector); if (ret) goto err; - if (i915_inject_probe_failure(to_i915(connector->dev))) { + if (i915_inject_probe_failure(i915)) { ret = -EFAULT; goto err_backlight; } @@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector) enum pipe intel_connector_get_pipe(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; + struct intel_display *display = to_intel_display(connector); - drm_WARN_ON(dev, - !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + drm_WARN_ON(display->drm, + !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex)); if (!connector->base.state->crtc) return INVALID_PIPE; @@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = { void intel_attach_force_audio_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); struct drm_property *prop; - prop = dev_priv->display.properties.force_audio; + prop = display->properties.force_audio; if (prop == NULL) { - prop = 
drm_property_create_enum(dev, 0, - "audio", - force_audio_names, - ARRAY_SIZE(force_audio_names)); + prop = drm_property_create_enum(display->drm, 0, + "audio", + force_audio_names, + ARRAY_SIZE(force_audio_names)); if (prop == NULL) return; - dev_priv->display.properties.force_audio = prop; + display->properties.force_audio = prop; } drm_object_attach_property(&connector->base, prop, 0); } @@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = { void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); struct drm_property *prop; - prop = dev_priv->display.properties.broadcast_rgb; + prop = display->properties.broadcast_rgb; if (prop == NULL) { - prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, - "Broadcast RGB", - broadcast_rgb_names, - ARRAY_SIZE(broadcast_rgb_names)); + prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM, + "Broadcast RGB", + broadcast_rgb_names, + ARRAY_SIZE(broadcast_rgb_names)); if (prop == NULL) return; - dev_priv->display.properties.broadcast_rgb = prop; + display->properties.broadcast_rgb = prop; } drm_object_attach_property(&connector->base, prop, 0); @@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector) void intel_attach_scaling_mode_property(struct drm_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); u32 scaling_modes; scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); /* On GMCH platforms borders are only possible on the LVDS port */ - if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS) scaling_modes |= BIT(DRM_MODE_SCALE_CENTER); drm_connector_attach_scaling_mode_property(connector, scaling_modes); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 76ffb3f8467c..cca22d2402e8 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -532,8 +532,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct intel_display *display = to_intel_display(connector->dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); - struct drm_i915_private *dev_priv = to_i915(connector->dev); - bool reenable_hpd; u32 adpa; bool ret; u32 save_adpa; @@ -550,7 +548,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) * * Just disable HPD interrupts here to prevent this */ - reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); + intel_hpd_block(&crt->base); save_adpa = adpa = intel_de_read(display, crt->adpa_reg); drm_dbg_kms(display->drm, @@ -577,8 +575,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) drm_dbg_kms(display->drm, "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); - if (reenable_hpd) - intel_hpd_enable(dev_priv, crt->base.hpd_pin); + intel_hpd_clear_and_unblock(&crt->base); return ret; } @@ -609,7 +606,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) for (i = 0; i < tries ; i++) { /* turn on the FORCE_DETECT */ - i915_hotplug_interrupt_update(dev_priv, + i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 
CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ @@ -627,7 +624,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) intel_de_write(display, PORT_HOTPLUG_STAT(display), CRT_HOTPLUG_INT_STATUS); - i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); + i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0); return ret; } @@ -880,7 +877,7 @@ intel_crt_detect(struct drm_connector *connector, wakeref = intel_display_power_get(display, encoder->power_domain); - if (I915_HAS_HOTPLUG(display)) { + if (HAS_HOTPLUG(display)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so * only trust an assertion that the monitor is connected. @@ -904,7 +901,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(display)) { + if (HAS_HOTPLUG(display)) { status = connector_status_disconnected; goto out; } @@ -1084,7 +1081,7 @@ void intel_crt_init(struct intel_display *display) crt->base.power_domain = POWER_DOMAIN_PORT_CRT; - if (I915_HAS_HOTPLUG(display) && + if (HAS_HOTPLUG(display) && !dmi_check_system(intel_spurious_crt_detect)) { crt->base.hpd_pin = HPD_CRT; crt->base.hotplug = intel_encoder_hotplug; diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 599ddce96371..0c7f91046996 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -5,9 +5,10 @@ #include <drm/drm_edid.h> #include <drm/drm_eld.h> +#include <drm/drm_print.h> -#include "i915_drv.h" #include "intel_crtc_state_dump.h" +#include "intel_display_core.h" #include "intel_display_types.h" #include "intel_hdmi.h" #include "intel_vblank.h" @@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p, } static void -intel_dump_infoframe(struct drm_i915_private *i915, +intel_dump_infoframe(struct intel_display *display, const union hdmi_infoframe *frame) { if (!drm_debug_enabled(DRM_UT_KMS)) return; - hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame); + hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame); } #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x @@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p, } static void -ilk_dump_csc(struct drm_i915_private *i915, +ilk_dump_csc(struct intel_display *display, struct drm_printer *p, const char *name, const struct intel_csc_matrix *csc) @@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915, csc->coeff[3 * i + 1], csc->coeff[3 * i + 2]); - if (DISPLAY_VER(i915) < 7) + if (DISPLAY_VER(display) < 7) return; drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name, @@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, { struct intel_display *display = to_intel_display(pipe_config); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); const struct intel_plane_state *plane_state; struct intel_plane *plane; struct drm_printer p; @@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, if (!drm_debug_enabled(DRM_UT_KMS)) return; - p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL); + p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL); drm_printf(&p, 
"[CRTC:%d:%s] enable: %s [%s]\n", crtc->base.base.id, crtc->base.name, @@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) - intel_dump_infoframe(i915, &pipe_config->infoframes.avi); + intel_dump_infoframe(display, &pipe_config->infoframes.avi); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) - intel_dump_infoframe(i915, &pipe_config->infoframes.spd); + intel_dump_infoframe(display, &pipe_config->infoframes.spd); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) - intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi); + intel_dump_infoframe(display, &pipe_config->infoframes.hdmi); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) - intel_dump_infoframe(i915, &pipe_config->infoframes.drm); + intel_dump_infoframe(display, &pipe_config->infoframes.drm); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA)) - intel_dump_infoframe(i915, &pipe_config->infoframes.drm); + intel_dump_infoframe(display, &pipe_config->infoframes.drm); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_VSC)) drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc); @@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, pipe_config->hw.adjusted_mode.crtc_vdisplay, pipe_config->framestart_delay, pipe_config->msa_timing_delay); - drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n", + drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n", str_yes_no(pipe_config->vrr.enable), + str_yes_no(intel_vrr_is_fixed_rr(pipe_config)), pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline, pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end); @@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "linetime: %d, ips linetime: %d\n", pipe_config->linetime, pipe_config->ips_linetime); - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", crtc->num_scalers, pipe_config->scaler_state.scaler_users, pipe_config->scaler_state.scaler_id, pipe_config->hw.scaling_filter); - if (HAS_GMCH(i915)) + if (HAS_GMCH(display)) drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", pipe_config->gmch_pfit.control, pipe_config->gmch_pfit.pgm_ratios, @@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state); - if (IS_CHERRYVIEW(i915)) + if (display->platform.cherryview) drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", pipe_config->cgm_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); @@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n", pipe_config->pre_csc_lut && pipe_config->pre_csc_lut == - i915->display.color.glk_linear_degamma_lut ? 
"(linear) " : "", + display->color.glk_linear_degamma_lut ? "(linear) " : "", pipe_config->pre_csc_lut ? drm_color_lut_size(pipe_config->pre_csc_lut) : 0, pipe_config->post_csc_lut ? drm_color_lut_size(pipe_config->post_csc_lut) : 0); - if (DISPLAY_VER(i915) >= 11) - ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc); + if (DISPLAY_VER(display) >= 11) + ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc); - if (!HAS_GMCH(i915)) - ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc); - else if (IS_CHERRYVIEW(i915)) + if (!HAS_GMCH(display)) + ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc); + else if (display->platform.cherryview) vlv_dump_csc(&p, "cgm csc", &pipe_config->csc); - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) vlv_dump_csc(&p, "wgc csc", &pipe_config->csc); intel_vdsc_state_dump(&p, 0, pipe_config); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index f38c998935b9..b48ed5df7a96 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -78,6 +78,7 @@ #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" +#include "intel_vrr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -106,14 +107,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder, return level; } -static bool has_buf_trans_select(struct drm_i915_private *i915) +static bool has_buf_trans_select(struct intel_display *display) { - return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915); + return DISPLAY_VER(display) < 10 && !display->platform.broxton; } -static bool has_iboost(struct drm_i915_private *i915) +static bool has_iboost(struct intel_display *display) { - return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915); + return DISPLAY_VER(display) == 9 && !display->platform.broxton; } /* @@ -124,25 +125,25 @@ static bool has_iboost(struct drm_i915_private *i915) void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 iboost_bit = 0; int i, n_entries; enum port port = encoder->port; const struct intel_ddi_buf_trans *trans; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; /* If we're boosting the current, set bit 31 of trans1 */ - if (has_iboost(dev_priv) && + if (has_iboost(display) && intel_bios_dp_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { - intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i), + intel_de_write(display, DDI_BUF_TRANS_LO(port, i), trans->entries[i].hsw.trans1 | iboost_bit); - intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i), + intel_de_write(display, DDI_BUF_TRANS_HI(port, i), trans->entries[i].hsw.trans2); } } @@ -155,7 +156,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); int level = intel_ddi_level(encoder, crtc_state, 0); u32 iboost_bit = 0; int n_entries; @@ -163,27 +164,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, const struct intel_ddi_buf_trans *trans; 
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; /* If we're boosting the current, set bit 31 of trans1 */ - if (has_iboost(dev_priv) && + if (has_iboost(display) && intel_bios_hdmi_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ - intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9), + intel_de_write(display, DDI_BUF_TRANS_LO(port, 9), trans->entries[level].hsw.trans1 | iboost_bit); - intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9), + intel_de_write(display, DDI_BUF_TRANS_HI(port, 9), trans->entries[level].hsw.trans2); } static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) >= 14) - return XELPDP_PORT_BUF_CTL1(i915, port); + return XELPDP_PORT_BUF_CTL1(display, port); else return DDI_BUF_CTL(port); } @@ -346,7 +345,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); @@ -359,14 +357,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, if (dig_port->ddi_a_4_lanes) intel_dp->DP |= DDI_A_4_LANES; - if (DISPLAY_VER(i915) >= 14) { + if (DISPLAY_VER(display) >= 14) { if (intel_dp_is_uhbr(crtc_state)) intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT; else intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT; } - if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) { + if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) { intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; @@ -379,8 +377,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, } } -static int icl_calc_tbt_pll_link(struct intel_display *display, - enum port port) +static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port) { u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; @@ -414,15 +411,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 temp; if (!intel_crtc_has_dp_encoder(crtc_state)) return; - drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)); + drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)); temp = DP_MSA_MISC_SYNC_CLOCK; @@ -445,7 +441,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, } /* nonsense combination */ - drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && + drm_WARN_ON(display->drm, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->limited_color_range) @@ -468,7 +464,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) temp |= DP_MSA_MISC_COLOR_VSC_SDP; - 
intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder), temp); } @@ -507,8 +503,8 @@ static u32 intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; @@ -516,7 +512,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) temp |= TGL_TRANS_DDI_SELECT_PORT(port); else temp |= TRANS_DDI_SELECT_PORT(port); @@ -578,7 +574,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, temp |= TRANS_DDI_HDMI_SCRAMBLING; if (crtc_state->hdmi_high_tmds_clock_ratio) temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; @@ -591,11 +587,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { enum transcoder master; master = crtc_state->mst_master_transcoder; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, master == INVALID_TRANSCODER); temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); } @@ -604,7 +600,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, temp |= DDI_PORT_WIDTH(crtc_state->lane_count); } - if (IS_DISPLAY_VER(dev_priv, 8, 10) && + if (IS_DISPLAY_VER(display, 8, 10) && crtc_state->master_transcoder != INVALID_TRANSCODER) { u8 master_select = bdw_trans_port_sync_master_select(crtc_state->master_transcoder); @@ -619,11 +615,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(display) >= 11) { enum transcoder master_transcoder = crtc_state->master_transcoder; u32 ctl2 = 0; @@ -635,12 +630,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, PORT_SYNC_MODE_MASTER_SELECT(master_select); } - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder), + intel_de_write(display, + TRANS_DDI_FUNC_CTL2(display, cpu_transcoder), ctl2); } - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state)); } @@ -654,8 +649,7 @@ void intel_ddi_config_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = 
to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 ctl; @@ -663,7 +657,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder, ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state); ctl &= ~TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), ctl); } @@ -677,27 +671,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 ctl; - if (DISPLAY_VER(dev_priv) >= 11) - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder), + if (DISPLAY_VER(display) >= 11) + intel_de_write(display, + TRANS_DDI_FUNC_CTL2(display, cpu_transcoder), 0); - ctl = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); + ctl = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING); ctl &= ~TRANS_DDI_FUNC_ENABLE; - if (IS_DISPLAY_VER(dev_priv, 8, 10)) + if (IS_DISPLAY_VER(display, 8, 10)) ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE | TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK); - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (!intel_dp_mst_is_master_trans(crtc_state)) { ctl &= ~(TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); @@ -706,7 +699,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder), + intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), ctl); if (intel_dp_mst_is_slave_trans(crtc_state)) @@ -725,17 +718,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, bool enable, u32 hdcp_mask) { struct intel_display *display = to_intel_display(intel_encoder); - struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; int ret = 0; wakeref = intel_display_power_get_if_enabled(display, intel_encoder->power_domain); - if (drm_WARN_ON(dev, !wakeref)) + if (drm_WARN_ON(display->drm, !wakeref)) return -ENXIO; - intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder), + intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), hdcp_mask, enable ? 
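/* intel_de_rmw(display, reg, clear, set): hdcp_mask is always cleared first, then set again only when enabling, so the other FUNC_CTL bits survive */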
hdcp_mask : 0); intel_display_power_put(display, intel_encoder->power_domain, wakeref); return ret; @@ -744,7 +735,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct intel_display *display = to_intel_display(intel_connector); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder = intel_attached_encoder(intel_connector); int type = intel_connector->base.connector_type; enum port port = encoder->port; @@ -765,12 +755,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) + if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; - ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & + ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_MODE_SELECT_MASK; if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI || @@ -804,7 +794,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, u8 *pipe_mask, bool *is_dp_mst) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port = encoder->port; intel_wakeref_t wakeref; enum pipe p; @@ -819,13 +808,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if (!wakeref) return; - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); + tmp = intel_de_read(display, DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; - if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) { - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP)); + if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) { + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: @@ -846,7 +835,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, goto out; } - for_each_pipe(dev_priv, p) { + for_each_pipe(display, p) { enum transcoder cpu_transcoder = (enum transcoder)p; u32 port_mask, ddi_select, ddi_mode; intel_wakeref_t trans_wakeref; @@ -856,7 +845,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if (!trans_wakeref) continue; - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { port_mask = TGL_TRANS_DDI_PORT_MASK; ddi_select = TGL_TRANS_DDI_SELECT_PORT(port); } else { @@ -864,8 +853,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, ddi_select = TRANS_DDI_SELECT_PORT(port); } - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); + tmp = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder), trans_wakeref); @@ -883,12 +872,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, } if (!*pipe_mask) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "No pipe for [ENCODER:%d:%s] found\n", encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && dp128b132b_pipe_mask) { - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); /* * If we don't have 8b/10b MST, but have more than one @@ -901,12 +890,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder 
*encoder, * can assume it's SST. */ if (hweight8(dp128b132b_pipe_mask) > 1 || - intel_dp_mst_encoder_active_links(dig_port)) + intel_dp_mst_active_streams(intel_dp)) mst_pipe_mask = dp128b132b_pipe_mask; } if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", encoder->base.base.id, encoder->base.name, *pipe_mask); @@ -914,7 +903,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, } if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n", encoder->base.base.id, encoder->base.name, *pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask); @@ -922,12 +911,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, *is_dp_mst = mst_pipe_mask; out: - if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) { - tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port)); + if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) { + tmp = intel_de_read(display, BXT_PHY_CTL(port)); if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) - drm_err(&dev_priv->drm, + drm_err(display->drm, "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n", encoder->base.base.id, encoder->base.name, tmp); } @@ -1041,8 +1030,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum phy phy = intel_encoder_to_phy(encoder); u32 val; @@ -1050,53 +1038,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, if (cpu_transcoder == TRANSCODER_EDP) return; - if (DISPLAY_VER(dev_priv) >= 13) + if (DISPLAY_VER(display) >= 13) val = TGL_TRANS_CLK_SEL_PORT(phy); - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(display) >= 12) val = TGL_TRANS_CLK_SEL_PORT(encoder->port); else val = TRANS_CLK_SEL_PORT(encoder->port); - intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); + intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val); } void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val; if (cpu_transcoder == TRANSCODER_EDP) return; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) val = TGL_TRANS_CLK_SEL_DISABLED; else val = TRANS_CLK_SEL_DISABLED; - intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); + intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val); } -static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, +static void _skl_ddi_set_iboost(struct intel_display *display, enum port port, u8 iboost) { u32 tmp; - tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0); + tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0); tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); if (iboost) tmp |= iboost << 
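/* each port owns a separate balance-leg field in DISPIO_CR_TX_BMU_CR0, hence the per-port shift */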
BALANCE_LEG_SHIFT(port); else tmp |= BALANCE_LEG_DISABLE(port); - intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp); + intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp); } static void skl_ddi_set_iboost(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int level) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u8 iboost; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) @@ -1109,7 +1097,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, int n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; iboost = trans->entries[level].hsw.i_boost; @@ -1117,28 +1105,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, /* Make sure that the requested I_boost is valid */ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) { - drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost); + drm_err(display->drm, "Invalid I_boost value %u\n", iboost); return; } - _skl_ddi_set_iboost(dev_priv, encoder->port, iboost); + _skl_ddi_set_iboost(display, encoder->port, iboost); if (encoder->port == PORT_A && dig_port->max_lanes == 4) - _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); + _skl_ddi_set_iboost(display, PORT_E, iboost); } static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries; encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) + if (drm_WARN_ON(display->drm, n_entries < 1)) n_entries = 1; - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, n_entries > ARRAY_SIZE(index_to_dp_signal_levels))) n_entries = ARRAY_SIZE(index_to_dp_signal_levels); @@ -1171,14 +1159,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_encoder_to_phy(encoder); int n_entries, ln; u32 val; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { @@ -1186,25 +1174,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED; intel_dp->hobl_active = is_hobl_buf_trans(trans); - intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val, + intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val, intel_dp->hobl_active ? 
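/* keep the EDP4K2K override bits only while the HOBL buf trans table is in use */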
val : 0); } /* Set PORT_TX_DW5 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | COEFF_POLARITY | CURSOR_PROGRAM | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); val |= TAP3_DISABLE; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), + intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy), SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | @@ -1216,7 +1204,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy), POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | @@ -1227,7 +1215,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, for (ln = 0; ln < 4; ln++) { int level = intel_ddi_level(encoder, crtc_state, ln); - intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), + intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy), N_SCALAR_MASK, N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); } @@ -1236,7 +1224,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); u32 val; int ln; @@ -1246,12 +1234,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ - val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); + val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); + intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val); /* 2. Program loadgen select */ /* @@ -1261,33 +1249,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln < 4; ln++) { - intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy), LOADGEN_SELECT, icl_combo_phy_loadgen_select(crtc_state, ln)); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); /* 4. Clear training enable to change swing values */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. 
Program swing and de-emphasis */ icl_ddi_combo_vswing_program(encoder, crtc_state); /* 6. Set training enable to trigger update */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); + val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy)); val |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val); } static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum tc_port tc_port = intel_encoder_to_tc(encoder); const struct intel_ddi_buf_trans *trans; int n_entries, ln; @@ -1296,13 +1284,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, return; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return; for (ln = 0; ln < 2; ln++) { - intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), + intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port), CRI_USE_FS32, 0); - intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), + intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port), CRI_USE_FS32, 0); } @@ -1312,13 +1300,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, level = intel_ddi_level(encoder, crtc_state, 2*ln+0); - intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), + intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_17_12_MASK, CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), + intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_17_12_MASK, CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); } @@ -1329,7 +1317,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, level = intel_ddi_level(encoder, crtc_state, 2*ln+0); - intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), + intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK, CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | @@ -1338,7 +1326,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), + intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port), CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK, CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | @@ -1354,21 +1342,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. */ for (ln = 0; ln < 2; ln++) { - intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), + intel_de_rmw(display, MG_CLKHUB(ln, tc_port), CFG_LOW_RATE_LKREN_EN, crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), + intel_de_rmw(display, MG_TX1_DCC(ln, tc_port), CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | CFG_AMI_CK_DIV_OVERRIDE_EN, crtc_state->port_clock > 500000 ? 
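/* only link rates above 500000 kHz take the AMI clock divider override */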
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | CFG_AMI_CK_DIV_OVERRIDE_EN : 0); - intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), + intel_de_rmw(display, MG_TX2_DCC(ln, tc_port), CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | CFG_AMI_CK_DIV_OVERRIDE_EN, crtc_state->port_clock > 500000 ? @@ -1378,9 +1366,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), + intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port), 0, CRI_CALCINIT); - intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), + intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port), 0, CRI_CALCINIT); } } @@ -1490,12 +1478,12 @@ int intel_ddi_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int lane) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_ddi_buf_trans *trans; int level, n_entries; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&i915->drm, !trans)) + if (drm_WARN_ON_ONCE(display->drm, !trans)) return 0; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) @@ -1504,7 +1492,7 @@ int intel_ddi_level(struct intel_encoder *encoder, level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state, lane); - if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries)) + if (drm_WARN_ON_ONCE(display->drm, level >= n_entries)) level = n_entries - 1; return level; @@ -1514,13 +1502,13 @@ static void hsw_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int level = intel_ddi_level(encoder, crtc_state, 0); enum port port = encoder->port; u32 signal_levels; - if (has_iboost(dev_priv)) + if (has_iboost(display)) skl_ddi_set_iboost(encoder, crtc_state, level); /* HDMI ignores the rest */ @@ -1529,46 +1517,46 @@ hsw_set_signal_levels(struct intel_encoder *encoder, signal_levels = DDI_BUF_TRANS_SELECT(level); - drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + drm_dbg_kms(display->drm, "Using signal levels %08x\n", signal_levels); intel_dp->DP &= ~DDI_BUF_EMP_MASK; intel_dp->DP |= signal_levels; - intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); - intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); + intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP); + intel_de_posting_read(display, DDI_BUF_CTL(port)); } -static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, +static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg, u32 clk_sel_mask, u32 clk_sel, u32 clk_off) { - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, reg, clk_sel_mask, clk_sel); + intel_de_rmw(display, reg, clk_sel_mask, clk_sel); /* * "This step and the step before must be * done with separate register writes." 
*/ - intel_de_rmw(i915, reg, clk_off, 0); + intel_de_rmw(display, reg, clk_off, 0); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } -static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg, +static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg, u32 clk_off) { - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, reg, 0, clk_off); + intel_de_rmw(display, reg, 0, clk_off); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } -static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg, +static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg, u32 clk_off) { - return !(intel_de_read(i915, reg) & clk_off); + return !(intel_de_read(display, reg) & clk_off); } static struct intel_shared_dpll * @@ -1585,14 +1573,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg, static void adls_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_encoder_to_phy(encoder); - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy), + _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy), ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1600,19 +1588,19 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder, static void adls_ddi_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy), + _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy), + return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } @@ -1629,14 +1617,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder) static void rkl_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_encoder_to_phy(encoder); - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, + _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1644,19 +1632,19 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder, static void rkl_ddi_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display 
*display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, + _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, + return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0, RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } @@ -1673,23 +1661,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder) static void dg1_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_encoder_to_phy(encoder); - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; /* * If we fail this, something went very wrong: first 2 PLLs should be * used by first 2 phys and last 2 PLLs by last phys */ - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, (pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) || (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C))) return; - _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy), + _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1697,19 +1685,19 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder, static void dg1_ddi_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy), + _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy), + return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy), DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } @@ -1739,14 +1727,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder) static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum phy phy = intel_encoder_to_phy(encoder); - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, + _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); @@ -1754,19 +1742,19 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder, static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = 
to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, + _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, + return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } @@ -1783,39 +1771,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder) static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port = encoder->port; - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; /* * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port. * MG does not exist, but the programming is required to ungate DDIC and DDID." */ - intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG); + intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG); icl_ddi_combo_enable_clock(encoder, crtc_state); } static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; icl_ddi_combo_disable_clock(encoder); - intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); + intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; u32 tmp; - tmp = intel_de_read(i915, DDI_CLK_SEL(port)); + tmp = intel_de_read(display, DDI_CLK_SEL(port)); if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) return false; @@ -1826,54 +1814,54 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - intel_de_write(i915, DDI_CLK_SEL(port), + intel_de_write(display, DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, + intel_de_rmw(display, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum tc_port tc_port = intel_encoder_to_tc(encoder); enum 
port port = encoder->port; - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, + intel_de_rmw(display, ICL_DPCLKA_CFGCR0, 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); - intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); + intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum tc_port tc_port = intel_encoder_to_tc(encoder); enum port port = encoder->port; u32 tmp; - tmp = intel_de_read(i915, DDI_CLK_SEL(port)); + tmp = intel_de_read(display, DDI_CLK_SEL(port)); if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) return false; - tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0); + tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0); return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); } @@ -1934,47 +1922,47 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder) static void skl_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port = encoder->port; - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, DPLL_CTRL2, + intel_de_rmw(display, DPLL_CTRL2, DPLL_CTRL2_DDI_CLK_OFF(port) | DPLL_CTRL2_DDI_CLK_SEL_MASK(port), DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static void skl_ddi_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - mutex_lock(&i915->display.dpll.lock); + mutex_lock(&display->dpll.lock); - intel_de_rmw(i915, DPLL_CTRL2, + intel_de_rmw(display, DPLL_CTRL2, 0, DPLL_CTRL2_DDI_CLK_OFF(port)); - mutex_unlock(&i915->display.dpll.lock); + mutex_unlock(&display->dpll.lock); } static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; /* * FIXME Not sure if the override affects both * the PLL selection and the CLK_OFF bit. 
*/ - return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)); + return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)); } static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder) @@ -2002,30 +1990,30 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder) void hsw_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port = encoder->port; - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return; - intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); + intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); } void hsw_ddi_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); + intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); } bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; - return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE; + return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE; } static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder) @@ -2081,7 +2069,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder) void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 port_mask; bool ddi_clk_needed; @@ -2101,7 +2089,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) * In the unlikely case that BIOS enables DP in MST mode, just * warn since our MST HW readout is incomplete. */ - if (drm_WARN_ON(&i915->drm, is_mst)) + if (drm_WARN_ON(display->drm, is_mst)) return; } @@ -2116,11 +2104,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) * Sanity check that we haven't incorrectly registered another * encoder using any of the ports of this DSI encoder. 
*/ - for_each_intel_encoder(&i915->drm, other_encoder) { + for_each_intel_encoder(display->drm, other_encoder) { if (other_encoder == encoder) continue; - if (drm_WARN_ON(&i915->drm, + if (drm_WARN_ON(display->drm, port_mask & BIT(other_encoder->port))) return; } @@ -2135,7 +2123,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) !encoder->is_clock_enabled(encoder)) return; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n", encoder->base.base.id, encoder->base.name); @@ -2255,10 +2243,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state) i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(dev_priv) >= 12) - return TGL_DP_TP_CTL(dev_priv, + if (DISPLAY_VER(display) >= 12) + return TGL_DP_TP_CTL(display, tgl_dp_tp_transcoder(crtc_state)); else return DP_TP_CTL(encoder->port); @@ -2267,10 +2255,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (DISPLAY_VER(dev_priv) >= 12) - return TGL_DP_TP_STATUS(dev_priv, + if (DISPLAY_VER(display) >= 12) + return TGL_DP_TP_STATUS(display, tgl_dp_tp_transcoder(crtc_state)); else return DP_TP_STATUS(encoder->port); @@ -2445,14 +2433,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, static void intel_ddi_disable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); if (!crtc_state->fec_enable) return; - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_FEC_ENABLE, 0); - intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state)); } static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, @@ -2474,11 +2462,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, * Splitter enable for eDP MSO is limited to certain pipes, on certain * platforms. 
*/ -static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) +static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display) { - if (DISPLAY_VER(i915) > 20) + if (DISPLAY_VER(display) > 20) return ~0; - else if (IS_ALDERLAKE_P(i915)) + else if (display->platform.alderlake_p) return BIT(PIPE_A) | BIT(PIPE_B); else return BIT(PIPE_A); @@ -2487,28 +2475,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) static void intel_ddi_mso_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct intel_display *display = to_intel_display(pipe_config); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 dss1; - if (!HAS_MSO(i915)) + if (!HAS_MSO(display)) return; - dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); + dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe)); pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE; if (!pipe_config->splitter.enable) return; - if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) { + if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) { pipe_config->splitter.enable = false; return; } switch (dss1 & SPLITTER_CONFIGURATION_MASK) { default: - drm_WARN(&i915->drm, true, + drm_WARN(display->drm, true, "Invalid splitter configuration, dss1=0x%08x\n", dss1); fallthrough; case SPLITTER_CONFIGURATION_2_SEGMENT: @@ -2524,12 +2512,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder, static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 dss1 = 0; - if (!HAS_MSO(i915)) + if (!HAS_MSO(display)) return; if (crtc_state->splitter.enable) { @@ -2541,7 +2529,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT; } - intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe), + intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe), SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK | OVERLAP_PIXELS_MASK, dss1); } @@ -2549,27 +2537,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) static void mtl_ddi_enable_d2d(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; i915_reg_t reg; u32 set_bits, wait_bits; - if (DISPLAY_VER(dev_priv) < 14) + if (DISPLAY_VER(display) < 14) return; - if (DISPLAY_VER(dev_priv) >= 20) { + if (DISPLAY_VER(display) >= 20) { reg = DDI_BUF_CTL(port); set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE; wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE; } else { - reg = XELPDP_PORT_BUF_CTL1(dev_priv, port); + reg = XELPDP_PORT_BUF_CTL1(display, port); set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE; wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE; } - intel_de_rmw(dev_priv, reg, 0, set_bits); - if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) { - drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n", + intel_de_rmw(display, reg, 0, set_bits); + if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) { + drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } } @@ 
-2599,13 +2587,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder, static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 val; val = intel_tc_port_in_tbt_alt_mode(dig_port) ? XELPDP_PORT_BUF_IO_SELECT_TBT : 0; - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, val); } @@ -2734,7 +2722,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int ret; @@ -2778,7 +2765,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { - drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(display, dig_port->ddi_io_power_domain); } @@ -2882,16 +2869,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - if (DISPLAY_VER(dev_priv) < 11) - drm_WARN_ON(&dev_priv->drm, + if (DISPLAY_VER(display) < 11) + drm_WARN_ON(display->drm, is_mst && (port == PORT_A || port == PORT_E)); else - drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A); + drm_WARN_ON(display->drm, is_mst && port == PORT_A); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, @@ -2908,14 +2894,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_clock(encoder, crtc_state); if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { - drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(display, dig_port->ddi_io_power_domain); } icl_program_mg_dp_mode(dig_port, crtc_state); - if (has_buf_trans_select(dev_priv)) + if (has_buf_trans_select(display)) hsw_prepare_dp_ddi_buffers(encoder, crtc_state); encoder->set_signal_levels(encoder, crtc_state); @@ -2931,7 +2917,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, crtc_state); intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); intel_dp_start_link_train(state, intel_dp, crtc_state); - if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) && + if ((port != PORT_A || DISPLAY_VER(display) >= 9) && !is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp, crtc_state); @@ -2979,12 +2965,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; - struct drm_i915_private 
*dev_priv = to_i915(encoder->base.dev); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_enable_clock(encoder, crtc_state); - drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(display, dig_port->ddi_io_power_domain); @@ -3022,10 +3007,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, { struct intel_display *display = to_intel_display(state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder); + drm_WARN_ON(display->drm, crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(display, pipe, true); @@ -3050,27 +3034,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, static void mtl_ddi_disable_d2d(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; i915_reg_t reg; u32 clr_bits, wait_bits; - if (DISPLAY_VER(dev_priv) < 14) + if (DISPLAY_VER(display) < 14) return; - if (DISPLAY_VER(dev_priv) >= 20) { + if (DISPLAY_VER(display) >= 20) { reg = DDI_BUF_CTL(port); clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE; wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE; } else { - reg = XELPDP_PORT_BUF_CTL1(dev_priv, port); + reg = XELPDP_PORT_BUF_CTL1(display, port); clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE; wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE; } - intel_de_rmw(dev_priv, reg, clr_bits, 0); - if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100)) - drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n", + intel_de_rmw(display, reg, clr_bits, 0); + if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100)) + drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } @@ -3089,10 +3073,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); + intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); if (DISPLAY_VER(display) >= 14) intel_wait_ddi_buf_idle(display, port); @@ -3100,7 +3083,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder, mtl_ddi_disable_d2d(encoder); if (intel_crtc_has_dp_encoder(crtc_state)) { - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_ENABLE, 0); } @@ -3118,7 +3101,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = &dig_port->dp; intel_wakeref_t wakeref; @@ -3135,12 +3117,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, */ intel_dp_set_power(intel_dp, DP_SET_POWER_D3); - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { if (is_mst || intel_dp_is_uhbr(old_crtc_state)) { enum transcoder 
cpu_transcoder = old_crtc_state->cpu_transcoder; - intel_de_rmw(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder), + intel_de_rmw(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder), TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK, 0); } @@ -3160,7 +3142,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, * Configure Transcoder Clock select to direct no clock to the * transcoder" */ - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) intel_ddi_disable_transcoder_clock(old_crtc_state); intel_pps_vdd_on(intel_dp); @@ -3176,8 +3158,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, intel_ddi_disable_clock(encoder); /* De-select Thunderbolt */ - if (DISPLAY_VER(dev_priv) >= 14) - intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port), + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), XELPDP_PORT_BUF_IO_SELECT_TBT, 0); } @@ -3187,7 +3169,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; intel_wakeref_t wakeref; @@ -3195,12 +3176,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(display) < 12) intel_ddi_disable_transcoder_clock(old_crtc_state); intel_ddi_buf_disable(encoder, old_crtc_state); - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(display) >= 12) intel_ddi_disable_transcoder_clock(old_crtc_state); wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref); @@ -3220,7 +3201,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *pipe_crtc; bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI); @@ -3249,6 +3229,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0); } + intel_vrr_transcoder_disable(old_crtc_state); + intel_ddi_disable_transcoder_func(old_crtc_state); for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { @@ -3257,7 +3239,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state, intel_dsc_disable(old_pipe_crtc_state); - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(display) >= 9) skl_scaler_disable(old_pipe_crtc_state); else ilk_pfit_disable(old_pipe_crtc_state); @@ -3359,12 +3341,12 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum port port = encoder->port; - if (port == PORT_A && DISPLAY_VER(dev_priv) < 9) + if (port == PORT_A && DISPLAY_VER(display) < 9) 
intel_dp_stop_link_train(intel_dp, crtc_state); drm_connector_update_privacy_screen(conn_state); @@ -3401,7 +3383,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; @@ -3410,11 +3391,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, crtc_state->hdmi_scrambling)) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", connector->base.id, connector->name); - if (has_buf_trans_select(dev_priv)) + if (has_buf_trans_select(display)) hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state); /* e. Enable D2D Link for C10/C20 Phy */ @@ -3423,7 +3404,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, encoder->set_signal_levels(encoder, crtc_state); /* Display WA #1143: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { + if (DISPLAY_VER(display) == 9 && !display->platform.broxton) { /* * For some reason these chicken bits have been * stuffed into a transcoder register, even though * the bits affect a specific DDI port rather than * a specific transcoder. */ i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port); u32 val; - val = intel_de_read(dev_priv, reg); + val = intel_de_read(display, reg); if (port == PORT_E) val |= DDIE_TRAINING_OVERRIDE_ENABLE | @@ -3442,8 +3423,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, val |= DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE; - intel_de_write(dev_priv, reg, val); - intel_de_posting_read(dev_priv, reg); + intel_de_write(display, reg, val); + intel_de_posting_read(display, reg); udelay(1); @@ -3454,7 +3435,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE); - intel_de_write(dev_priv, reg, val); + intel_de_write(display, reg, val); } intel_ddi_power_up_lanes(encoder, crtc_state); @@ -3475,7 +3456,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, if (dig_port->ddi_a_4_lanes) buf_ctl |= DDI_A_4_LANES; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { u32 port_buf = 0; port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count); @@ -3483,15 +3464,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, if (dig_port->lane_reversal) port_buf |= XELPDP_PORT_REVERSAL; - intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port), + intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port), XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count); - if (DISPLAY_VER(dev_priv) >= 20) + if (DISPLAY_VER(display) >= 20) buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE; - } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) { - drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); + } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) { + drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port)); buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } @@ -3522,6 +3503,8 @@ static void
intel_ddi_enable(struct intel_atomic_state *state, intel_ddi_enable_transcoder_func(encoder, crtc_state); + intel_vrr_transcoder_enable(crtc_state); + /* Enable/Disable DP2.0 SDP split config before transcoder */ intel_audio_sdp_split_update(crtc_state); @@ -3567,7 +3550,7 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - intel_dp->link_trained = false; + intel_dp->link.active = false; intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); @@ -3584,12 +3567,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct drm_connector *connector = old_conn_state->connector; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", connector->base.id, connector->name); } @@ -3653,16 +3636,16 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_crtc *pipe_crtc; /* FIXME: Add MTL pll_mgr */ - if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder)) + if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder)) return; - for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc, + for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc, intel_crtc_joined_pipe_mask(crtc_state)) intel_update_active_dpll(state, pipe_crtc, encoder); } @@ -3678,7 +3661,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_tc_port = intel_encoder_is_tc(encoder); @@ -3697,7 +3680,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, * Type-C ports. Skip this step for TBT. 
*/ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) bxt_dpio_phy_set_lane_optim_mask(encoder, crtc_state->lane_lat_optim_mask); } @@ -3765,10 +3748,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 dp_tp_ctl; - dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)); drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE); @@ -3781,10 +3763,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, if (crtc_state->enhanced_framing) dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); - intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); + intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state)); - if (IS_ALDERLAKE_P(dev_priv) && + if (display->platform.alderlake_p && (intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port))) adlp_tbt_to_dp_alt_switch_wa(encoder); @@ -3796,11 +3778,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 temp; - temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)); temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { @@ -3821,17 +3803,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, break; } - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp); + intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp); } static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE); /* @@ -3841,28 +3823,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, * In this case there is a requirement to wait for a minimum number of * idle patterns to be sent.
*/ - if (port == PORT_A && DISPLAY_VER(dev_priv) < 12) + if (port == PORT_A && DISPLAY_VER(display) < 12) return; - if (intel_de_wait_for_set(dev_priv, + if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_IDLE_DONE, 2)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timed out waiting for DP idle patterns\n"); } -static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, +static bool intel_ddi_is_audio_enabled(struct intel_display *display, enum transcoder cpu_transcoder) { - struct intel_display *display = &dev_priv->display; - if (cpu_transcoder == TRANSCODER_EDP) return false; if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO)) return false; - return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) & + return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) & AUDIO_OUTPUT_ENABLE(cpu_transcoder); } @@ -3892,34 +3872,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state) void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state); - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(display) >= 12) crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state); - else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) + else if (display->platform.jasperlake || display->platform.elkhartlake) crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state); - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state); } -static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv, +static enum transcoder bdw_transcoder_master_readout(struct intel_display *display, enum transcoder cpu_transcoder) { u32 master_select; - if (DISPLAY_VER(dev_priv) >= 11) { - u32 ctl2 = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder)); + if (DISPLAY_VER(display) >= 11) { + u32 ctl2 = intel_de_read(display, + TRANS_DDI_FUNC_CTL2(display, cpu_transcoder)); if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0) return INVALID_TRANSCODER; master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2); } else { - u32 ctl = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); + u32 ctl = intel_de_read(display, + TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0) return INVALID_TRANSCODER; @@ -3936,15 +3916,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); enum transcoder cpu_transcoder; crtc_state->master_transcoder = - bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder); + bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder); - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { + for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) { enum intel_display_power_domain power_domain; intel_wakeref_t 
trans_wakeref; @@ -3955,14 +3934,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) if (!trans_wakeref) continue; - if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) == + if (bdw_transcoder_master_readout(display, cpu_transcoder) == crtc_state->cpu_transcoder) crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); intel_display_power_put(display, power_domain, trans_wakeref); } - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, crtc_state->master_transcoder != INVALID_TRANSCODER && crtc_state->sync_mode_slaves_mask); } @@ -4085,11 +4064,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 ddi_func_ctl, ddi_mode, flags = 0; - ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)); + ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)); if (ddi_func_ctl & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; else @@ -4131,13 +4109,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) { intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl); } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) { - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); /* * If this is true, we know we're being called from mst stream * encoder's ->get_config(). */ - if (intel_dp_mst_encoder_active_links(dig_port)) + if (intel_dp_mst_active_streams(intel_dp)) intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl); else intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl); @@ -4152,11 +4130,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, static void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; /* XXX: DSI transcoder paranoia */ - if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) + if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder))) return; intel_ddi_read_func_ctl(encoder, pipe_config); @@ -4164,14 +4142,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, intel_ddi_mso_get_config(encoder, pipe_config); pipe_config->has_audio = - intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); + intel_ddi_is_audio_enabled(display, cpu_transcoder); if (encoder->type == INTEL_OUTPUT_EDP) intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp); ddi_dotclock_get(pipe_config); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) pipe_config->lane_lat_optim_mask = bxt_dpio_phy_get_lane_lat_optim_mask(encoder); @@ -4192,7 +4170,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, HDMI_INFOFRAME_TYPE_DRM, &pipe_config->infoframes.drm); - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) bdw_get_trans_port_sync_config(pipe_config); intel_psr_get_config(encoder, pipe_config); @@ -4285,10 +4263,10 @@ static enum icl_port_dpll_id icl_ddi_tc_port_pll_type(struct intel_encoder 
*encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - if (drm_WARN_ON(&i915->drm, !pll)) + if (drm_WARN_ON(display->drm, !pll)) return ICL_PORT_DPLL_DEFAULT; if (icl_ddi_tc_pll_is_tbt(pll)) @@ -4382,11 +4360,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); bool fastset = true; if (intel_encoder_is_tc(encoder)) { - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n", encoder->base.base.id, encoder->base.name); crtc_state->uapi.mode_changed = true; fastset = false; @@ -4421,12 +4399,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int ret; - if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) + if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) { @@ -4441,13 +4419,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, if (ret) return ret; - if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A && + if (display->platform.haswell && crtc->pipe == PIPE_A && pipe_config->cpu_transcoder == TRANSCODER_EDP) pipe_config->pch_pfit.force_thru = pipe_config->pch_pfit.enabled || pipe_config->crc_enabled; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) pipe_config->lane_lat_optim_mask = bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); @@ -4498,9 +4476,9 @@ static u8 intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state, int tile_group_id) { + struct intel_display *display = to_intel_display(ref_crtc_state); struct drm_connector *connector; const struct drm_connector_state *conn_state; - struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev); struct intel_atomic_state *state = to_intel_atomic_state(ref_crtc_state->uapi.state); u8 transcoders = 0; @@ -4510,7 +4488,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state, * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. 
*/ - if (DISPLAY_VER(dev_priv) < 9) + if (DISPLAY_VER(display) < 9) return 0; if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP)) @@ -4542,11 +4520,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct drm_connector *connector = conn_state->connector; u8 port_sync_transcoders = 0; - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n", encoder->base.base.id, encoder->base.name, crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name); @@ -4618,7 +4596,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_connector *connector; enum port port = dig_port->base.port; @@ -4627,7 +4605,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) return -ENOMEM; dig_port->dp.output_reg = DDI_BUF_CTL(port); - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain; else dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; @@ -4643,15 +4621,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) } if (dig_port->base.type == INTEL_OUTPUT_EDP) { - struct drm_device *dev = dig_port->base.base.dev; struct drm_privacy_screen *privacy_screen; - privacy_screen = drm_privacy_screen_get(dev->dev, NULL); + privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL); if (!IS_ERR(privacy_screen)) { drm_connector_attach_privacy_screen_provider(&connector->base, privacy_screen); } else if (PTR_ERR(privacy_screen) != -ENODEV) { - drm_warn(dev, "Error getting privacy-screen\n"); + drm_warn(display->drm, "Error getting privacy-screen\n"); } } @@ -4662,7 +4639,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); struct intel_connector *connector = hdmi->attached_connector; struct i2c_adapter *ddc = connector->base.ddc; @@ -4675,7 +4651,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, if (connector->base.status != connector_status_connected) return 0; - ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, + ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx); if (ret) return ret; @@ -4692,7 +4668,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, crtc_state = to_intel_crtc_state(crtc->base.state); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(display->drm, !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); if (!crtc_state->hw.active) @@ -4708,7 +4684,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config); if (ret < 0) { - drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n", + drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n", connector->base.base.id, connector->base.name, ret); return 0; } @@ -4733,11 +4709,11 @@ static int 
intel_hdmi_reset_link(struct intel_encoder *encoder, static void intel_ddi_link_check(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); /* TODO: Move checking the HDMI link state here as well. */ - drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector); + drm_WARN_ON(display->drm, !dig_port->dp.attached_connector); intel_dp_link_check(encoder); } @@ -4800,26 +4776,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder, static bool lpt_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; + struct intel_display *display = to_intel_display(encoder); + u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin]; - return intel_de_read(dev_priv, SDEISR) & bit; + return intel_de_read(display, SDEISR) & bit; } static bool hsw_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; + struct intel_display *display = to_intel_display(encoder); + u32 bit = display->hotplug.hpd[encoder->hpd_pin]; - return intel_de_read(dev_priv, DEISR) & bit; + return intel_de_read(display, DEISR) & bit; } static bool bdw_digital_port_connected(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; + struct intel_display *display = to_intel_display(encoder); + u32 bit = display->hotplug.hpd[encoder->hpd_pin]; - return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; + return intel_de_read(display, GEN8_DE_PORT_ISR) & bit; } static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) @@ -4848,7 +4824,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); if (dig_port->base.port != PORT_A) return false; @@ -4859,7 +4835,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only * supported configuration */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) return true; return false; @@ -4868,15 +4844,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) static int intel_ddi_max_lanes(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); enum port port = dig_port->base.port; int max_lanes = 4; - if (DISPLAY_VER(dev_priv) >= 11) + if (DISPLAY_VER(display) >= 11) return max_lanes; if (port == PORT_A || port == PORT_E) { - if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) max_lanes = port == PORT_A ? 4 : 0; else /* Both A and E share 2 lanes */ @@ -4889,7 +4865,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port) * so we use the proper lane count for our calculations. 
*/ if (intel_ddi_a_force_4_lanes(dig_port)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Forcing DDI_A_4_LANES for port A\n"); dig_port->ddi_a_4_lanes = true; max_lanes = 4; @@ -4898,8 +4874,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port) return max_lanes; } -static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port) { if (port >= PORT_D_XELPD) return HPD_PORT_D + port - PORT_D_XELPD; @@ -4909,8 +4884,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port) { if (port >= PORT_TC1) return HPD_PORT_C + port - PORT_TC1; @@ -4918,8 +4892,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port) { if (port >= PORT_TC1) return HPD_PORT_TC1 + port - PORT_TC1; @@ -4927,11 +4900,12 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (HAS_PCH_TGP(dev_priv)) - return tgl_hpd_pin(dev_priv, port); + return tgl_hpd_pin(display, port); if (port >= PORT_TC1) return HPD_PORT_C + port - PORT_TC1; @@ -4939,8 +4913,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port) { if (port >= PORT_C) return HPD_PORT_TC1 + port - PORT_C; @@ -4948,31 +4921,34 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv, - enum port port) +static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (port == PORT_D) return HPD_PORT_A; if (HAS_PCH_TGP(dev_priv)) - return icl_hpd_pin(dev_priv, port); + return icl_hpd_pin(display, port); return HPD_PORT_A + port - PORT_A; } -static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) +static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (HAS_PCH_TGP(dev_priv)) - return icl_hpd_pin(dev_priv, port); + return icl_hpd_pin(display, port); return HPD_PORT_A + port - PORT_A; } -static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port) +static bool intel_ddi_is_tc(struct intel_display *display, enum port port) { - if (DISPLAY_VER(i915) >= 12) + if (DISPLAY_VER(display) >= 12) return port >= PORT_TC1; - else if (DISPLAY_VER(i915) >= 11) + else if (DISPLAY_VER(display) >= 11) return port >= PORT_C; else return false; @@ -5015,21 +4991,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder #define port_tc_name(port) ((port) - PORT_TC1 + '1') #define 
tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') -static bool port_strap_detected(struct drm_i915_private *i915, enum port port) +static bool port_strap_detected(struct intel_display *display, enum port port) { /* straps not used on skl+ */ - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) return true; switch (port) { case PORT_A: - return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; + return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; case PORT_B: - return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED; + return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED; case PORT_C: - return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED; + return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED; case PORT_D: - return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED; + return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED; case PORT_E: return true; /* no strap for DDI-E */ default: @@ -5043,18 +5019,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp) return init_dp || intel_encoder_is_tc(encoder); } -static bool assert_has_icl_dsi(struct drm_i915_private *i915) +static bool assert_has_icl_dsi(struct intel_display *display) { - return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) && - !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11, + return !drm_WARN(display->drm, !display->platform.alderlake_p && + !display->platform.tigerlake && DISPLAY_VER(display) != 11, "Platform does not support DSI\n"); } -static bool port_in_use(struct drm_i915_private *i915, enum port port) +static bool port_in_use(struct intel_display *display, enum port port) { struct intel_encoder *encoder; - for_each_intel_encoder(&i915->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { /* FIXME what about second port for dual link DSI? */ if (encoder->port == port) return true; @@ -5066,7 +5042,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port) void intel_ddi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port; struct intel_encoder *encoder; bool init_hdmi, init_dp; @@ -5078,8 +5053,8 @@ void intel_ddi_init(struct intel_display *display, if (port == PORT_NONE) return; - if (!port_strap_detected(dev_priv, port)) { - drm_dbg_kms(&dev_priv->drm, + if (!port_strap_detected(display, port)) { + drm_dbg_kms(display->drm, "Port %c strap not detected\n", port_name(port)); return; } @@ -5087,15 +5062,15 @@ void intel_ddi_init(struct intel_display *display, if (!assert_port_valid(display, port)) return; - if (port_in_use(dev_priv, port)) { - drm_dbg_kms(&dev_priv->drm, + if (port_in_use(display, port)) { + drm_dbg_kms(display->drm, "Port %c already claimed\n", port_name(port)); return; } if (intel_bios_encoder_supports_dsi(devdata)) { /* BXT/GLK handled elsewhere, for now at least */ - if (!assert_has_icl_dsi(dev_priv)) + if (!assert_has_icl_dsi(display)) return; icl_dsi_init(display, devdata); @@ -5111,7 +5086,7 @@ void intel_ddi_init(struct intel_display *display, * outputs. 
*/ if (intel_hti_uses_phy(display, phy)) { - drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n", + drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n", port_name(port), phy_name(phy)); return; } @@ -5128,20 +5103,20 @@ void intel_ddi_init(struct intel_display *display, */ init_dp = true; init_hdmi = false; - drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n", + drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n", port_name(port)); } if (!init_dp && !init_hdmi) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", port_name(port)); return; } if (intel_phy_is_snps(display, phy) && - dev_priv->display.snps.phy_failed_calibration & BIT(phy)) { - drm_dbg_kms(&dev_priv->drm, + display->snps.phy_failed_calibration & BIT(phy)) { + drm_dbg_kms(display->drm, "SNPS PHY %c failed to calibrate, proceeding anyway\n", phy_name(phy)); } @@ -5155,26 +5130,26 @@ void intel_ddi_init(struct intel_display *display, encoder = &dig_port->base; encoder->devdata = devdata; - if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) { - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) { + drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c/PHY %c", port_name(port - PORT_D_XELPD + PORT_D), phy_name(phy)); - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { enum tc_port tc_port = intel_port_to_tc(display, port); - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %s%c/PHY %s%c", port >= PORT_TC1 ? "TC" : "", port >= PORT_TC1 ? port_tc_name(port) : port_name(port), tc_port != TC_PORT_NONE ? "TC" : "", tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { enum tc_port tc_port = intel_port_to_tc(display, port); - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c%s/PHY %s%c", port_name(port), @@ -5182,7 +5157,7 @@ void intel_ddi_init(struct intel_display *display, tc_port != TC_PORT_NONE ? "TC" : "", tc_port != TC_PORT_NONE ? 
tc_port_name(tc_port) : phy_name(phy)); } else { - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c/PHY %c", port_name(port), phy_name(phy)); } @@ -5218,32 +5193,32 @@ void intel_ddi_init(struct intel_display *display, encoder->cloneable = 0; encoder->pipe_mask = ~0; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { encoder->enable_clock = intel_mtl_pll_enable; encoder->disable_clock = intel_mtl_pll_disable; encoder->port_pll_type = intel_mtl_port_pll_type; encoder->get_config = mtl_ddi_get_config; - } else if (IS_DG2(dev_priv)) { + } else if (display->platform.dg2) { encoder->enable_clock = intel_mpllb_enable; encoder->disable_clock = intel_mpllb_disable; encoder->get_config = dg2_ddi_get_config; - } else if (IS_ALDERLAKE_S(dev_priv)) { + } else if (display->platform.alderlake_s) { encoder->enable_clock = adls_ddi_enable_clock; encoder->disable_clock = adls_ddi_disable_clock; encoder->is_clock_enabled = adls_ddi_is_clock_enabled; encoder->get_config = adls_ddi_get_config; - } else if (IS_ROCKETLAKE(dev_priv)) { + } else if (display->platform.rocketlake) { encoder->enable_clock = rkl_ddi_enable_clock; encoder->disable_clock = rkl_ddi_disable_clock; encoder->is_clock_enabled = rkl_ddi_is_clock_enabled; encoder->get_config = rkl_ddi_get_config; - } else if (IS_DG1(dev_priv)) { + } else if (display->platform.dg1) { encoder->enable_clock = dg1_ddi_enable_clock; encoder->disable_clock = dg1_ddi_disable_clock; encoder->is_clock_enabled = dg1_ddi_is_clock_enabled; encoder->get_config = dg1_ddi_get_config; - } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { - if (intel_ddi_is_tc(dev_priv, port)) { + } else if (display->platform.jasperlake || display->platform.elkhartlake) { + if (intel_ddi_is_tc(display, port)) { encoder->enable_clock = jsl_ddi_tc_enable_clock; encoder->disable_clock = jsl_ddi_tc_disable_clock; encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; @@ -5255,8 +5230,8 @@ void intel_ddi_init(struct intel_display *display, encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; encoder->get_config = icl_ddi_combo_get_config; } - } else if (DISPLAY_VER(dev_priv) >= 11) { - if (intel_ddi_is_tc(dev_priv, port)) { + } else if (DISPLAY_VER(display) >= 11) { + if (intel_ddi_is_tc(display, port)) { encoder->enable_clock = icl_ddi_tc_enable_clock; encoder->disable_clock = icl_ddi_tc_disable_clock; encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; @@ -5268,36 +5243,36 @@ void intel_ddi_init(struct intel_display *display, encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; encoder->get_config = icl_ddi_combo_get_config; } - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + } else if (display->platform.geminilake || display->platform.broxton) { /* BXT/GLK have fixed PLL->port mapping */ encoder->get_config = bxt_ddi_get_config; - } else if (DISPLAY_VER(dev_priv) == 9) { + } else if (DISPLAY_VER(display) == 9) { encoder->enable_clock = skl_ddi_enable_clock; encoder->disable_clock = skl_ddi_disable_clock; encoder->is_clock_enabled = skl_ddi_is_clock_enabled; encoder->get_config = skl_ddi_get_config; - } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + } else if (display->platform.broadwell || display->platform.haswell) { encoder->enable_clock = hsw_ddi_enable_clock; encoder->disable_clock = hsw_ddi_disable_clock; encoder->is_clock_enabled = hsw_ddi_is_clock_enabled; encoder->get_config = 
hsw_ddi_get_config; } - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { encoder->set_signal_levels = intel_cx0_phy_set_signal_levels; - } else if (IS_DG2(dev_priv)) { + } else if (display->platform.dg2) { encoder->set_signal_levels = intel_snps_phy_set_signal_levels; - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { if (intel_encoder_is_combo(encoder)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels; - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { if (intel_encoder_is_combo(encoder)) encoder->set_signal_levels = icl_combo_phy_set_signal_levels; else encoder->set_signal_levels = icl_mg_phy_set_signal_levels; - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + } else if (display->platform.geminilake || display->platform.broxton) { encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels; } else { encoder->set_signal_levels = hsw_set_signal_levels; @@ -5305,29 +5280,29 @@ void intel_ddi_init(struct intel_display *display, intel_ddi_buf_trans_init(encoder); - if (DISPLAY_VER(dev_priv) >= 13) - encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port); - else if (IS_DG1(dev_priv)) - encoder->hpd_pin = dg1_hpd_pin(dev_priv, port); - else if (IS_ROCKETLAKE(dev_priv)) - encoder->hpd_pin = rkl_hpd_pin(dev_priv, port); - else if (DISPLAY_VER(dev_priv) >= 12) - encoder->hpd_pin = tgl_hpd_pin(dev_priv, port); - else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) - encoder->hpd_pin = ehl_hpd_pin(dev_priv, port); - else if (DISPLAY_VER(dev_priv) == 11) - encoder->hpd_pin = icl_hpd_pin(dev_priv, port); - else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - encoder->hpd_pin = skl_hpd_pin(dev_priv, port); + if (DISPLAY_VER(display) >= 13) + encoder->hpd_pin = xelpd_hpd_pin(display, port); + else if (display->platform.dg1) + encoder->hpd_pin = dg1_hpd_pin(display, port); + else if (display->platform.rocketlake) + encoder->hpd_pin = rkl_hpd_pin(display, port); + else if (DISPLAY_VER(display) >= 12) + encoder->hpd_pin = tgl_hpd_pin(display, port); + else if (display->platform.jasperlake || display->platform.elkhartlake) + encoder->hpd_pin = ehl_hpd_pin(display, port); + else if (DISPLAY_VER(display) == 11) + encoder->hpd_pin = icl_hpd_pin(display, port); + else if (DISPLAY_VER(display) == 9 && !display->platform.broxton) + encoder->hpd_pin = skl_hpd_pin(display, port); else encoder->hpd_pin = intel_hpd_pin_default(port); - ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port)); + ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port)); dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) || ddi_buf_ctl & DDI_BUF_PORT_REVERSAL; - dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES; + dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES; dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); @@ -5346,7 +5321,7 @@ void intel_ddi_init(struct intel_display *display, if (!is_legacy && init_hdmi) { is_legacy = !init_dp; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n", port_name(port), str_yes_no(init_dp), @@ -5363,24 +5338,24 @@ void intel_ddi_init(struct intel_display *display, goto err; } - drm_WARN_ON(&dev_priv->drm, port > PORT_I); + drm_WARN_ON(display->drm, port > PORT_I); 
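/*
 * A minimal sketch of the conversion idiom this series applies throughout
 * intel_ddi.c; it is not part of the patch and the helper name is
 * hypothetical, modeled on bdw_digital_port_connected() above. The pattern:
 * derive struct intel_display with to_intel_display() instead of fetching
 * drm_i915_private via to_i915(encoder->base.dev), test platforms through
 * the display->platform.<name> flags rather than the IS_<PLATFORM>()
 * macros, and pass display (or display->drm) to DISPLAY_VER(), the
 * intel_de_*() register accessors, and the drm_* logging/WARN helpers.
 * Assumes the usual i915 display headers are in scope.
 */
static bool sketch_port_connected(struct intel_encoder *encoder)
{
	/* was: struct drm_i915_private *i915 = to_i915(encoder->base.dev); */
	struct intel_display *display = to_intel_display(encoder);
	u32 bit = display->hotplug.hpd[encoder->hpd_pin];

	/* was: IS_BROXTON(i915) and drm_dbg_kms(&i915->drm, ...) */
	if (display->platform.broxton)
		drm_dbg_kms(display->drm, "checking HPD on pin %d\n",
			    encoder->hpd_pin);

	/* was: intel_de_read(i915, GEN8_DE_PORT_ISR) */
	return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}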
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port); - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(display) >= 11) { if (intel_encoder_is_tc(encoder)) dig_port->connected = intel_tc_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + } else if (display->platform.geminilake || display->platform.broxton) { dig_port->connected = bdw_digital_port_connected; - } else if (DISPLAY_VER(dev_priv) == 9) { + } else if (DISPLAY_VER(display) == 9) { dig_port->connected = lpt_digital_port_connected; - } else if (IS_BROADWELL(dev_priv)) { + } else if (display->platform.broadwell) { if (port == PORT_A) dig_port->connected = bdw_digital_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else if (IS_HASWELL(dev_priv)) { + } else if (display->platform.haswell) { if (port == PORT_A) dig_port->connected = hsw_digital_port_connected; else @@ -5396,7 +5371,7 @@ void intel_ddi_init(struct intel_display *display, dig_port->hpd_pulse = intel_dp_hpd_pulse; if (dig_port->dp.mso_link_count) - encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display); } /* diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index b7399e9d11cc..655467a6ba87 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -181,20 +181,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg, } static inline int -__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, - u32 mask, unsigned int timeout) +intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout) { return intel_de_wait(display, reg, mask, mask, timeout); } -#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__) static inline int -__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg, - u32 mask, unsigned int timeout) +intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg, + u32 mask, unsigned int timeout) { return intel_de_wait(display, reg, mask, 0, timeout); } -#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__) /* * Unlocked mmio-accessors, think carefully before using these. @@ -205,7 +203,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg, * a more localised lock guarding all access to that bank of registers. */ static inline u32 -__intel_de_read_fw(struct intel_display *display, i915_reg_t reg) +intel_de_read_fw(struct intel_display *display, i915_reg_t reg) { u32 val; @@ -214,15 +212,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg) return val; } -#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__) static inline void -__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) +intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val) { trace_i915_reg_rw(true, reg, val, sizeof(val), true); intel_uncore_write_fw(__to_uncore(display), reg, val); } -#define intel_de_write_fw(p,...) 
__intel_de_write_fw(__to_intel_display(p), __VA_ARGS__) static inline u32 intel_de_read_notrace(struct intel_display *display, i915_reg_t reg) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3b509c70fb58..db524d01e574 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -73,6 +73,7 @@ #include "intel_de.h" #include "intel_display_driver.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" @@ -663,7 +664,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane) { struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -696,7 +696,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, * wait-for-vblank between disabling the plane and the pipe. */ if (HAS_GMCH(display) && - intel_set_memory_cxsr(dev_priv, false)) + intel_set_memory_cxsr(display, false)) intel_plane_initial_vblank_wait(crtc); /* @@ -1050,12 +1050,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; - intel_psr_post_plane_update(state, crtc); - intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) - intel_update_watermarks(dev_priv); + intel_update_watermarks(display); intel_fbc_post_update(state, crtc); @@ -1080,6 +1078,8 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (audio_enabling(old_crtc_state, new_crtc_state)) intel_encoders_audio_enable(state, crtc); + + intel_psr_post_plane_update(state, crtc); } static void intel_post_plane_update_after_readout(struct intel_atomic_state *state, @@ -1168,13 +1168,14 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + intel_psr_pre_plane_update(state, crtc); + if (intel_crtc_vrr_disabling(state, crtc)) { intel_vrr_disable(old_crtc_state); intel_crtc_update_active_timings(old_crtc_state, false); @@ -1185,8 +1186,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_drrs_deactivate(old_crtc_state); - intel_psr_pre_plane_update(state, crtc); - if (hsw_ips_pre_update(state, crtc)) intel_crtc_wait_for_next_vblank(crtc); @@ -1222,7 +1221,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * wait-for-vblank between disabling the plane and the pipe. 
*/ if (HAS_GMCH(display) && old_crtc_state->hw.active && - new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) + new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false)) intel_crtc_wait_for_next_vblank(crtc); /* @@ -1233,7 +1232,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * WaCxSRDisabledForSpriteScaling:ivb */ if (!HAS_GMCH(display) && old_crtc_state->hw.active && - new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv)) + new_crtc_state->disable_cxsr && ilk_disable_cxsr(display)) intel_crtc_wait_for_next_vblank(crtc); /* @@ -1257,7 +1256,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (!intel_initial_watermarks(state, crtc)) if (new_crtc_state->update_wm_pre) - intel_update_watermarks(dev_priv); + intel_update_watermarks(display); } /* @@ -1662,13 +1661,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_pll_enable(state, crtc); - for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { - const struct intel_crtc_state *pipe_crtc_state = - intel_atomic_get_new_crtc_state(state, pipe_crtc); - - if (pipe_crtc_state->shared_dpll) - intel_enable_shared_dpll(pipe_crtc_state); - } + if (new_crtc_state->shared_dpll) + intel_enable_shared_dpll(new_crtc_state); intel_encoders_pre_enable(state, crtc); @@ -1779,8 +1773,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(display, pipe, true); intel_set_pch_fifo_underrun_reporting(display, pipe, true); - - intel_disable_shared_dpll(old_crtc_state); } static void hsw_crtc_disable(struct intel_atomic_state *state, @@ -1799,12 +1791,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); - for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { - const struct intel_crtc_state *old_pipe_crtc_state = - intel_atomic_get_old_crtc_state(state, pipe_crtc); - - intel_disable_shared_dpll(old_pipe_crtc_state); - } + intel_disable_shared_dpll(old_crtc_state); intel_encoders_post_pll_disable(state, crtc); @@ -2083,7 +2070,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (drm_WARN_ON(display->drm, crtc->active)) @@ -2107,7 +2093,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, intel_color_modeset(new_crtc_state); if (!intel_initial_watermarks(state, crtc)) - intel_update_watermarks(dev_priv); + intel_update_watermarks(display); intel_enable_transcoder(new_crtc_state); intel_crtc_vblank_on(new_crtc_state); @@ -2123,7 +2109,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; @@ -2147,9 +2132,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (display->platform.cherryview) - chv_disable_pll(dev_priv, pipe); + chv_disable_pll(display, pipe); else if (display->platform.valleyview) - vlv_disable_pll(dev_priv, pipe); + 
vlv_disable_pll(display, pipe); else i9xx_disable_pll(old_crtc_state); } @@ -2160,7 +2145,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(display, pipe, false); if (!display->funcs.wm->initial_watermarks) - intel_update_watermarks(dev_priv); + intel_update_watermarks(display); /* clock the pipe down to 640x480@60 to potentially save power */ if (display->platform.i830) @@ -2343,7 +2328,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); intel_joiner_compute_pipe_src(crtc_state); @@ -2362,7 +2346,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_is_dual_link_lvds(i915)) { + intel_is_dual_link_lvds(display)) { drm_dbg_kms(display->drm, "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", crtc->base.base.id, crtc->base.name); @@ -2639,6 +2623,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, PIPE_LINK_N2(display, transcoder)); } +static bool +transcoder_has_vrr(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder); +} + static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -2703,6 +2696,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); + /* + * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal + * bits are not required. Since the support for these bits is going to + * be deprecated in upcoming platforms, avoid writing these bits for the + * platforms that do not use legacy Timing Generator. + */ + if (intel_vrr_always_use_vrr_tg(display)) + crtc_vtotal = 1; + intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1)); @@ -2764,12 +2766,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc VBLANK_START(crtc_vblank_start - 1) | VBLANK_END(crtc_vblank_end - 1)); /* + * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal + * bits are not required. Since the support for these bits is going to + * be deprecated in upcoming platforms, avoid writing these bits for the + * platforms that do not use legacy Timing Generator. + */ + if (intel_vrr_always_use_vrr_tg(display)) + crtc_vtotal = 1; + + /* * The double buffer latch point for TRANS_VTOTAL * is the transcoder's undelayed vblank. 
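 *
 * Editorial note: with crtc_vtotal forced to 1 above on platforms that
 * always use the VRR timing generator, VTOTAL(crtc_vtotal - 1) below
 * programs the deprecated Vtotal field to 0, leaving the effective
 * vertical total entirely to the VRR timing generator (an assumption
 * based on the comment earlier in this hunk).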
*/ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder), VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1)); + + intel_vrr_set_fixed_rr_timings(crtc_state); + intel_vrr_transcoder_enable(crtc_state); } static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) @@ -3835,7 +3849,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, struct intel_display_power_domain_set *power_domain_set) { struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder; enum port port; u32 tmp; @@ -3857,7 +3870,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, * registers/MIPI[BXT]. We can break out here early, since we * need the same DSI PLL to be enabled for both DSI ports. */ - if (!bxt_dsi_pll_is_enabled(dev_priv)) + if (!bxt_dsi_pll_is_enabled(display)) break; /* XXX: this works for video mode only */ @@ -3920,7 +3933,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, DISPLAY_VER(display) >= 11) intel_get_transcoder_timings(crtc, pipe_config); - if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) + if (transcoder_has_vrr(pipe_config)) intel_vrr_get_config(pipe_config); intel_get_pipe_src_size(crtc, pipe_config); @@ -4147,8 +4160,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int linetime_wm; @@ -4161,7 +4172,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) /* Display WA #1135: BXT:ALL GLK:ALL */ if ((display->platform.geminilake || display->platform.broxton) && - skl_watermark_ipc_enabled(dev_priv)) + skl_watermark_ipc_enabled(display)) linetime_wm /= 2; return min(linetime_wm, 0x1ff); @@ -5387,8 +5398,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(vrr.vmin); PIPE_CONF_CHECK_I(vrr.vmax); PIPE_CONF_CHECK_I(vrr.flipline); - PIPE_CONF_CHECK_I(vrr.pipeline_full); - PIPE_CONF_CHECK_I(vrr.guardband); PIPE_CONF_CHECK_I(vrr.vsync_start); PIPE_CONF_CHECK_I(vrr.vsync_end); PIPE_CONF_CHECK_LLI(cmrr.cmrr_m); @@ -5396,6 +5405,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(cmrr.enable); } + if (!fastset || intel_vrr_always_use_vrr_tg(display)) { + PIPE_CONF_CHECK_I(vrr.pipeline_full); + PIPE_CONF_CHECK_I(vrr.guardband); + } + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_LLI @@ -6429,7 +6443,7 @@ int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - ret = intel_bw_atomic_check(state); + ret = intel_bw_atomic_check(state, any_ms); if (ret) goto fail; @@ -7231,7 +7245,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, static void intel_atomic_commit_tail(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(display->drm); + struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; @@ -7445,7 +7459,7 @@ static void 
intel_atomic_commit_tail(struct intel_atomic_state *state) * toggling overhead at and above 60 FPS. */ intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -7517,10 +7531,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, { struct intel_display *display = to_intel_display(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); - struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + state->wakeref = intel_display_rpm_get(display); /* * The intel_legacy_cursor_update() fast path takes care @@ -7554,7 +7567,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, if (ret) { drm_dbg_atomic(display->drm, "Preparing state failed with %i\n", ret); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); return ret; } @@ -7564,7 +7577,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, if (ret) { drm_atomic_helper_unprepare_planes(dev, &state->base); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); return ret; } @@ -7672,7 +7685,7 @@ void intel_setup_outputs(struct intel_display *display) intel_bios_for_each_encoder(display, intel_ddi_init); if (display->platform.geminilake || display->platform.broxton) - vlv_dsi_init(dev_priv); + vlv_dsi_init(display); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; @@ -7681,7 +7694,7 @@ void intel_setup_outputs(struct intel_display *display) * to prevent the registration of both eDP and LVDS and the * incorrect sharing of the PPS. 
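 *
 * (Editorial gloss: the PPS is the panel power sequencer, which eDP
 * and LVDS would otherwise both claim; probing LVDS first, as done
 * below, avoids that.)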
*/ - intel_lvds_init(dev_priv); + intel_lvds_init(display); intel_crt_init(display); dpd_is_edp = intel_dp_is_port_edp(display, PORT_D); @@ -7756,15 +7769,15 @@ void intel_setup_outputs(struct intel_display *display) g4x_hdmi_init(display, CHV_HDMID, PORT_D); } - vlv_dsi_init(dev_priv); + vlv_dsi_init(display); } else if (display->platform.pineview) { - intel_lvds_init(dev_priv); + intel_lvds_init(display); intel_crt_init(display); } else if (IS_DISPLAY_VER(display, 3, 4)) { bool found = false; if (display->platform.mobile) - intel_lvds_init(dev_priv); + intel_lvds_init(display); intel_crt_init(display); @@ -7806,10 +7819,10 @@ void intel_setup_outputs(struct intel_display *display) intel_tv_init(display); } else if (DISPLAY_VER(display) == 2) { if (display->platform.i85x) - intel_lvds_init(dev_priv); + intel_lvds_init(display); intel_crt_init(display); - intel_dvo_init(dev_priv); + intel_dvo_init(display); } for_each_intel_encoder(display->drm, encoder) { @@ -7819,7 +7832,7 @@ void intel_setup_outputs(struct intel_display *display) intel_encoder_possible_clones(encoder); } - intel_init_pch_refclk(dev_priv); + intel_init_pch_refclk(display); drm_helper_move_panel_connectors_to_head(display->drm); } @@ -8083,6 +8096,9 @@ retry: goto out; } + if (!crtc_state->hw.active) + crtc_state->inherited = false; + if (crtc_state->hw.active) { struct intel_encoder *encoder; diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index eeb7ae3eaea8..eb6d6f2d0f75 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -80,7 +80,7 @@ struct intel_display_funcs { /* functions used for watermark calcs for display. */ struct intel_wm_funcs { /* update_wm is for legacy wm management */ - void (*update_wm)(struct drm_i915_private *dev_priv); + void (*update_wm)(struct intel_display *display); int (*compute_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*initial_watermarks)(struct intel_atomic_state *state, @@ -90,8 +90,8 @@ struct intel_wm_funcs { void (*optimize_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); - void (*get_hw_state)(struct drm_i915_private *i915); - void (*sanitize)(struct drm_i915_private *i915); + void (*get_hw_state)(struct intel_display *display); + void (*sanitize)(struct intel_display *display); }; struct intel_audio_state { @@ -160,6 +160,7 @@ struct intel_hotplug { struct { unsigned long last_jiffies; int count; + int blocked_count; enum { HPD_ENABLED = 0, HPD_DISABLED = 1, @@ -170,8 +171,8 @@ struct intel_hotplug { u32 retry_bits; struct delayed_work reenable_work; - u32 long_port_mask; - u32 short_port_mask; + u32 long_hpd_pin_mask; + u32 short_hpd_pin_mask; struct work_struct dig_port_work; struct work_struct poll_init_work; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index fdedf65bee53..4c208fdb9137 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -24,6 +24,7 @@ #include "intel_display_debugfs_params.h" #include "intel_display_power.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" @@ -52,8 +53,11 @@ static struct intel_display *node_to_intel_display(struct drm_info_node *node) static 
int intel_display_caps(struct seq_file *m, void *data) { struct intel_display *display = node_to_intel_display(m->private); + struct drm_i915_private *i915 = to_i915(display->drm); struct drm_printer p = drm_seq_file_printer(m); + drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(i915)); + intel_display_device_info_print(DISPLAY_INFO(display), DISPLAY_RUNTIME_INFO(display), &p); intel_display_params_dump(&display->params, display->drm->driver->name, &p); @@ -580,13 +584,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) static int i915_display_info(struct seq_file *m, void *unused) { struct intel_display *display = node_to_intel_display(m->private); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); drm_modeset_lock_all(display->drm); @@ -605,18 +608,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_modeset_unlock_all(display->drm); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_display_capabilities(struct seq_file *m, void *unused) -{ - struct intel_display *display = node_to_intel_display(m->private); - struct drm_printer p = drm_seq_file_printer(m); - - intel_display_device_info_print(DISPLAY_INFO(display), - DISPLAY_RUNTIME_INFO(display), &p); + intel_display_rpm_put(display, wakeref); return 0; } @@ -690,14 +682,11 @@ static bool intel_lpsp_power_well_enabled(struct intel_display *display, enum i915_power_well_id power_well_id) { - struct drm_i915_private *i915 = to_i915(display->drm); - intel_wakeref_t wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - is_enabled = intel_display_power_well_is_enabled(display, - power_well_id); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + with_intel_display_rpm(display) + is_enabled = intel_display_power_well_is_enabled(display, + power_well_id); return is_enabled; } @@ -820,7 +809,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, - {"i915_display_capabilities", i915_display_capabilities, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, {"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_ddb_info", i915_ddb_info, 0}, @@ -829,7 +817,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = { void intel_display_debugfs_register(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root, @@ -844,10 +831,10 @@ void intel_display_debugfs_register(struct intel_display *display) intel_dmc_debugfs_register(display); intel_dp_test_debugfs_register(display); intel_fbc_debugfs_register(display); - intel_hpd_debugfs_register(i915); + intel_hpd_debugfs_register(display); intel_opregion_debugfs_register(display); intel_psr_debugfs_register(display); - intel_wm_debugfs_register(i915); + intel_wm_debugfs_register(display); intel_display_debugfs_params(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 7a3bb77c7af7..87c666792c0d 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -143,9 +143,11 @@ struct intel_display_platforms { #define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) #define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5) +#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13) #define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display)) #define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl) #define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash) +#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20) #define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13) #define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13)) #define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s) @@ -156,9 +158,9 @@ struct intel_display_platforms { #define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20) #define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell) #define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4) -#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst) #define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) #define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13) +#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst) #define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb) #define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc) #define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display)) @@ -167,9 +169,10 @@ struct intel_display_platforms { #define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30) #define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg) #define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3) -#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4) #define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake) +#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4) #define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch) +#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug) #define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx) #define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc) #define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell) @@ -190,10 +193,7 @@ struct intel_display_platforms { ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \ HAS_DSC(__display)) #define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11) -#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13) -#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20) #define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask)) -#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug) #define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical) #define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv) diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 31740a677dd8..efee8925987e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ 
b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -82,7 +82,6 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev) void intel_display_driver_init_hw(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_cdclk_state *cdclk_state; if (!HAS_DISPLAY(display)) @@ -94,7 +93,7 @@ void intel_display_driver_init_hw(struct intel_display *display) intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); cdclk_state->logical = cdclk_state->actual = display->cdclk.hw; - intel_display_wa_apply(i915); + intel_display_wa_apply(display); } static const struct drm_mode_config_funcs intel_mode_funcs = { @@ -181,8 +180,6 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display) void intel_display_driver_early_probe(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (!HAS_DISPLAY(display)) return; @@ -193,12 +190,12 @@ void intel_display_driver_early_probe(struct intel_display *display) mutex_init(&display->pps.mutex); mutex_init(&display->hdcp.hdcp_mutex); - intel_display_irq_init(i915); + intel_display_irq_init(display); intel_dkl_phy_init(display); intel_color_init_hooks(display); intel_init_cdclk_hooks(display); intel_audio_hooks_init(display); - intel_dpll_init_clock_hook(i915); + intel_dpll_init_clock_hook(display); intel_init_display_hooks(display); intel_fdi_init_hook(display); intel_dmc_wl_init(display); @@ -255,11 +252,11 @@ int intel_display_driver_probe_noirq(struct intel_display *display) if (ret) goto cleanup_vga_client_pw_domain_dmc; - ret = intel_dbuf_init(i915); + ret = intel_dbuf_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; - ret = intel_bw_init(i915); + ret = intel_bw_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; @@ -315,11 +312,9 @@ static void set_display_access(struct intel_display *display, */ void intel_display_driver_enable_user_access(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - set_display_access(display, true, NULL); - intel_hpd_enable_detection_work(i915); + intel_hpd_enable_detection_work(display); } /** @@ -341,9 +336,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display) */ void intel_display_driver_disable_user_access(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - intel_hpd_disable_detection_work(i915); + intel_hpd_disable_detection_work(display); set_display_access(display, false, current); } @@ -429,7 +422,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display) if (!HAS_DISPLAY(display)) return 0; - intel_wm_init(i915); + intel_wm_init(display); intel_panel_sanitize_ssc(display); @@ -483,7 +476,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display) * since the watermark calculation done here will use pstate->fb. */ if (!HAS_GMCH(display)) - ilk_wm_sanitize(i915); + ilk_wm_sanitize(display); return 0; @@ -498,7 +491,6 @@ err_mode_config: /* part #3: call after gem init */ int intel_display_driver_probe(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; if (!HAS_DISPLAY(display)) @@ -524,9 +516,9 @@ int intel_display_driver_probe(struct intel_display *display) intel_overlay_setup(display); /* Only enable hotplug handling once the fbdev is fully set up. 
*/ - intel_hpd_init(i915); + intel_hpd_init(display); - skl_watermark_ipc_init(i915); + skl_watermark_ipc_init(display); return 0; } @@ -558,7 +550,7 @@ void intel_display_driver_register(struct intel_display *display) * fbdev->async_cookie. */ drm_kms_helper_poll_init(display->drm); - intel_hpd_poll_disable(i915); + intel_hpd_poll_disable(display); intel_fbdev_setup(i915); @@ -600,7 +592,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display) * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. */ - intel_hpd_poll_fini(i915); + intel_hpd_poll_fini(display); intel_unregister_dsm_handler(); @@ -733,7 +725,6 @@ __intel_display_driver_resume(struct intel_display *display, void intel_display_driver_resume(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct drm_atomic_state *state = display->restore.modeset_state; struct drm_modeset_acquire_ctx ctx; int ret; @@ -761,7 +752,7 @@ void intel_display_driver_resume(struct intel_display *display) if (!ret) ret = __intel_display_driver_resume(display, state, &ctx); - skl_watermark_ipc_update(i915); + skl_watermark_ipc_update(display); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index aa23bb817805..d2a35e3630b1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -14,6 +14,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" +#include "intel_display_rpm.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dmc_wl.h" @@ -115,9 +116,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display, } static void -intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) +intel_handle_vblank(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); drm_crtc_handle_vblank(&crtc->base); @@ -125,59 +125,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) /** * ilk_update_display_irq - update DEIMR - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ilk_update_display_irq(struct drm_i915_private *dev_priv, +void ilk_update_display_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); new_val = dev_priv->irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->irq_mask && - !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { + !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { dev_priv->irq_mask = new_val; intel_de_write(display, DEIMR, dev_priv->irq_mask); intel_de_posting_read(display, DEIMR); } } -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) +void ilk_enable_display_irq(struct intel_display *display, u32 bits) { - ilk_update_display_irq(i915, bits, bits); + 
ilk_update_display_irq(display, bits, bits); } -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) +void ilk_disable_display_irq(struct intel_display *display, u32 bits) { - ilk_update_display_irq(i915, bits, 0); + ilk_update_display_irq(display, bits, 0); } /** * bdw_update_port_irq - update DE port interrupt - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void bdw_update_port_irq(struct drm_i915_private *dev_priv, +void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 new_val; u32 old_val; lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; old_val = intel_de_read(display, GEN8_DE_PORT_IMR); @@ -194,83 +194,83 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv, /** * bdw_update_pipe_irq - update DE pipe interrupt - * @dev_priv: driver private + * @display: display device * @pipe: pipe whose interrupt to update * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, +static void bdw_update_pipe_irq(struct intel_display *display, enum pipe pipe, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; - new_val = dev_priv->display.irq.de_irq_mask[pipe]; + new_val = display->irq.de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) { - dev_priv->display.irq.de_irq_mask[pipe] = new_val; + if (new_val != display->irq.de_irq_mask[pipe]) { + display->irq.de_irq_mask[pipe] = new_val; intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); } } -void bdw_enable_pipe_irq(struct drm_i915_private *i915, +void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits) { - bdw_update_pipe_irq(i915, pipe, bits, bits); + bdw_update_pipe_irq(display, pipe, bits, bits); } -void bdw_disable_pipe_irq(struct drm_i915_private *i915, +void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits) { - bdw_update_pipe_irq(i915, pipe, bits, 0); + bdw_update_pipe_irq(display, pipe, bits, 0); } /** * ibx_display_interrupt_update - update SDEIMR - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, +void ibx_display_interrupt_update(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct 
intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 sdeimr = intel_de_read(display, SDEIMR); sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); lockdep_assert_held(&dev_priv->irq_lock); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; intel_de_write(display, SDEIMR, sdeimr); intel_de_posting_read(display, SDEIMR); } -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) +void ibx_enable_display_interrupt(struct intel_display *display, u32 bits) { - ibx_display_interrupt_update(i915, bits, bits); + ibx_display_interrupt_update(display, bits, bits); } -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) +void ibx_disable_display_interrupt(struct intel_display *display, u32 bits) { - ibx_display_interrupt_update(i915, bits, 0); + ibx_display_interrupt_update(display, bits, 0); } u32 i915_pipestat_enable_mask(struct intel_display *display, @@ -318,48 +318,48 @@ out: return enable_mask; } -void i915_enable_pipestat(struct drm_i915_private *dev_priv, +void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { - struct intel_display *display = &dev_priv->display; - i915_reg_t reg = PIPESTAT(dev_priv, pipe); + struct drm_i915_private *dev_priv = to_i915(display->drm); + i915_reg_t reg = PIPESTAT(display, pipe); u32 enable_mask; - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) + if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) return; - dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask; + display->irq.pipestat_irq_mask[pipe] |= status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); intel_de_write(display, reg, enable_mask | status_mask); intel_de_posting_read(display, reg); } -void i915_disable_pipestat(struct drm_i915_private *dev_priv, +void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { - struct intel_display *display = &dev_priv->display; - i915_reg_t reg = PIPESTAT(dev_priv, pipe); + struct drm_i915_private *dev_priv = to_i915(display->drm); + i915_reg_t reg = PIPESTAT(display, pipe); u32 enable_mask; - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0) + if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0) return; - dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask; + display->irq.pipestat_irq_mask[pipe] &= ~status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); intel_de_write(display, reg, enable_mask | 
status_mask); @@ -368,24 +368,22 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, static bool i915_has_legacy_blc_interrupt(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - if (IS_I85X(i915)) + if (display->platform.i85x) return true; - if (IS_PINEVIEW(i915)) + if (display->platform.pineview) return true; - return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915); + return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile; } /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion - * @dev_priv: i915 device private + * @display: display device */ -void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) +void i915_enable_asle_pipestat(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); if (!intel_opregion_asle_present(display)) return; @@ -395,22 +393,21 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (DISPLAY_VER(dev_priv) >= 4) - i915_enable_pipestat(dev_priv, PIPE_A, + i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); + if (DISPLAY_VER(display) >= 4) + i915_enable_pipestat(display, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); } #if IS_ENABLED(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void display_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) { - struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -427,7 +424,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, * don't trust that one either. 
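 *
 * Concretely, the check below drops the first CRC result everywhere,
 * and the second one as well on DISPLAY_VER >= 8 (pipe_crc->skipped
 * == 1), matching the suspect results described above.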
*/ if (pipe_crc->skipped <= 0 || - (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { + (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) { pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); return; @@ -440,20 +437,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, } #else static inline void -display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +display_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) {} #endif -static void flip_done_handler(struct drm_i915_private *i915, +static void flip_done_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &i915->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); - spin_lock(&i915->drm.event_lock); + spin_lock(&display->drm->event_lock); if (crtc->flip_done_event) { trace_intel_crtc_flip_done(crtc); @@ -461,25 +457,21 @@ static void flip_done_handler(struct drm_i915_private *i915, crtc->flip_done_event = NULL; } - spin_unlock(&i915->drm.event_lock); + spin_unlock(&display->drm->event_lock); } -static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void hsw_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; - - display_pipe_crc_irq_handler(dev_priv, pipe, + display_pipe_crc_irq_handler(display, pipe, intel_de_read(display, PIPE_CRC_RES_HSW(pipe)), 0, 0, 0, 0); } -static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void ivb_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; - - display_pipe_crc_irq_handler(dev_priv, pipe, + display_pipe_crc_irq_handler(display, pipe, intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)), intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)), intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)), @@ -487,58 +479,56 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe))); } -static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void i9xx_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; u32 res1, res2; - if (DISPLAY_VER(dev_priv) >= 3) - res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe)); + if (DISPLAY_VER(display) >= 3) + res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe)); else res1 = 0; - if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) - res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe)); + if (DISPLAY_VER(display) >= 5 || display->platform.g4x) + res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe)); else res2 = 0; - display_pipe_crc_irq_handler(dev_priv, pipe, - intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)), - intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)), - intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)), + display_pipe_crc_irq_handler(display, pipe, + intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)), + intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)), + intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)), res1, res2); } -static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +static void i9xx_pipestat_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + 
for_each_pipe(display, pipe) { intel_de_write(display, - PIPESTAT(dev_priv, pipe), + PIPESTAT(display, pipe), PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); - dev_priv->display.irq.pipestat_irq_mask[pipe] = 0; + display->irq.pipestat_irq_mask[pipe] = 0; } } -void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, +void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; spin_lock(&dev_priv->irq_lock); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - !dev_priv->display.irq.vlv_display_irqs_enabled) { + if ((display->platform.valleyview || display->platform.cherryview) && + !display->irq.vlv_display_irqs_enabled) { spin_unlock(&dev_priv->irq_lock); return; } - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { i915_reg_t reg; u32 status_mask, enable_mask, iir_bit = 0; @@ -566,12 +556,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, break; } if (iir & iir_bit) - status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe]; + status_mask |= display->irq.pipestat_irq_mask[pipe]; if (!status_mask) continue; - reg = PIPESTAT(dev_priv, pipe); + reg = PIPESTAT(display, pipe); pipe_stats[pipe] = intel_de_read(display, reg) & status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); @@ -592,22 +582,21 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->irq_lock); } -void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; bool blc_event = false; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -617,22 +606,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_opregion_asle_intr(display); } -void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; bool blc_event = false; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -645,21 +633,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_gmbus_irq_handler(display); } -void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]) { - 
struct intel_display *display = &dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -669,18 +656,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_gmbus_irq_handler(display); } -static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +static void ibx_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + ibx_hpd_irq_handler(display, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); - drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", + drm_dbg(display->drm, "PCH audio power change on port %d\n", port_name(port)); } @@ -691,26 +677,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_HDCP_MASK) - drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); + drm_dbg(display->drm, "PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) - drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); + drm_dbg(display->drm, "PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + drm_err(display->drm, "PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + for_each_pipe(display, pipe) + drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), intel_de_read(display, FDI_RX_IIR(pipe))); } if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) - drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); + drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) @@ -753,14 +739,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = { {} }; -static void ivb_err_int_handler(struct drm_i915_private *dev_priv) +static void ivb_err_int_handler(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 err_int = intel_de_read(display, GEN7_ERR_INT); enum pipe pipe; if (err_int & ERR_INT_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); + drm_err(display->drm, "Poison interrupt\n"); if (err_int & ERR_INT_INVALID_GTT_PTE) drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); @@ -768,17 +753,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) if (err_int & ERR_INT_INVALID_PTE_DATA) drm_err_ratelimited(display->drm, "Invalid PTE data\n"); - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { u32 fault_errors; if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(display, pipe); 
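/*
 * Editorial note: ERR_INT also carries the per-pipe CRC-done bits;
 * IVB and HSW expose the CRC results through different registers,
 * hence the platform split below.
 */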
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev_priv)) - ivb_pipe_crc_irq_handler(dev_priv, pipe); + if (display->platform.ivybridge) + ivb_pipe_crc_irq_handler(display, pipe); else - hsw_pipe_crc_irq_handler(dev_priv, pipe); + hsw_pipe_crc_irq_handler(display, pipe); } fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe); @@ -790,34 +775,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) intel_de_write(display, GEN7_ERR_INT, err_int); } -static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) +static void cpt_serr_int_handler(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 serr_int = intel_de_read(display, SERR_INT); enum pipe pipe; if (serr_int & SERR_INT_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + drm_err(display->drm, "PCH poison interrupt\n"); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) intel_pch_fifo_underrun_irq_handler(display, pipe); intel_de_write(display, SERR_INT, serr_int); } -static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +static void cpt_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + ibx_hpd_irq_handler(display, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> SDE_AUDIO_POWER_SHIFT_CPT); - drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", + drm_dbg(display->drm, "PCH audio power change on port %c\n", port_name(port)); } @@ -828,20 +811,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) - drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); + drm_dbg(display->drm, "Audio CP request interrupt\n"); if (pch_iir & SDE_AUDIO_CP_CHG_CPT) - drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); + drm_dbg(display->drm, "Audio CP change interrupt\n"); if (pch_iir & SDE_FDI_MASK_CPT) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + for_each_pipe(display, pipe) + drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), intel_de_read(display, FDI_RX_IIR(pipe))); } if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev_priv); + cpt_serr_int_handler(display); } static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe) @@ -894,14 +877,14 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display) } } -void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); if (de_iir & DE_AUX_CHANNEL_A) intel_dp_aux_irq_handler(display); @@ -910,23 +893,23 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) intel_opregion_asle_intr(display); if (de_iir & DE_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); + drm_err(display->drm, "Poison interrupt\n"); if (de_iir & DE_GTT_FAULT) ilk_gtt_fault_irq_handler(display); - for_each_pipe(dev_priv, 
pipe) { + for_each_pipe(display, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe)) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (de_iir & DE_PLANE_FLIP_DONE(pipe)) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(display, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); } /* check event from PCH */ @@ -934,34 +917,34 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) u32 pch_iir = intel_de_read(display, SDEIIR); if (HAS_PCH_CPT(dev_priv)) - cpt_irq_handler(dev_priv, pch_iir); + cpt_irq_handler(display, pch_iir); else - ibx_irq_handler(dev_priv, pch_iir); + ibx_irq_handler(display, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ intel_de_write(display, SDEIIR, pch_iir); } - if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) + if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT) gen5_rps_irq_handler(&to_gt(dev_priv)->rps); } -void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev_priv); + ivb_err_int_handler(display); if (de_iir & DE_EDP_PSR_INT_HSW) { struct intel_encoder *encoder; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 psr_iir; @@ -977,35 +960,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(display); - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); } /* check event from PCH */ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = intel_de_read(display, SDEIIR); - cpt_irq_handler(dev_priv, pch_iir); + cpt_irq_handler(display, pch_iir); /* clear PCH hotplug event before clear CPU irq */ intel_de_write(display, SDEIIR, pch_iir); } } -static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) +static u32 gen8_de_port_aux_mask(struct intel_display *display) { u32 mask; - if (DISPLAY_VER(dev_priv) >= 20) + if (DISPLAY_VER(display) >= 20) return 0; - else if (DISPLAY_VER(dev_priv) >= 14) + else if (DISPLAY_VER(display) >= 14) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB; - else if (DISPLAY_VER(dev_priv) >= 13) + else if (DISPLAY_VER(display) >= 13) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | @@ -1015,7 +998,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) TGL_DE_PORT_AUX_USBC2 | TGL_DE_PORT_AUX_USBC3 | TGL_DE_PORT_AUX_USBC4; - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(display) >= 12) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | @@ -1027,12 +1010,12 @@ static u32 
gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) TGL_DE_PORT_AUX_USBC6; mask = GEN8_AUX_CHANNEL_A; - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(display) >= 9) mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { mask |= ICL_AUX_CHANNEL_F; mask |= ICL_AUX_CHANNEL_E; } @@ -1040,10 +1023,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) return mask; } -static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) +static u32 gen8_de_pipe_fault_mask(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - if (DISPLAY_VER(display) >= 14) return MTL_PIPEDMC_ATS_FAULT | MTL_PLANE_ATS_FAULT | @@ -1195,15 +1176,14 @@ gen8_pipe_fault_handlers(struct intel_display *display) return bdw_pipe_fault_handlers; } -static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) +static void intel_pmdemand_irq_handler(struct intel_display *display) { - wake_up_all(&dev_priv->display.pmdemand.waitqueue); + wake_up_all(&display->pmdemand.waitqueue); } static void -gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +gen8_de_misc_irq_handler(struct intel_display *display, u32 iir) { - struct intel_display *display = &dev_priv->display; bool found = false; if (HAS_DBUF_OVERLAP_DETECTION(display)) { @@ -1213,20 +1193,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } } - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { if (iir & (XELPDP_PMDEMAND_RSP | XELPDP_PMDEMAND_RSPTOUT_ERR)) { if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Error waiting for Punit PM Demand Response\n"); - intel_pmdemand_irq_handler(dev_priv); + intel_pmdemand_irq_handler(display); found = true; } if (iir & XELPDP_RM_TIMEOUT) { u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE); - drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val); + drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val); found = true; } } else if (iir & GEN8_DE_MISC_GSE) { @@ -1239,12 +1219,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) u32 psr_iir; i915_reg_t iir_reg; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (DISPLAY_VER(dev_priv) >= 12) - iir_reg = TRANS_PSR_IIR(dev_priv, - intel_dp->psr.transcoder); + if (DISPLAY_VER(display) >= 12) + iir_reg = TRANS_PSR_IIR(display, + intel_dp->psr.transcoder); else iir_reg = EDP_PSR_IIR; @@ -1256,19 +1236,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_psr_irq_handler(intel_dp, psr_iir); /* prior to GEN12 there is only one EDP PSR */ - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(display) < 12) break; } } if (!found) - drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); + drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); } -static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, +static void gen11_dsi_te_interrupt_handler(struct intel_display *display, u32 te_trigger) { - struct intel_display *display = &dev_priv->display; enum pipe pipe = INVALID_PIPE; enum transcoder dsi_trans; enum port port; @@ -1278,7 +1257,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, * In case of dual link, TE comes from DSI_1 * this is to check if dual link is
enabled */ - val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0)); + val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0)); val &= PORT_SYNC_MODE_ENABLE; /* @@ -1294,12 +1273,12 @@ val = val & OP_MODE_MASK; if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { - drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n"); + drm_err(display->drm, "DSI transcoder not configured in command mode\n"); return; } /* Get PIPE for handling VBLANK event */ - val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans)); switch (val & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: pipe = PIPE_A; @@ -1311,28 +1290,28 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, pipe = PIPE_C; break; default: - drm_err(&dev_priv->drm, "Invalid PIPE\n"); + drm_err(display->drm, "Invalid PIPE\n"); return; } - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); /* clear TE in dsi IIR */ port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0); } -static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) +static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) return GEN9_PIPE_PLANE1_FLIP_DONE; else return GEN8_PIPE_PRIMARY_FLIP_DONE; } -static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) +static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 pica_ier = 0; *pica_iir = 0; @@ -1346,7 +1325,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i * their flags both in the PICA and SDE IIR.
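 *
 * Summarising the sequence below editorially: the PICA interrupts are
 * masked by clearing PICAINTERRUPT_IER (the rmw returns the old value),
 * PICAINTERRUPT_IIR is latched into *pica_iir, and the saved IER value
 * is restored once the IIRs have been acked.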
*/ if (*pch_iir & SDE_PICAINTERRUPT) { - drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); + drm_WARN_ON(display->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0); *pica_iir = intel_de_read(display, PICAINTERRUPT_IIR); @@ -1359,32 +1338,32 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i intel_de_write(display, PICAINTERRUPT_IER, pica_ier); } -void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) +void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 iir; enum pipe pipe; - drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); + drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display)); if (master_ctl & GEN8_DE_MISC_IRQ) { iir = intel_de_read(display, GEN8_DE_MISC_IIR); if (iir) { intel_de_write(display, GEN8_DE_MISC_IIR, iir); - gen8_de_misc_irq_handler(dev_priv, iir); + gen8_de_misc_irq_handler(display, iir); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE MISC)!\n"); } } - if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { + if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { iir = intel_de_read(display, GEN11_DE_HPD_IIR); if (iir) { intel_de_write(display, GEN11_DE_HPD_IIR, iir); - gen11_hpd_irq_handler(dev_priv, iir); + gen11_hpd_irq_handler(display, iir); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied, (DE HPD)!\n"); } } @@ -1396,52 +1375,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) intel_de_write(display, GEN8_DE_PORT_IIR, iir); - if (iir & gen8_de_port_aux_mask(dev_priv)) { + if (iir & gen8_de_port_aux_mask(display)) { intel_dp_aux_irq_handler(display); found = true; } - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { - bxt_hpd_irq_handler(dev_priv, hotplug_trigger); + bxt_hpd_irq_handler(display, hotplug_trigger); found = true; } - } else if (IS_BROADWELL(dev_priv)) { + } else if (display->platform.broadwell) { u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); found = true; } } - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + if ((display->platform.geminilake || display->platform.broxton) && (iir & BXT_DE_PORT_GMBUS)) { intel_gmbus_irq_handler(display); found = true; } - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(display) >= 11) { u32 te_trigger = iir & (DSI0_TE | DSI1_TE); if (te_trigger) { - gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); + gen11_dsi_te_interrupt_handler(display, te_trigger); found = true; } } if (!found) - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "Unexpected DE Port interrupt\n"); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE PORT)!\n"); } } - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { u32 fault_errors; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) @@ -1449,7 +1428,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) iir = intel_de_read(display, 
GEN8_DE_PIPE_IIR(pipe)); if (!iir) { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE PIPE)!\n"); continue; } @@ -1457,29 +1436,29 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); - if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) - flip_done_handler(dev_priv, pipe); + if (iir & gen8_de_pipe_flip_done_mask(display)) + flip_done_handler(display, pipe); - if (HAS_DSB(dev_priv)) { + if (HAS_DSB(display)) { if (iir & GEN12_DSB_INT(INTEL_DSB_0)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_0); if (iir & GEN12_DSB_INT(INTEL_DSB_1)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_1); if (iir & GEN12_DSB_INT(INTEL_DSB_2)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_2); } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev_priv, pipe); + hsw_pipe_crc_irq_handler(display, pipe); if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(display, pipe); - fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); + fault_errors = iir & gen8_de_pipe_fault_mask(display); if (fault_errors) intel_pipe_fault_irq_handler(display, gen8_pipe_fault_handlers(display), @@ -1495,31 +1474,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. */ - gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); + gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir); if (iir) { if (pica_iir) - xelpdp_pica_irq_handler(dev_priv, pica_iir); + xelpdp_pica_irq_handler(display, pica_iir); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_handler(dev_priv, iir); + icp_irq_handler(display, iir); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - spt_irq_handler(dev_priv, iir); + spt_irq_handler(display, iir); else - cpt_irq_handler(dev_priv, iir); + cpt_irq_handler(display, iir); } else { /* * Like on previous PCH there seems to be something * fishy going on with forwarding PCH interrupts. */ - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "The master control interrupt lied (SDE)!\n"); } } } -u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) +u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) { - struct intel_display *display = &i915->display; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -1532,20 +1510,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) return iir; } -void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) +void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir) { - struct intel_display *display = &i915->display; - if (iir & GEN11_GU_MISC_GSE) intel_opregion_asle_intr(display); } -void gen11_display_irq_handler(struct drm_i915_private *i915) +void gen11_display_irq_handler(struct intel_display *display) { - struct intel_display *display = &i915->display; u32 disp_ctl; - disable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_display_rpm_assert_block(display); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. 
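Note how gen11_display_irq_handler() now blocks the runtime-PM wakeref asserts through the display-local wrappers instead of reaching into i915->runtime_pm. For code that needs to actually hold a wakeref, the series adds get/put helpers plus a scoped macro (declared in intel_display_rpm.h further down in this diff). A minimal sketch of their use, with a hypothetical caller body:

	struct ref_tracker *wakeref;

	/* explicit get/put around a block of register access */
	wakeref = intel_display_rpm_get(display);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	intel_display_rpm_put(display, wakeref);

	/* or scoped: the macro takes and drops the wakeref around the body */
	with_intel_display_rpm(display) {
		intel_de_write(display, GEN11_DISPLAY_INT_CTL,
			       GEN11_DISPLAY_IRQ_ENABLE);
	}
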
@@ -1553,16 +1528,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915) disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL); intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0); - gen8_de_irq_handler(i915, disp_ctl); + gen8_de_irq_handler(display, disp_ctl); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - enable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_display_rpm_assert_unblock(display); } -static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) +static void i915gm_irq_cstate_wa_enable(struct intel_display *display) { - struct intel_display *display = &i915->display; - lockdep_assert_held(&i915->drm.vblank_time_lock); + lockdep_assert_held(&display->drm->vblank_time_lock); /* * Vblank/CRC interrupts fail to wake the device up from C2+. @@ -1570,41 +1544,41 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) * the problem. There is a small power cost so we do this * only when vblank/CRC interrupts are actually enabled. */ - if (i915->display.irq.vblank_enabled++ == 0) + if (display->irq.vblank_enabled++ == 0) intel_de_write(display, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } -static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915) +static void i915gm_irq_cstate_wa_disable(struct intel_display *display) { - struct intel_display *display = &i915->display; - lockdep_assert_held(&i915->drm.vblank_time_lock); + lockdep_assert_held(&display->drm->vblank_time_lock); - if (--i915->display.irq.vblank_enabled == 0) + if (--display->irq.vblank_enabled == 0) intel_de_write(display, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } -void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable) +void i915gm_irq_cstate_wa(struct intel_display *display, bool enable) { - spin_lock_irq(&i915->drm.vblank_time_lock); + spin_lock_irq(&display->drm->vblank_time_lock); if (enable) - i915gm_irq_cstate_wa_enable(i915); + i915gm_irq_cstate_wa_enable(display); else - i915gm_irq_cstate_wa_disable(i915); + i915gm_irq_cstate_wa_disable(display); - spin_unlock_irq(&i915->drm.vblank_time_lock); + spin_unlock_irq(&display->drm->vblank_time_lock); } int i8xx_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -1612,41 +1586,43 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) void i8xx_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } int i915gm_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); - i915gm_irq_cstate_wa_enable(i915); + i915gm_irq_cstate_wa_enable(display); return i8xx_enable_vblank(crtc); } void 
i915gm_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); i8xx_disable_vblank(crtc); - i915gm_irq_cstate_wa_disable(i915); + i915gm_irq_cstate_wa_disable(display); } int i965_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, + i915_enable_pipestat(display, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1655,32 +1631,34 @@ int i965_enable_vblank(struct drm_crtc *crtc) void i965_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, + i915_disable_pipestat(display, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } int ilk_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? + u32 bit = DISPLAY_VER(display) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(display, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even though there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated. */ - if (HAS_PSR(dev_priv)) + if (HAS_PSR(display)) drm_crtc_vblank_restore(crtc); return 0; @@ -1688,14 +1666,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc) void ilk_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? + u32 bit = DISPLAY_VER(display) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(display, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1753,13 +1732,13 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) schedule_work(&display->irq.vblank_dc_work); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even if there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated, so check only for PSR. 
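 * drm_crtc_vblank_restore() resynchronizes the software vblank
 * counter when that happens.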
*/ - if (HAS_PSR(dev_priv)) + if (HAS_PSR(display)) drm_crtc_vblank_restore(&crtc->base); return 0; @@ -1777,7 +1756,7 @@ void bdw_disable_vblank(struct drm_crtc *_crtc) return; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0) @@ -1892,11 +1871,11 @@ void vlv_display_error_irq_handler(struct intel_display *display, vlv_page_table_error_irq_handler(display, dpinvgtt); } -static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) +static void _vlv_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); @@ -1904,31 +1883,29 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(to_intel_uncore(display->drm), VLV_ERROR_REGS); - i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); - intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0); + i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0); + intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0); - i9xx_pipestat_irq_reset(dev_priv); + i9xx_pipestat_irq_reset(display); intel_display_irq_regs_reset(display, VLV_IRQ_REGS); dev_priv->irq_mask = ~0u; } -void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +void vlv_display_irq_reset(struct intel_display *display) { - if (dev_priv->display.irq.vlv_display_irqs_enabled) - _vlv_display_irq_reset(dev_priv); + if (display->irq.vlv_display_irqs_enabled) + _vlv_display_irq_reset(display); } -void i9xx_display_irq_reset(struct drm_i915_private *i915) +void i9xx_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &i915->display; - - if (I915_HAS_HOTPLUG(i915)) { - i915_hotplug_interrupt_update(i915, 0xffffffff, 0); - intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0); + if (HAS_HOTPLUG(display)) { + i915_hotplug_interrupt_update(display, 0xffffffff, 0); + intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0); } - i9xx_pipestat_irq_reset(i915); + i9xx_pipestat_irq_reset(display); } static u32 vlv_error_mask(void) @@ -1937,17 +1914,17 @@ static u32 vlv_error_mask(void) return VLV_ERROR_PAGE_TABLE; } -void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) +void vlv_display_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pipestat_mask; u32 enable_mask; enum pipe pipe; - if (!dev_priv->display.irq.vlv_display_irqs_enabled) + if (!display->irq.vlv_display_irqs_enabled) return; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV | DPINVGTT_EN_MASK_CHV); @@ -1961,9 +1938,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_mask); + i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(display, pipe) + 
i915_enable_pipestat(display, pipe, pipestat_mask); enable_mask = I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | @@ -1972,29 +1949,28 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) I915_LPE_PIPE_B_INTERRUPT | I915_MASTER_ERROR_INTERRUPT; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; - drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); + drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u); dev_priv->irq_mask = ~enable_mask; intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); } -void gen8_display_irq_reset(struct drm_i915_private *dev_priv) +void gen8_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; intel_de_write(display, EDP_PSR_IMR, 0xffffffff); intel_de_write(display, EDP_PSR_IIR, 0xffffffff); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); @@ -2003,22 +1979,22 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv) intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); } -void gen11_display_irq_reset(struct drm_i915_private *dev_priv) +void gen11_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0); - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { enum transcoder trans; - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + for_each_cpu_transcoder_masked(display, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); @@ -2026,10 +2002,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) continue; intel_de_write(display, - TRANS_PSR_IMR(dev_priv, trans), + TRANS_PSR_IMR(display, trans), 0xffffffff); intel_de_write(display, - TRANS_PSR_IIR(dev_priv, trans), + TRANS_PSR_IIR(display, trans), 0xffffffff); } } else { @@ -2037,7 +2013,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) intel_de_write(display, EDP_PSR_IIR, 0xffffffff); } - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); @@ -2045,7 +2021,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS); intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS); else intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS); @@ -2054,12 +2030,12 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) intel_display_irq_regs_reset(display, SDE_IRQ_REGS); } -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, +void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask) { - struct intel_display 
*display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | - gen8_de_pipe_flip_done_mask(dev_priv); + gen8_de_pipe_flip_done_mask(display); enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -2069,18 +2045,18 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, return; } - for_each_pipe_masked(dev_priv, pipe, pipe_mask) + for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - dev_priv->display.irq.de_irq_mask[pipe], - ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); + display->irq.de_irq_mask[pipe], + ~display->irq.de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } -void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, +void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -2090,7 +2066,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, return; } - for_each_pipe_masked(dev_priv, pipe, pipe_mask) + for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); spin_unlock_irq(&dev_priv->irq_lock); @@ -2110,9 +2086,9 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, * to avoid races with the irq handler, assuming we have MSI. Shared legacy * interrupts could still race. */ -static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) +static void ibx_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 mask; if (HAS_PCH_NOP(dev_priv)) @@ -2128,40 +2104,45 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); } -void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) +void valleyview_enable_display_irqs(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + lockdep_assert_held(&dev_priv->irq_lock); - if (dev_priv->display.irq.vlv_display_irqs_enabled) + if (display->irq.vlv_display_irqs_enabled) return; - dev_priv->display.irq.vlv_display_irqs_enabled = true; + display->irq.vlv_display_irqs_enabled = true; if (intel_irqs_enabled(dev_priv)) { - _vlv_display_irq_reset(dev_priv); - vlv_display_irq_postinstall(dev_priv); + _vlv_display_irq_reset(display); + vlv_display_irq_postinstall(display); } } -void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) +void valleyview_disable_display_irqs(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + lockdep_assert_held(&dev_priv->irq_lock); - if (!dev_priv->display.irq.vlv_display_irqs_enabled) + if (!display->irq.vlv_display_irqs_enabled) return; - dev_priv->display.irq.vlv_display_irqs_enabled = false; + display->irq.vlv_display_irqs_enabled = false; if (intel_irqs_enabled(dev_priv)) - _vlv_display_irq_reset(dev_priv); + _vlv_display_irq_reset(display); } -void ilk_de_irq_postinstall(struct drm_i915_private *i915) +void ilk_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); + u32 display_mask, extra_mask; - if 
(DISPLAY_VER(i915) >= 7) { + if (DISPLAY_VER(display) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | @@ -2182,59 +2163,59 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) DE_DP_A_HOTPLUG); } - if (IS_HASWELL(i915)) { + if (display->platform.haswell) { intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR); display_mask |= DE_EDP_PSR_INT_HSW; } - if (IS_IRONLAKE_M(i915)) + if (display->platform.ironlake && display->platform.mobile) extra_mask |= DE_PCU_EVENT; i915->irq_mask = ~display_mask; - ibx_irq_postinstall(i915); + ibx_irq_postinstall(display); intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask, display_mask | extra_mask); } -static void mtp_irq_postinstall(struct drm_i915_private *i915); -static void icp_irq_postinstall(struct drm_i915_private *i915); +static void mtp_irq_postinstall(struct intel_display *display); +static void icp_irq_postinstall(struct intel_display *display); -void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) +void gen8_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) | GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; - u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); + u32 de_port_masked = gen8_de_port_aux_mask(display); u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); enum pipe pipe; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VER(dev_priv) >= 14) - mtp_irq_postinstall(dev_priv); + if (DISPLAY_VER(display) >= 14) + mtp_irq_postinstall(display); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_postinstall(dev_priv); + icp_irq_postinstall(display); else if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_postinstall(dev_priv); + ibx_irq_postinstall(display); - if (DISPLAY_VER(dev_priv) < 11) + if (DISPLAY_VER(display) < 11) de_misc_masked |= GEN8_DE_MISC_GSE; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) de_port_masked |= BXT_DE_PORT_GMBUS; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR | XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT; - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { enum port port; if (intel_bios_is_dsi_present(display, &port)) @@ -2244,25 +2225,25 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_DBUF_OVERLAP_DETECTION(display)) de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED; - if (HAS_DSB(dev_priv)) + if (HAS_DSB(display)) de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | GEN12_DSB_INT(INTEL_DSB_1) | GEN12_DSB_INT(INTEL_DSB_2); de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | - gen8_de_pipe_flip_done_mask(dev_priv); + gen8_de_pipe_flip_done_mask(display); de_port_enables = de_port_masked; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; - else if (IS_BROADWELL(dev_priv)) + else if (display->platform.broadwell) de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; - if 
(DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { enum transcoder trans; - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + for_each_cpu_transcoder_masked(display, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); @@ -2270,19 +2251,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) continue; intel_display_irq_regs_assert_irr_is_zero(display, - TRANS_PSR_IIR(dev_priv, trans)); + TRANS_PSR_IIR(display, trans)); } } else { intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR); } - for_each_pipe(dev_priv, pipe) { - dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked; + for_each_pipe(display, pipe) { + display->irq.de_irq_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - dev_priv->display.irq.de_irq_mask[pipe], + display->irq.de_irq_mask[pipe], de_pipe_enables); } @@ -2291,7 +2272,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked); - if (IS_DISPLAY_VER(dev_priv, 11, 13)) { + if (IS_DISPLAY_VER(display, 11, 13)) { u32 de_hpd_masked = 0; u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; @@ -2301,9 +2282,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) } } -static void mtp_irq_postinstall(struct drm_i915_private *i915) +static void mtp_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT; u32 de_hpd_mask = XELPDP_AUX_TC_MASK; u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | @@ -2315,43 +2295,37 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915) intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff); } -static void icp_irq_postinstall(struct drm_i915_private *dev_priv) +static void icp_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 mask = SDE_GMBUS_ICP; intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); } -void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) +void gen11_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - gen8_de_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(display); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } -void dg1_de_irq_postinstall(struct drm_i915_private *i915) +void dg1_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; - - if (!HAS_DISPLAY(i915)) + if (!HAS_DISPLAY(display)) return; - gen8_de_irq_postinstall(i915); + gen8_de_irq_postinstall(display); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } -void intel_display_irq_init(struct drm_i915_private *i915) +void intel_display_irq_init(struct intel_display *display) { - i915->drm.vblank_disable_immediate = true; + display->drm->vblank_disable_immediate = true; - intel_hotplug_irq_init(i915); + intel_hotplug_irq_init(display); - INIT_WORK(&i915->display.irq.vblank_dc_work, - intel_display_vblank_dc_work); + INIT_WORK(&display->irq.vblank_dc_work, intel_display_vblank_dc_work); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h 
b/drivers/gpu/drm/i915/display/intel_display_irq.h index d9867cd0a220..f72727768351 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -12,28 +12,27 @@ enum pipe; struct drm_crtc; -struct drm_i915_private; struct intel_display; -void valleyview_enable_display_irqs(struct drm_i915_private *i915); -void valleyview_disable_display_irqs(struct drm_i915_private *i915); +void valleyview_enable_display_irqs(struct intel_display *display); +void valleyview_disable_display_irqs(struct intel_display *display); -void ilk_update_display_irq(struct drm_i915_private *i915, +void ilk_update_display_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits); -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits); +void ilk_enable_display_irq(struct intel_display *display, u32 bits); +void ilk_disable_display_irq(struct intel_display *display, u32 bits); -void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask); -void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); -void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); +void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); +void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits); +void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits); -void ibx_display_interrupt_update(struct drm_i915_private *i915, +void ibx_display_interrupt_update(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits); -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); +void ibx_enable_display_interrupt(struct intel_display *display, u32 bits); +void ibx_disable_display_interrupt(struct intel_display *display, u32 bits); -void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask); -void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask); +void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask); +void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask); int i8xx_enable_vblank(struct drm_crtc *crtc); int i915gm_enable_vblank(struct drm_crtc *crtc); @@ -46,41 +45,41 @@ void i965_disable_vblank(struct drm_crtc *crtc); void ilk_disable_vblank(struct drm_crtc *crtc); void bdw_disable_vblank(struct drm_crtc *crtc); -void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); -void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); -void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl); -void gen11_display_irq_handler(struct drm_i915_private *i915); +void ivb_display_irq_handler(struct intel_display *display, u32 de_iir); +void ilk_display_irq_handler(struct intel_display *display, u32 de_iir); +void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl); +void gen11_display_irq_handler(struct intel_display *display); -u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl); -void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir); +u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl); +void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir); -void 
i9xx_display_irq_reset(struct drm_i915_private *i915); -void vlv_display_irq_reset(struct drm_i915_private *i915); -void gen8_display_irq_reset(struct drm_i915_private *i915); -void gen11_display_irq_reset(struct drm_i915_private *i915); +void i9xx_display_irq_reset(struct intel_display *display); +void vlv_display_irq_reset(struct intel_display *display); +void gen8_display_irq_reset(struct intel_display *display); +void gen11_display_irq_reset(struct intel_display *display); -void vlv_display_irq_postinstall(struct drm_i915_private *i915); -void ilk_de_irq_postinstall(struct drm_i915_private *i915); -void gen8_de_irq_postinstall(struct drm_i915_private *i915); -void gen11_de_irq_postinstall(struct drm_i915_private *i915); -void dg1_de_irq_postinstall(struct drm_i915_private *i915); +void vlv_display_irq_postinstall(struct intel_display *display); +void ilk_de_irq_postinstall(struct intel_display *display); +void gen8_de_irq_postinstall(struct intel_display *display); +void gen11_de_irq_postinstall(struct intel_display *display); +void dg1_de_irq_postinstall(struct intel_display *display); u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe); -void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); -void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); -void i915_enable_asle_pipestat(struct drm_i915_private *i915); +void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask); +void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask); +void i915_enable_asle_pipestat(struct intel_display *display); -void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]); +void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]); void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt); void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt); -void intel_display_irq_init(struct drm_i915_private *i915); +void intel_display_irq_init(struct intel_display *display); -void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable); +void i915gm_irq_cstate_wa(struct intel_display *display, bool enable); #endif /* __INTEL_DISPLAY_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f7171e6932dc..c78315eb44fa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -16,6 +16,7 @@ #include "intel_display_power.h" #include "intel_display_power_map.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_mchbar_regs.h" @@ -204,7 +205,7 @@ static bool 
__intel_display_power_is_enabled(struct intel_display *display, struct i915_power_well *power_well; bool is_enabled; - if (pm_runtime_suspended(display->drm->dev)) + if (intel_display_rpm_suspended(display)) return false; is_enabled = true; @@ -455,7 +456,6 @@ static bool intel_display_power_grab_async_put_ref(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; struct intel_power_domain_mask async_put_mask; bool ret = false; @@ -473,8 +473,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display, goto out_verify; cancel_async_put_work(power_domains, false); - intel_runtime_pm_put_raw(&dev_priv->runtime_pm, - fetch_and_zero(&power_domains->async_put_wakeref)); + intel_display_rpm_put_raw(display, + fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); @@ -512,9 +512,10 @@ __intel_display_power_get_domain(struct intel_display *display, intel_wakeref_t intel_display_power_get(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + struct ref_tracker *wakeref; + + wakeref = intel_display_rpm_get(display); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(display, domain); @@ -539,12 +540,11 @@ intel_wakeref_t intel_display_power_get_if_enabled(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get_if_in_use(display); if (!wakeref) return NULL; @@ -560,7 +560,7 @@ intel_display_power_get_if_enabled(struct intel_display *display, mutex_unlock(&power_domains->lock); if (!is_enabled) { - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); wakeref = NULL; } @@ -623,12 +623,10 @@ release_async_put_domains(struct i915_power_domains *power_domains, struct intel_display *display = container_of(power_domains, struct intel_display, power.domains); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get_noresume(rpm); + wakeref = intel_display_rpm_get_noresume(display); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. 
*/ @@ -636,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, __intel_display_power_put_domain(display, domain); } - intel_runtime_pm_put(rpm, wakeref); + intel_display_rpm_put(display, wakeref); } static void @@ -644,11 +642,10 @@ intel_display_power_put_async_work(struct work_struct *work) { struct intel_display *display = container_of(work, struct intel_display, power.domains.async_put_work.work); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); - intel_wakeref_t old_work_wakeref = NULL; + struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL; + + new_work_wakeref = intel_display_rpm_get_raw(display); mutex_lock(&power_domains->lock); @@ -688,9 +685,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (old_work_wakeref) - intel_runtime_pm_put_raw(rpm, old_work_wakeref); + intel_display_rpm_put_raw(display, old_work_wakeref); if (new_work_wakeref) - intel_runtime_pm_put_raw(rpm, new_work_wakeref); + intel_display_rpm_put_raw(display, new_work_wakeref); } /** @@ -711,10 +708,10 @@ void __intel_display_power_put_async(struct intel_display *display, intel_wakeref_t wakeref, int delay_ms) { - struct drm_i915_private *i915 = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - struct intel_runtime_pm *rpm = &i915->runtime_pm; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); + struct ref_tracker *work_wakeref; + + work_wakeref = intel_display_rpm_get_raw(display); delay_ms = delay_ms >= 0 ? delay_ms : 100; @@ -746,9 +743,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(rpm, work_wakeref); + intel_display_rpm_put_raw(display, work_wakeref); - intel_runtime_pm_put(rpm, wakeref); + intel_display_rpm_put(display, wakeref); } /** @@ -765,7 +762,6 @@ out_verify: */ void intel_display_power_flush_work(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; @@ -786,7 +782,7 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); + intel_display_rpm_put_raw(display, work_wakeref); } /** @@ -824,10 +820,8 @@ void intel_display_power_put(struct intel_display *display, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - __intel_display_power_put(display, domain); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } #else /** @@ -846,10 +840,8 @@ void intel_display_power_put(struct intel_display *display, void intel_display_power_put_unchecked(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - __intel_display_power_put(display, domain); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_display_rpm_put_unchecked(display); } #endif @@ -1381,18 +1373,18 @@ static void hsw_enable_pc8(struct intel_display *display) intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE, 0); - lpt_disable_clkout_dp(dev_priv); + lpt_disable_clkout_dp(display); hsw_disable_lcpll(display, true, true); 
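	/* LCPLL stays off from here; hsw_disable_pc8() below brings it back via hsw_restore_lcpll(). */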
} static void hsw_disable_pc8(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); + struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); drm_dbg_kms(display->drm, "Disabling package C8+\n"); hsw_restore_lcpll(display); - intel_init_pch_refclk(dev_priv); + intel_init_pch_refclk(display); /* Many display registers don't survive PC8+ */ #ifdef I915 /* FIXME */ @@ -1979,7 +1971,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume) */ void intel_power_domains_driver_remove(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&display->power.domains.init_wakeref); @@ -1993,7 +1984,7 @@ void intel_power_domains_driver_remove(struct intel_display *display) intel_power_domains_verify_state(display); /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } /** diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index e80e1fd611ca..ab1163744bc5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off, XE3LPD_PW_C_POWER_DOMAINS, XE3LPD_PW_D_POWER_DOMAINS, POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = { diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 8ec87ffd87d2..b9b4359751cc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" @@ -186,22 +187,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well) static void hsw_power_well_post_enable(struct intel_display *display, u8 irq_pipe_mask, bool has_vga) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (has_vga) intel_vga_reset_io_mem(display); if (irq_pipe_mask) - gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); + gen8_irq_power_well_post_enable(display, irq_pipe_mask); } static void hsw_power_well_pre_disable(struct intel_display *display, u8 irq_pipe_mask) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (irq_pipe_mask) - gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); + gen8_irq_power_well_pre_disable(display, irq_pipe_mask); } #define ICL_AUX_PW_TO_PHY(pw_idx) \ @@ -752,8 +749,9 @@ void gen9_sanitize_dc_state(struct intel_display *display) void gen9_set_dc_state(struct intel_display *display, u32 state) { struct i915_power_domains *power_domains = &display->power.domains; - u32 val; + bool dc6_was_enabled, enable_dc6; u32 mask; + u32 val; if (!HAS_DISPLAY(display)) return; @@ -772,11 +770,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state) drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n", power_domains->dc_state, val & mask); + enable_dc6 = state & DC_STATE_EN_UPTO_DC6; + dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6; + if (!dc6_was_enabled && enable_dc6) + 
intel_dmc_update_dc6_allowed_count(display, true); + val &= ~mask; val |= state; gen9_write_dc_state(display, val); + if (!enable_dc6 && dc6_was_enabled) + intel_dmc_update_dc6_allowed_count(display, false); + power_domains->dc_state = val & mask; } @@ -816,7 +822,8 @@ static void assert_can_enable_dc5(struct intel_display *display) (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + + assert_display_rpm_held(display); assert_dmc_loaded(display); } @@ -1226,7 +1233,7 @@ static void vlv_display_power_well_init(struct intel_display *display) vlv_init_display_clock_gating(display); spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); + valleyview_enable_display_irqs(display); spin_unlock_irq(&dev_priv->irq_lock); /* @@ -1236,8 +1243,8 @@ static void vlv_display_power_well_init(struct intel_display *display) if (display->power.domains.initializing) return; - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); + intel_hpd_init(display); + intel_hpd_poll_disable(display); /* Re-enable the ADPA, if we have one */ for_each_intel_encoder(display->drm, encoder) { @@ -1255,7 +1262,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display) struct drm_i915_private *dev_priv = to_i915(display->drm); spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); + valleyview_disable_display_irqs(display); spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ @@ -1265,7 +1272,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display) /* Prevent us from re-enabling polling on accident in late suspend */ if (!display->drm->dev->power.is_suspended) - intel_hpd_poll_enable(dev_priv); + intel_hpd_poll_enable(display); } static void vlv_display_power_well_enable(struct intel_display *display, diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c index 1f2798404f2c..1dbd3e841df3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reset.c +++ b/drivers/gpu/drm/i915/display/intel_display_reset.c @@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only) intel_display_driver_init_hw(display); intel_clock_gating_init(i915); intel_cx0_pll_power_save_wa(display); - intel_hpd_init(i915); + intel_hpd_init(display); ret = __intel_display_driver_resume(display, state, ctx); if (ret) drm_err(display->drm, "Restoring old state failed with %i\n", ret); - intel_hpd_poll_disable(i915); + intel_hpd_poll_disable(display); } drm_atomic_state_put(state); diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c new file mode 100644 index 000000000000..48da67dd0136 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "i915_drv.h" +#include "intel_display_rpm.h" +#include "intel_runtime_pm.h" + +static struct intel_runtime_pm *display_to_rpm(struct intel_display *display) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + return &i915->runtime_pm; +} + +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) +{ + return intel_runtime_pm_get_raw(display_to_rpm(display)); +} + +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) +{ + 
intel_runtime_pm_put_raw(display_to_rpm(display), wakeref); +} + +struct ref_tracker *intel_display_rpm_get(struct intel_display *display) +{ + return intel_runtime_pm_get(display_to_rpm(display)); +} + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) +{ + return intel_runtime_pm_get_if_in_use(display_to_rpm(display)); +} + +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) +{ + return intel_runtime_pm_get_noresume(display_to_rpm(display)); +} + +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) +{ + intel_runtime_pm_put(display_to_rpm(display), wakeref); +} + +void intel_display_rpm_put_unchecked(struct intel_display *display) +{ + intel_runtime_pm_put_unchecked(display_to_rpm(display)); +} + +bool intel_display_rpm_suspended(struct intel_display *display) +{ + return intel_runtime_pm_suspended(display_to_rpm(display)); +} + +void assert_display_rpm_held(struct intel_display *display) +{ + assert_rpm_wakelock_held(display_to_rpm(display)); +} + +void intel_display_rpm_assert_block(struct intel_display *display) +{ + disable_rpm_wakeref_asserts(display_to_rpm(display)); +} + +void intel_display_rpm_assert_unblock(struct intel_display *display) +{ + enable_rpm_wakeref_asserts(display_to_rpm(display)); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h new file mode 100644 index 000000000000..6ef48515f84b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_RPM__ +#define __INTEL_DISPLAY_RPM__ + +#include <linux/types.h> + +struct intel_display; +struct ref_tracker; + +struct ref_tracker *intel_display_rpm_get(struct intel_display *display); +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref); + +#define __with_intel_display_rpm(__display, __wakeref) \ + for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \ + intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL) + +#define with_intel_display_rpm(__display) \ + __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref)) + +/* Only for special cases. */ +bool intel_display_rpm_suspended(struct intel_display *display); + +void assert_display_rpm_held(struct intel_display *display); +void intel_display_rpm_assert_block(struct intel_display *display); +void intel_display_rpm_assert_unblock(struct intel_display *display); + +/* Only for display power implementation. 
*/ +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display); +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref); + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display); +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display); +void intel_display_rpm_put_unchecked(struct intel_display *display); + +#endif /* __INTEL_DISPLAY_RPM__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 99a6fd2900b9..94468a9d2e0d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -581,7 +581,7 @@ struct dpll { struct intel_atomic_state { struct drm_atomic_state base; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct __intel_global_objs_state *global_objs; int num_global_objs; @@ -1620,7 +1620,7 @@ struct intel_psr { bool sink_support; bool source_support; bool enabled; - bool paused; + int pause_counter; enum pipe pipe; enum transcoder transcoder; bool active; @@ -1658,7 +1658,6 @@ struct intel_dp { int link_rate; u8 lane_count; u8 sink_count; - bool link_trained; bool needs_modeset_retry; bool use_max_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; @@ -1683,6 +1682,7 @@ struct intel_dp { int common_rates[DP_MAX_SUPPORTED_RATES]; struct { /* TODO: move the rest of link specific fields to here */ + bool active; /* common rate,lane_count configs in bw order */ int num_configs; #define INTEL_DP_MAX_LANE_COUNT 4 @@ -1739,7 +1739,7 @@ struct intel_dp { struct { struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES]; struct drm_dp_mst_topology_mgr mgr; - int active_links; + int active_streams; } mst; u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index); diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c index e5a8022db664..da429c332914 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.c +++ b/drivers/gpu/drm/i915/display/intel_display_wa.c @@ -3,38 +3,38 @@ * Copyright © 2023 Intel Corporation */ -#include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" +#include "intel_display_core.h" #include "intel_display_wa.h" -static void gen11_display_wa_apply(struct drm_i915_private *i915) +static void gen11_display_wa_apply(struct intel_display *display) { /* Wa_14010594013 */ - intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP); + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP); } -static void xe_d_display_wa_apply(struct drm_i915_private *i915) +static void xe_d_display_wa_apply(struct intel_display *display) { /* Wa_14013723622 */ - intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0); + intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0); } -static void adlp_display_wa_apply(struct drm_i915_private *i915) +static void adlp_display_wa_apply(struct intel_display *display) { /* Wa_22011091694:adlp */ - intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS); + intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS); /* Bspec/49189 Initialize Sequence */ - intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); + intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); } -void intel_display_wa_apply(struct drm_i915_private *i915) +void intel_display_wa_apply(struct intel_display *display) { - if (IS_ALDERLAKE_P(i915)) - adlp_display_wa_apply(i915); - else if (DISPLAY_VER(i915) == 12) - 
xe_d_display_wa_apply(i915); - else if (DISPLAY_VER(i915) == 11) - gen11_display_wa_apply(i915); + if (display->platform.alderlake_p) + adlp_display_wa_apply(display); + else if (DISPLAY_VER(display) == 12) + xe_d_display_wa_apply(display); + else if (DISPLAY_VER(display) == 11) + gen11_display_wa_apply(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h index be644ab6ae00..babd9d16603d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.h +++ b/drivers/gpu/drm/i915/display/intel_display_wa.h @@ -8,14 +8,17 @@ #include <linux/types.h> -struct drm_i915_private; +struct intel_display; -void intel_display_wa_apply(struct drm_i915_private *i915); +void intel_display_wa_apply(struct intel_display *display); #ifdef I915 -static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; } +static inline bool intel_display_needs_wa_16023588340(struct intel_display *display) +{ + return false; +} #else -bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915); +bool intel_display_needs_wa_16023588340(struct intel_display *display); #endif #endif diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c index 0813fb9b5823..dad7192132ad 100644 --- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c @@ -4,6 +4,7 @@ */ #include <drm/drm_device.h> +#include <drm/drm_print.h> #include "intel_de.h" #include "intel_display.h" diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index fa6944e55d95..98f80a6c63e8 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -28,6 +28,8 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" +#include "intel_display_rpm.h" +#include "intel_display_power_well.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" #include "intel_step.h" @@ -57,6 +59,10 @@ struct intel_dmc { const char *fw_path; u32 max_fw_size; /* bytes */ u32 version; + struct { + u32 dc5_start; + u32 count; + } dc6_allowed; struct dmc_fw_info { u32 mmio_count; i915_reg_t mmioaddr[20]; @@ -595,7 +601,7 @@ void intel_dmc_load_program(struct intel_display *display) disable_all_event_handlers(display); - assert_rpm_wakelock_held(&i915->runtime_pm); + assert_display_rpm_held(display); preempt_disable(); @@ -1232,18 +1238,57 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct DMC_VERSION_MINOR(snapshot->version)); } +void intel_dmc_update_dc6_allowed_count(struct intel_display *display, + bool start_tracking) +{ + struct intel_dmc *dmc = display_to_dmc(display); + u32 dc5_cur_count; + + if (DISPLAY_VER(dmc->display) < 14) + return; + + dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT); + + if (!start_tracking) + dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start; + + dmc->dc6_allowed.dc5_start = dc5_cur_count; +} + +static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count) +{ + struct i915_power_domains *power_domains = &display->power.domains; + struct intel_dmc *dmc = display_to_dmc(display); + bool dc6_enabled; + + if (DISPLAY_VER(display) < 14) + return false; + + mutex_lock(&power_domains->lock); + dc6_enabled = intel_de_read(display, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC6; + if (dc6_enabled) + intel_dmc_update_dc6_allowed_count(display, false); + + *count = dmc->dc6_allowed.count; + 
mutex_unlock(&power_domains->lock); + + return true; +} + static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_display *display = m->private; struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc = display_to_dmc(display); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; + u32 dc6_allowed_count; if (!HAS_DMC(display)) return -ENODEV; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", @@ -1287,7 +1332,11 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) } seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg)); - if (i915_mmio_reg_valid(dc6_reg)) + + if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count)) + seq_printf(m, "DC5 -> DC6 allowed count: %d\n", + dc6_allowed_count); + else if (i915_mmio_reg_valid(dc6_reg)) seq_printf(m, "DC5 -> DC6 count: %d\n", intel_de_read(display, dc6_reg)); @@ -1299,7 +1348,7 @@ out: intel_de_read(display, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL)); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 44cecef98e73..c78426eb4cd5 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -26,6 +26,7 @@ void intel_dmc_debugfs_register(struct intel_display *display); struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display); void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p); +void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking); void assert_dmc_loaded(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 392c3653d0d7..d7a30d0992b7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -62,6 +62,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_driver.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -87,7 +88,6 @@ #include "intel_pfit.h" #include "intel_pps.h" #include "intel_psr.h" -#include "intel_runtime_pm.h" #include "intel_quirks.h" #include "intel_tc.h" #include "intel_vdsc.h" @@ -3223,7 +3223,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); - intel_dp->link_trained = false; + intel_dp->link.active = false; intel_dp->needs_modeset_retry = false; intel_dp->link_rate = link_rate; intel_dp->lane_count = lane_count; @@ -3587,7 +3587,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder, if (crtc_state) { intel_dp_reset_link_params(intel_dp); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); - intel_dp->link_trained = true; + intel_dp->link.active = true; } } @@ -5005,8 +5005,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) bool link_ok = true; bool reprobe_needed = false; - drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0); - for (;;) { u8 esi[4] = {}; u8 ack[4] = {}; @@ -5021,7 +5019,7 @@ intel_dp_check_mst_status(struct 
intel_dp *intel_dp) drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi); - if (intel_dp->mst.active_links > 0 && link_ok && + if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok && esi[3] & LINK_STATUS_CHANGED) { if (!intel_dp_mst_link_status(intel_dp)) link_ok = false; @@ -5082,7 +5080,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) { u8 link_status[DP_LINK_STATUS_SIZE]; - if (!intel_dp->link_trained) + if (!intel_dp->link.active) return false; /* @@ -6153,7 +6151,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector, spin_unlock_irq(&i915->irq_lock); if (need_work) - intel_hpd_schedule_detection(i915); + intel_hpd_schedule_detection(display); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -6180,13 +6178,12 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { struct intel_display *display = to_intel_display(dig_port); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || - intel_runtime_pm_suspended(&i915->runtime_pm) || + intel_display_rpm_suspended(display) || !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which @@ -6362,7 +6359,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, * eDP and LVDS bail out early in this case to prevent interfering * with an already powered-on LVDS power sequencer. */ - if (intel_get_lvds_encoder(dev_priv)) { + if (intel_get_lvds_encoder(display)) { drm_WARN_ON(display->drm, !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); drm_info(display->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index ec27bbd70bcf..0496061203fb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -247,7 +247,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, u32 aux_clock_divider; enum intel_display_power_domain aux_domain; intel_wakeref_t aux_wakeref; - intel_wakeref_t pps_wakeref; + intel_wakeref_t pps_wakeref = NULL; int i, ret, recv_bytes; int try, clock = 0; u32 status; @@ -272,7 +272,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, aux_domain = intel_aux_power_domain(dig_port); aux_wakeref = intel_display_power_get(display, aux_domain); - pps_wakeref = intel_pps_lock(intel_dp); + + /* + * The PPS state needs to be locked for: + * - eDP on all platforms, since AUX transfers on eDP need VDD power + * (either forced or via panel power) which depends on the PPS + * state. + * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV), + * since changing the PPS state (via a parallel modeset for + * instance) may interfere with the AUX transfers on a non-eDP + * output as well. + */ + if (intel_dp_is_edp(intel_dp) || + display->platform.valleyview || display->platform.cherryview) + pps_wakeref = intel_pps_lock(intel_dp); /* * We will be called with VDD already enabled for dpcd/edid/oui reads. 
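Condensed, the conditional PPS locking added to intel_dp_aux_xfer() pairs up as in this sketch (transfer loop and error paths omitted):

	intel_wakeref_t pps_wakeref = NULL;

	/* lock the PPS state only where it can affect the AUX transfer */
	if (intel_dp_is_edp(intel_dp) ||
	    display->platform.valleyview || display->platform.cherryview)
		pps_wakeref = intel_pps_lock(intel_dp);

	/* ... AUX transfer ... */

	if (pps_wakeref)
		intel_pps_unlock(intel_dp, pps_wakeref);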
@@ -430,7 +443,9 @@ out: if (vdd) intel_pps_vdd_off_unlocked(intel_dp, false); - intel_pps_unlock(intel_dp, pps_wakeref); + if (pps_wakeref) + intel_pps_unlock(intel_dp, pps_wakeref); + intel_display_power_put_async(display, aux_domain, aux_wakeref); out_unlock: intel_digital_port_unlock(encoder); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 8173de8aec63..20ab90acb351 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -663,7 +663,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_panel *panel = &connector->panel; - if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) { + if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) && + (intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) { drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n", connector->base.base.id, connector->base.name); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 2966f5b39392..a479b63112ea 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -56,6 +56,8 @@ lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \ } while (0) +#define MAX_SEQ_TRAIN_FAILURES 2 + static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); @@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_ * resetting its internal state when the mode is changed from * non-transparent to transparent. */ - if (intel_dp->link_trained) { + if (intel_dp->link.active) { if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp)) goto out_reset_lttpr_count; @@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + /* + * Currently, we set the MSA ignore bit based on vrr.in_range. + * We can't really read that out during driver load since we don't have + * the connector information read in yet. So if we do end up doing a + * modeset during initial_commit() we'll clear the MSA ignore bit. + * GOP likely wouldn't have set this bit so after the initial commit, + * if there are no modesets and we enable VRR mode seamlessly + * (without a full modeset), the MSA ignore bit might never get set. + * + * #TODO: Implement readout of vrr.in_range. + * We need fastset support for setting the MSA ignore bit in DPCD, + * especially on the first real commit when clearing the inherited flag. 
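+ * Note the switch below from vrr.flipline to vrr.in_range as the
+ * condition for ignoring the MSA timing parameters.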
+ */ intel_dp_link_training_set_mode(intel_dp, - crtc_state->port_clock, crtc_state->vrr.flipline); + crtc_state->port_clock, crtc_state->vrr.in_range); } void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, @@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - intel_dp->link_trained = true; + struct intel_display *display = to_intel_display(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + intel_dp->link.active = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, @@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); } + + intel_hpd_unblock(encoder); + + if (!display->hotplug.ignore_long_hpd && + intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) { + int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000; + + intel_encoder_link_check_queue_work(encoder, delay_ms); + } } static bool @@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, * non-transparent mode. During an earlier LTTPR detection this * could've been prevented by an active link. */ - int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); + int lttpr_count; + + intel_hpd_block(encoder); + + lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); if (lttpr_count < 0) /* Still continue with enabling the port and link training. */ @@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n"); } else if (passed) { intel_dp->link.seq_train_failures = 0; - intel_encoder_link_check_queue_work(encoder, 2000); return; } @@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, return; } - if (intel_dp->link.seq_train_failures < 2) { - intel_encoder_link_check_queue_work(encoder, 0); + if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) return; - } if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state)) return; @@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data) if (err) return err; - if (intel_dp->link_trained) + if (intel_dp->link.active) current_rate = intel_dp->link_rate; force_rate = intel_dp->link.force_rate; @@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data) if (err) return err; - if (intel_dp->link_trained) + if (intel_dp->link.active) current_lane_count = intel_dp->lane_count; force_lane_count = intel_dp->link.force_lane_count; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 02f95108c637..4c15dcb103aa 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -52,6 +52,7 @@ #include "intel_pfit.h" #include "intel_psr.h" #include "intel_vdsc.h" +#include "intel_vrr.h" #include "skl_scaler.h" /* @@ -104,6 +105,34 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder) return &dig_port->dp; } +int intel_dp_mst_active_streams(struct intel_dp *intel_dp) +{ + return intel_dp->mst.active_streams; +} + +static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp) +{ + struct intel_display 
*display = to_intel_display(intel_dp); + + drm_dbg_kms(display->drm, "active MST streams %d -> %d\n", + intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1); + + if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0)) + return true; + + return --intel_dp->mst.active_streams == 0; +} + +static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp) +{ + struct intel_display *display = to_intel_display(intel_dp); + + drm_dbg_kms(display->drm, "active MST streams %d -> %d\n", + intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1); + + return intel_dp->mst.active_streams++ == 0; +} + static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state, bool dsc) { @@ -710,6 +739,8 @@ static int mst_stream_compute_config(struct intel_encoder *encoder, pipe_config->lane_lat_optim_mask = bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); + intel_vrr_compute_config(pipe_config, conn_state); + intel_dp_audio_compute_config(encoder, pipe_config, conn_state); intel_ddi_compute_min_voltage_level(pipe_config); @@ -997,11 +1028,8 @@ static void mst_stream_disable(struct intel_atomic_state *state, to_intel_connector(old_conn_state->connector); enum transcoder trans = old_crtc_state->cpu_transcoder; - drm_dbg_kms(display->drm, "active links %d\n", - intel_dp->mst.active_links); - - if (intel_dp->mst.active_links == 1) - intel_dp->link_trained = false; + if (intel_dp_mst_active_streams(intel_dp) == 1) + intel_dp->link.active = false; intel_hdcp_disable(intel_mst->connector); @@ -1034,8 +1062,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state, bool last_mst_stream; int i; - intel_dp->mst.active_links--; - last_mst_stream = intel_dp->mst.active_links == 0; + last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp); + drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream && !intel_dp_mst_is_master_trans(old_crtc_state)); @@ -1062,6 +1090,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state, drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state, old_payload, new_payload); + intel_vrr_transcoder_disable(old_crtc_state); + intel_ddi_disable_transcoder_func(old_crtc_state); for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { @@ -1104,8 +1134,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state, primary_encoder->post_disable(state, primary_encoder, old_crtc_state, NULL); - drm_dbg_kms(display->drm, "active links %d\n", - intel_dp->mst.active_links); } static void mst_stream_post_pll_disable(struct intel_atomic_state *state, @@ -1116,7 +1144,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state, struct intel_encoder *primary_encoder = to_primary_encoder(encoder); struct intel_dp *intel_dp = to_primary_dp(encoder); - if (intel_dp->mst.active_links == 0 && + if (intel_dp_mst_active_streams(intel_dp) == 0 && primary_encoder->post_pll_disable) primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state); } @@ -1129,7 +1157,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state, struct intel_encoder *primary_encoder = to_primary_encoder(encoder); struct intel_dp *intel_dp = to_primary_dp(encoder); - if (intel_dp->mst.active_links == 0) + if (intel_dp_mst_active_streams(intel_dp) == 0) primary_encoder->pre_pll_enable(state, primary_encoder, pipe_config, NULL); else @@ -1189,13 +1217,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state, */ 
connector->encoder = encoder; intel_mst->connector = connector; - first_mst_stream = intel_dp->mst.active_links == 0; + + first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp); drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream && !intel_dp_mst_is_master_trans(pipe_config)); - drm_dbg_kms(display->drm, "active links %d\n", - intel_dp->mst.active_links); - if (first_mst_stream) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); @@ -1210,8 +1236,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state, intel_mst_reprobe_topology(intel_dp, pipe_config); } - intel_dp->mst.active_links++; - ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state, drm_atomic_get_mst_payload_state(mst_state, connector->mst.port)); if (ret < 0) @@ -1279,7 +1303,7 @@ static void mst_stream_enable(struct intel_atomic_state *state, struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr); enum transcoder trans = pipe_config->cpu_transcoder; - bool first_mst_stream = intel_dp->mst.active_links == 1; + bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1; struct intel_crtc *pipe_crtc; int ret, i, min_hblank; @@ -1323,14 +1347,13 @@ static void mst_stream_enable(struct intel_atomic_state *state, intel_ddi_enable_transcoder_func(encoder, pipe_config); + intel_vrr_transcoder_enable(pipe_config); + intel_ddi_clear_act_sent(encoder, pipe_config); intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0, TRANS_DDI_DP_VC_PAYLOAD_ALLOC); - drm_dbg_kms(display->drm, "active links %d\n", - intel_dp->mst.active_links); - intel_ddi_wait_for_act_sent(encoder, pipe_config); drm_dp_check_act_status(&intel_dp->mst.mgr); @@ -1870,12 +1893,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port) } int -intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port) -{ - return dig_port->dp.mst.active_links; -} - -int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) { struct intel_display *display = to_intel_display(dig_port); @@ -2101,7 +2118,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp) u8 rate_select; u8 link_bw; - if (intel_dp->link_trained) + if (intel_dp->link.active) return; if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count)) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index c1bbfeb02ca9..ab09b487c6bb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -18,7 +18,7 @@ struct intel_link_bw_limits; int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port); -int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port); +int intel_dp_mst_active_streams(struct intel_dp *intel_dp); bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_source_support(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 08a30e5aafce..0481b1365b85 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -373,14 +373,15 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = 
to_intel_display(crtc_state); struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) - return i915->display.vbt.lvds_ssc_freq; + return display->vbt.lvds_ssc_freq; else if (HAS_PCH_SPLIT(i915)) return 120000; - else if (DISPLAY_VER(i915) != 2) + else if (DISPLAY_VER(display) != 2) return 96000; else return 48000; @@ -389,27 +390,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state) void i9xx_dpll_get_hw_state(struct intel_crtc *crtc, struct intel_dpll_hw_state *dpll_hw_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx; - if (DISPLAY_VER(dev_priv) >= 4) { + if (DISPLAY_VER(display) >= 4) { u32 tmp; /* No way to read it out on pipes B and C */ - if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) - tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; + if (display->platform.cherryview && crtc->pipe != PIPE_A) + tmp = display->state.chv_dpll_md[crtc->pipe]; else - tmp = intel_de_read(dev_priv, - DPLL_MD(dev_priv, crtc->pipe)); + tmp = intel_de_read(display, + DPLL_MD(display, crtc->pipe)); hw_state->dpll_md = tmp; } - hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe)); + hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe)); - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe)); - hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe)); + if (!display->platform.valleyview && !display->platform.cherryview) { + hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe)); + hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe)); } else { /* Mask out read-only status bits. */ hw_state->dpll &= ~(DPLL_LOCK_VLV | @@ -421,8 +422,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc, /* Returns the clock of the currently programmed mode of the given pipe. 
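* The dot clock is reconstructed from the dpll/fp divider values
* captured by the hw state readout.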
*/ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; u32 dpll = hw_state->dpll; u32 fp; @@ -436,7 +437,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state) fp = hw_state->fp1; clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; - if (IS_PINEVIEW(dev_priv)) { + if (display->platform.pineview) { clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; } else { @@ -444,8 +445,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state) clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } - if (DISPLAY_VER(dev_priv) != 2) { - if (IS_PINEVIEW(dev_priv)) + if (DISPLAY_VER(display) != 2) { + if (display->platform.pineview) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); else @@ -462,23 +463,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state) 7 : 14; break; default: - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Unknown DPLL mode %08x in programmed " "mode\n", (int)(dpll & DPLL_MODE_MASK)); return; } - if (IS_PINEVIEW(dev_priv)) + if (display->platform.pineview) port_clock = pnv_calc_dpll_params(refclk, &clock); else port_clock = i9xx_calc_dpll_params(refclk, &clock); } else { enum pipe lvds_pipe; - if (IS_I85X(dev_priv) && - intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && + if (display->platform.i85x && + intel_lvds_port_enabled(display, LVDS, &lvds_pipe) && lvds_pipe == crtc->pipe) { - u32 lvds = intel_de_read(dev_priv, LVDS); + u32 lvds = intel_de_read(display, LVDS); clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); @@ -577,7 +578,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state) * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
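* The divisor ranges checked come from the platform's intel_limit table
* supplied by the caller.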
*/ -static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, +static bool intel_pll_is_valid(struct intel_display *display, const struct intel_limit *limit, const struct dpll *clock) { @@ -590,14 +591,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv, if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) return false; - if (!IS_PINEVIEW(dev_priv) && - !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) + if (!display->platform.pineview && + !display->platform.valleyview && !display->platform.cherryview && + !display->platform.broxton && !display->platform.geminilake) if (clock->m1 <= clock->m2) return false; - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) { + if (!display->platform.valleyview && !display->platform.cherryview && + !display->platform.broxton && !display->platform.geminilake) { if (clock->p < limit->p.min || limit->p.max < clock->p) return false; if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -620,7 +621,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* @@ -628,7 +629,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, * We haven't figured out how to reliably set up different * single/dual channel state, if we even can. */ - if (intel_is_dual_link_lvds(dev_priv)) + if (intel_is_dual_link_lvds(display)) return limit->p2.p2_fast; else return limit->p2.p2_slow; @@ -656,7 +657,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit, const struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->uapi.crtc->dev; + struct intel_display *display = to_intel_display(crtc_state); struct dpll clock; int err = target; @@ -677,7 +678,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_pll_is_valid(to_i915(dev), + if (!intel_pll_is_valid(display, limit, &clock)) continue; @@ -714,7 +715,7 @@ pnv_find_best_dpll(const struct intel_limit *limit, const struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->uapi.crtc->dev; + struct intel_display *display = to_intel_display(crtc_state); struct dpll clock; int err = target; @@ -733,7 +734,7 @@ pnv_find_best_dpll(const struct intel_limit *limit, int this_err; pnv_calc_dpll_params(refclk, &clock); - if (!intel_pll_is_valid(to_i915(dev), + if (!intel_pll_is_valid(display, limit, &clock)) continue; @@ -770,7 +771,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, const struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->uapi.crtc->dev; + struct intel_display *display = to_intel_display(crtc_state); struct dpll clock; int max_n; bool found = false; @@ -794,7 +795,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_pll_is_valid(to_i915(dev), + if (!intel_pll_is_valid(display, limit, &clock)) continue; @@ -817,7 +818,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, * Check if the calculated PLL configuration is more optimal compared to the * best configuration and error found so far. Return the calculated error. 
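* The error is expressed in ppm: |target - calculated dot clock| * 10^6
* divided by the target frequency.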
*/ -static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, +static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq, const struct dpll *calculated_clock, const struct dpll *best_clock, unsigned int best_error_ppm, @@ -827,13 +828,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * For CHV ignore the error and consider only the P value. * Prefer a bigger P value based on HW requirements. */ - if (IS_CHERRYVIEW(to_i915(dev))) { + if (display->platform.cherryview) { *error_ppm = 0; return calculated_clock->p > best_clock->p; } - if (drm_WARN_ON_ONCE(dev, !target_freq)) + if (drm_WARN_ON_ONCE(display->drm, !target_freq)) return false; *error_ppm = div_u64(1000000ULL * @@ -864,8 +865,7 @@ vlv_find_best_dpll(const struct intel_limit *limit, const struct dpll *match_clock, struct dpll *best_clock) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; + struct intel_display *display = to_intel_display(crtc_state); struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ @@ -889,12 +889,12 @@ vlv_find_best_dpll(const struct intel_limit *limit, vlv_calc_dpll_params(refclk, &clock); - if (!intel_pll_is_valid(to_i915(dev), + if (!intel_pll_is_valid(display, limit, &clock)) continue; - if (!vlv_PLL_is_optimal(dev, target, + if (!vlv_PLL_is_optimal(display, target, &clock, best_clock, bestppm, &ppm)) @@ -922,8 +922,7 @@ chv_find_best_dpll(const struct intel_limit *limit, const struct dpll *match_clock, struct dpll *best_clock) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; + struct intel_display *display = to_intel_display(crtc_state); unsigned int best_error_ppm; struct dpll clock; u64 m2; @@ -958,10 +957,10 @@ chv_find_best_dpll(const struct intel_limit *limit, chv_calc_dpll_params(refclk, &clock); - if (!intel_pll_is_valid(to_i915(dev), limit, &clock)) + if (!intel_pll_is_valid(display, limit, &clock)) continue; - if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, + if (!vlv_PLL_is_optimal(display, target, &clock, best_clock, best_error_ppm, &error_ppm)) continue; @@ -1005,8 +1004,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *reduced_clock) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS; @@ -1016,8 +1013,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, else dpll |= DPLLB_MODE_DAC_SERIAL; - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || - IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { + if (display->platform.i945g || display->platform.i945gm || + display->platform.g33 || display->platform.pineview) { dpll |= (crtc_state->pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } @@ -1030,10 +1027,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ - if (IS_G4X(dev_priv)) { + if (display->platform.g4x) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - } else if (IS_PINEVIEW(dev_priv)) { + } else if (display->platform.pineview) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; WARN_ON(reduced_clock->p1 != clock->p1); } else { @@ -1057,7 +1054,7 
@@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state, } WARN_ON(reduced_clock->p2 != clock->p2); - if (DISPLAY_VER(dev_priv) >= 4) + if (DISPLAY_VER(display) >= 4) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); if (crtc_state->sdvo_tv_clock) @@ -1075,11 +1072,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, const struct dpll *clock, const struct dpll *reduced_clock) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; - if (IS_PINEVIEW(dev_priv)) { + if (display->platform.pineview) { hw_state->fp0 = pnv_dpll_compute_fp(clock); hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock); } else { @@ -1089,7 +1085,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock); - if (DISPLAY_VER(dev_priv) >= 4) + if (DISPLAY_VER(display) >= 4) hw_state->dpll_md = i965_dpll_md(crtc_state); } @@ -1098,8 +1094,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *reduced_clock) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS; @@ -1129,7 +1123,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state, * both DPLLS. The spec says we should disable the DVO 2X clock * when not needed, but this seems to work fine in practice. */ - if (IS_I830(dev_priv) || + if (display->platform.i830 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; @@ -1157,14 +1151,14 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, static int hsw_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); int ret; - if (DISPLAY_VER(dev_priv) < 11 && + if (DISPLAY_VER(display) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; @@ -1186,13 +1180,13 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state, static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (DISPLAY_VER(dev_priv) < 11 && + if (DISPLAY_VER(display) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; @@ -1245,8 +1239,8 @@ static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) || - (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915)))) + ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) || + (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(display)))) return 25; if 
(crtc_state->sdvo_tv_clock) @@ -1276,8 +1270,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, const struct dpll *reduced_clock) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; dpll = DPLL_VCO_ENABLE; @@ -1311,7 +1303,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state, * clear if it's a win or loss power wise. No point in doing * this on ILK at all since it has a fixed DPLL<->pipe mapping. */ - if (INTEL_NUM_PIPES(dev_priv) == 3 && + if (INTEL_NUM_PIPES(display) == 3 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) dpll |= DPLL_SDVO_HIGH_SPEED; @@ -1362,7 +1354,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; @@ -1375,13 +1366,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(display)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "using SSC reference clock of %d kHz\n", - dev_priv->display.vbt.lvds_ssc_freq); - refclk = dev_priv->display.vbt.lvds_ssc_freq; + display->vbt.lvds_ssc_freq); + refclk = display->vbt.lvds_ssc_freq; } - if (intel_is_dual_link_lvds(dev_priv)) { + if (intel_is_dual_link_lvds(display)) { if (refclk == 100000) limit = &ilk_limits_dual_lvds_100m; else @@ -1539,7 +1530,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; @@ -1547,13 +1537,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(display)) { - refclk = dev_priv->display.vbt.lvds_ssc_freq; - drm_dbg_kms(&dev_priv->drm, + refclk = display->vbt.lvds_ssc_freq; + drm_dbg_kms(display->drm, "using SSC reference clock of %d kHz\n", refclk); } - if (intel_is_dual_link_lvds(dev_priv)) + if (intel_is_dual_link_lvds(display)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; @@ -1589,7 +1579,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; @@ -1597,8 +1586,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(display)) { - refclk = dev_priv->display.vbt.lvds_ssc_freq; - drm_dbg_kms(&dev_priv->drm, + refclk = display->vbt.lvds_ssc_freq; + drm_dbg_kms(display->drm, "using SSC reference clock of %d kHz\n", refclk); } @@ -1628,7 +1617,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private 
*dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; @@ -1636,8 +1624,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(display)) { - refclk = dev_priv->display.vbt.lvds_ssc_freq; - drm_dbg_kms(&dev_priv->drm, + refclk = display->vbt.lvds_ssc_freq; + drm_dbg_kms(display->drm, "using SSC reference clock of %d kHz\n", refclk); } @@ -1669,7 +1657,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; @@ -1677,8 +1664,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(display)) { - refclk = dev_priv->display.vbt.lvds_ssc_freq; - drm_dbg_kms(&dev_priv->drm, + refclk = display->vbt.lvds_ssc_freq; + drm_dbg_kms(display->drm, "using SSC reference clock of %d kHz\n", refclk); } @@ -1751,12 +1738,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = { int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; - drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state)); memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1764,9 +1751,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, if (!crtc_state->hw.enable) return 0; - ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc); + ret = display->funcs.dpll->crtc_compute_clock(state, crtc); if (ret) { - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n", + drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n", crtc->base.base.id, crtc->base.name); return ret; } @@ -1777,23 +1764,23 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); int ret; - drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); - drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll); + drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state)); + drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll); if (!crtc_state->hw.enable || crtc_state->shared_dpll) return 0; - if (!i915->display.funcs.dpll->crtc_get_shared_dpll) + if (!display->funcs.dpll->crtc_get_shared_dpll) return 0; - ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc); + ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc); if (ret) { - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n", + drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n", crtc->base.base.id, crtc->base.name); return 
ret; } @@ -1802,43 +1789,44 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, } void -intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 14) - dev_priv->display.funcs.dpll = &mtl_dpll_funcs; - else if (IS_DG2(dev_priv)) - dev_priv->display.funcs.dpll = &dg2_dpll_funcs; - else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) - dev_priv->display.funcs.dpll = &hsw_dpll_funcs; +intel_dpll_init_clock_hook(struct intel_display *display) +{ + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (DISPLAY_VER(display) >= 14) + display->funcs.dpll = &mtl_dpll_funcs; + else if (display->platform.dg2) + display->funcs.dpll = &dg2_dpll_funcs; + else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display)) + display->funcs.dpll = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) - dev_priv->display.funcs.dpll = &ilk_dpll_funcs; - else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->display.funcs.dpll = &chv_dpll_funcs; - else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->display.funcs.dpll = &vlv_dpll_funcs; - else if (IS_G4X(dev_priv)) - dev_priv->display.funcs.dpll = &g4x_dpll_funcs; - else if (IS_PINEVIEW(dev_priv)) - dev_priv->display.funcs.dpll = &pnv_dpll_funcs; - else if (DISPLAY_VER(dev_priv) != 2) - dev_priv->display.funcs.dpll = &i9xx_dpll_funcs; + display->funcs.dpll = &ilk_dpll_funcs; + else if (display->platform.cherryview) + display->funcs.dpll = &chv_dpll_funcs; + else if (display->platform.valleyview) + display->funcs.dpll = &vlv_dpll_funcs; + else if (display->platform.g4x) + display->funcs.dpll = &g4x_dpll_funcs; + else if (display->platform.pineview) + display->funcs.dpll = &pnv_dpll_funcs; + else if (DISPLAY_VER(display) != 2) + display->funcs.dpll = &i9xx_dpll_funcs; else - dev_priv->display.funcs.dpll = &i8xx_dpll_funcs; + display->funcs.dpll = &i8xx_dpll_funcs; } -static bool i9xx_has_pps(struct drm_i915_private *dev_priv) +static bool i9xx_has_pps(struct intel_display *display) { - if (IS_I830(dev_priv)) + if (display->platform.i830) return false; - return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); + return display->platform.pineview || display->platform.mobile; } void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; int i; @@ -1846,27 +1834,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) assert_transcoder_disabled(display, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ - if (i9xx_has_pps(dev_priv)) + if (i9xx_has_pps(display)) assert_pps_unlocked(display, pipe); - intel_de_write(dev_priv, FP0(pipe), hw_state->fp0); - intel_de_write(dev_priv, FP1(pipe), hw_state->fp1); + intel_de_write(display, FP0(pipe), hw_state->fp0); + intel_de_write(display, FP1(pipe), hw_state->fp1); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. 
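* Hence the first write below clears DPLL_VGA_MODE_DIS (leaving VGA
* mode enabled) before the final DPLL value is written.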
*/ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), + intel_de_write(display, DPLL(display, pipe), hw_state->dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); + intel_de_write(display, DPLL(display, pipe), hw_state->dpll); /* Wait for the clocks to stabilize. */ - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); - if (DISPLAY_VER(dev_priv) >= 4) { - intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), + if (DISPLAY_VER(display) >= 4) { + intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md); } else { /* The pixel multiplier can only be updated once the @@ -1874,20 +1862,21 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) * * So write it again. */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); + intel_de_write(display, DPLL(display, pipe), hw_state->dpll); } /* We do this three times for luck */ for (i = 0; i < 3; i++) { - intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), hw_state->dpll); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); /* wait for warmup */ } } -static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, +static void vlv_pllb_recal_opamp(struct intel_display *display, enum dpio_phy phy, enum dpio_channel ch) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 tmp; /* @@ -1916,6 +1905,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct dpll *clock = &crtc_state->dpll; @@ -1930,7 +1920,7 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) /* PLL B needs special handling */ if (pipe == PIPE_B) - vlv_pllb_recal_opamp(dev_priv, phy, ch); + vlv_pllb_recal_opamp(display, phy, ch); /* Set up Tx target for periodic Rcomp update */ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f); @@ -2003,24 +1993,23 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); - intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe)); + intel_de_write(display, DPLL(display, pipe), hw_state->dpll); + intel_de_posting_read(display, DPLL(display, pipe)); udelay(150); - if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); + if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) + drm_err(display->drm, "DPLL %d failed to lock\n", pipe); } void vlv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state 
*hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; @@ -2030,7 +2019,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state) assert_pps_unlocked(display, pipe); /* Enable Refclk */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), + intel_de_write(display, DPLL(display, pipe), hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); if (hw_state->dpll & DPLL_VCO_ENABLE) { @@ -2038,8 +2027,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state) _vlv_enable_pll(crtc_state); } - intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe)); + intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md); + intel_de_posting_read(display, DPLL_MD(display, pipe)); } static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) @@ -2133,6 +2122,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; @@ -2156,18 +2146,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) udelay(1); /* Enable PLL */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll); + intel_de_write(display, DPLL(display, pipe), hw_state->dpll); /* Check PLL is locked */ - if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); + if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1)) + drm_err(display->drm, "PLL %d failed to lock\n", pipe); } void chv_enable_pll(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx; enum pipe pipe = crtc->pipe; @@ -2177,7 +2166,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) assert_pps_unlocked(display, pipe); /* Enable Refclk and SSC */ - intel_de_write(dev_priv, DPLL(dev_priv, pipe), + intel_de_write(display, DPLL(display, pipe), hw_state->dpll & ~DPLL_VCO_ENABLE); if (hw_state->dpll & DPLL_VCO_ENABLE) { @@ -2192,29 +2181,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) * DPLLCMD is AWOL. Use chicken bits to propagate * the value from DPLLBMD to either pipe B or C. */ - intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); - intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B), + intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); + intel_de_write(display, DPLL_MD(display, PIPE_B), hw_state->dpll_md); - intel_de_write(dev_priv, CBR4_VLV, 0); - dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md; + intel_de_write(display, CBR4_VLV, 0); + display->state.chv_dpll_md[pipe] = hw_state->dpll_md; /* * DPLLB VGA mode also seems to cause problems. * We should always have it disabled. 
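* The warning below catches DPLL_VGA_MODE_DIS being found clear on
* DPLL B.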
*/ - drm_WARN_ON(&dev_priv->drm, - (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & + drm_WARN_ON(display->drm, + (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); } else { - intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), + intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe)); + intel_de_posting_read(display, DPLL_MD(display, pipe)); } } /** * vlv_force_pll_on - forcibly enable just the PLL - * @dev_priv: i915 private structure + * @display: display device * @pipe: pipe PLL to enable * @dpll: PLL configuration * @@ -2222,10 +2211,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) * in cases where we need the PLL enabled even when @pipe is not going to * be enabled. */ -int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, +int vlv_force_pll_on(struct intel_display *display, enum pipe pipe, const struct dpll *dpll) { - struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); struct intel_crtc_state *crtc_state; @@ -2238,7 +2226,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, crtc_state->dpll = *dpll; crtc_state->output_types = BIT(INTEL_OUTPUT_EDP); - if (IS_CHERRYVIEW(dev_priv)) { + if (display->platform.cherryview) { chv_compute_dpll(crtc_state); chv_enable_pll(crtc_state); } else { @@ -2251,9 +2239,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, return 0; } -void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +void vlv_disable_pll(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; u32 val; /* Make sure the pipe isn't still relying on us */ @@ -2268,9 +2255,9 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) intel_de_posting_read(display, DPLL(display, pipe)); } -void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +void chv_disable_pll(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum dpio_channel ch = vlv_pipe_to_channel(pipe); enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; @@ -2316,18 +2303,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) /** * vlv_force_pll_off - forcibly disable just the PLL - * @dev_priv: i915 private structure + * @display: display device * @pipe: pipe PLL to disable * * Disable the PLL for @pipe. To be used in cases where we need * the PLL enabled even when @pipe is not going to be enabled. 
*/ -void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) +void vlv_force_pll_off(struct intel_display *display, enum pipe pipe) { - if (IS_CHERRYVIEW(dev_priv)) - chv_disable_pll(dev_priv, pipe); + if (display->platform.cherryview) + chv_disable_pll(display, pipe); else - vlv_disable_pll(dev_priv, pipe); + vlv_disable_pll(display, pipe); } /* Only for pre-ILK configs */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 21d06cbd2ce7..280e90a57c87 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -8,16 +8,15 @@ #include <linux/types.h> +enum pipe; struct dpll; -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_display; struct intel_dpll_hw_state; -enum pipe; -void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); +void intel_dpll_init_clock_hook(struct intel_display *display); int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, @@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc, void vlv_compute_dpll(struct intel_crtc_state *crtc_state); void chv_compute_dpll(struct intel_crtc_state *crtc_state); -int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, +int vlv_force_pll_on(struct intel_display *display, enum pipe pipe, const struct dpll *dpll); -void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); +void vlv_force_pll_off(struct intel_display *display, enum pipe pipe); void chv_enable_pll(const struct intel_crtc_state *crtc_state); -void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void chv_disable_pll(struct intel_display *display, enum pipe pipe); void vlv_enable_pll(const struct intel_crtc_state *crtc_state); -void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void vlv_disable_pll(struct intel_display *display, enum pipe pipe); void i9xx_enable_pll(const struct intel_crtc_state *crtc_state); void i9xx_disable_pll(const struct intel_crtc_state *crtc_state); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index c825a507b905..84df41086a89 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -257,7 +257,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - unsigned int pipe_mask = BIT(crtc->pipe); + unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state); unsigned int old_mask; if (drm_WARN_ON(display->drm, !pll)) @@ -303,7 +303,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - unsigned int pipe_mask = BIT(crtc->pipe); + unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state); /* PCH only available on ILK+ */ if (DISPLAY_VER(display) < 5) @@ -715,7 +715,6 @@ static void hsw_ddi_spll_enable(struct intel_display *display, static void hsw_ddi_wrpll_disable(struct 
intel_display *display, struct intel_shared_dpll *pll) { - struct drm_i915_private *i915 = to_i915(display->drm); const enum intel_dpll_id id = pll->info->id; intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); @@ -726,13 +725,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display, * that depend on it have been shut down. */ if (display->dpll.pch_ssc_use & BIT(id)) - intel_init_pch_refclk(i915); + intel_init_pch_refclk(display); } static void hsw_ddi_spll_disable(struct intel_display *display, struct intel_shared_dpll *pll) { - struct drm_i915_private *i915 = to_i915(display->drm); enum intel_dpll_id id = pll->info->id; intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0); @@ -743,7 +741,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display, * that depend on it have been shut down. */ if (display->dpll.pch_ssc_use & BIT(id)) - intel_init_pch_refclk(i915); + intel_init_pch_refclk(display); } static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display, @@ -2606,10 +2604,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display) { return ((display->platform.elkhartlake && IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) || - display->platform.tigerlake || - display->platform.alderlake_s || - display->platform.alderlake_p) && - display->dpll.ref_clks.nssc == 38400; + DISPLAY_VER(display) >= 12) && + display->dpll.ref_clks.nssc == 38400; } struct icl_combo_pll_params { diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 0d8ebe38226e..43bd97e4f589 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -9,6 +9,7 @@ #include "gt/gen8_ppgtt.h" #include "i915_drv.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" @@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, struct drm_i915_private *i915 = vm->i915; struct intel_display *display = &i915->display; struct i915_dpt *dpt = i915_vm_to_dpt(vm); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct i915_vma *vma; void __iomem *iomem; struct i915_gem_ww_ctx ww; @@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, if (i915_gem_object_is_stolen(dpt->obj)) pin_flags |= PIN_MAPPABLE; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); atomic_inc(&display->restore.pending_fb_pin); for_i915_gem_ww(&ww, err, true) { @@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, dpt->obj->mm.dirty = true; atomic_dec(&display->restore.pending_fb_pin); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return err ? 
ERR_PTR(err) : vma; } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 9fc4003d1579..72fe390c5af2 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -11,6 +11,7 @@ #include "i915_reg.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dsb.h" #include "intel_dsb_buffer.h" @@ -142,10 +143,10 @@ static int dsb_vtotal(struct intel_atomic_state *state, static int dsb_dewake_scanline_start(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *crtc_state = intel_pre_commit_crtc_state(state, crtc); - struct drm_i915_private *i915 = to_i915(state->base.dev); - unsigned int latency = skl_watermark_max_latency(i915, 0); + unsigned int latency = skl_watermark_max_latency(display, 0); return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) - intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency); @@ -795,22 +796,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, enum intel_dsb_id dsb_id, unsigned int max_cmds) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - intel_wakeref_t wakeref; + struct intel_display *display = to_intel_display(state); + struct ref_tracker *wakeref; struct intel_dsb *dsb; unsigned int size; - if (!HAS_DSB(i915)) + if (!HAS_DSB(display)) return NULL; - if (!i915->display.params.enable_dsb) + if (!display->params.enable_dsb) return NULL; dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); /* ~1 qword per instruction, full cachelines */ size = ALIGN(max_cmds * 8, CACHELINE_BYTES); @@ -818,7 +819,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size)) goto out_put_rpm; - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); dsb->id = dsb_id; dsb->crtc = crtc; @@ -831,10 +832,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, return dsb; out_put_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); kfree(dsb); out: - drm_info_once(&i915->drm, + drm_info_once(display->drm, "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", crtc->base.base.id, crtc->base.name, dsb_id); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 049443245310..b3c453bf7d5c 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -24,9 +24,10 @@ */ #include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> #include <video/mipi_display.h> -#include "i915_drv.h" +#include "intel_display_core.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_dcs_backlight.h" @@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, static int dcs_setup_backlight(struct intel_connector *connector, enum pipe unused) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_panel *panel = &connector->panel; if (panel->vbt.backlight.brightness_precision_bits > 8) @@ -172,7 +173,7 @@ 
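The intel_dpt.c and intel_dsb.c hunks above show the runtime-PM conversion that repeats throughout this series: the wakeref type changes from intel_wakeref_t to struct ref_tracker *, and the get/put pair moves from intel_runtime_pm_*() on the i915 device to the display-local wrappers. A minimal sketch; example_touch_hw() is a hypothetical name:

#include "intel_display_rpm.h"

static void example_touch_hw(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	/* Old form: wakeref = intel_runtime_pm_get(&i915->runtime_pm); */
	wakeref = intel_display_rpm_get(display);

	/* ... access display hardware ... */

	intel_display_rpm_put(display, wakeref);
}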
static int dcs_setup_backlight(struct intel_connector *connector, panel->backlight.level = panel->backlight.max; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Using DCS for backlight control\n", connector->base.base.id, connector->base.name); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 7b2ffd14ae6e..4e92504f5c14 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct mipi_dsi_device *dsi_device; u8 type, flags, seq_port; u16 len; enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); flags = *data++; type = *data++; @@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, port = intel_dsi_seq_port_to_port(intel_dsi, seq_port); - if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port])) + if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port])) goto out; dsi_device = intel_dsi->dsi_hosts[port]->device; if (!dsi_device) { - drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n", + drm_dbg_kms(display->drm, "no dsi device for port %c\n", port_name(port)); goto out; } @@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - drm_dbg(&dev_priv->drm, - "Generic Read not yet implemented or used\n"); + drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n"); break; case MIPI_DSI_GENERIC_LONG_WRITE: mipi_dsi_generic_write(dsi_device, data, len); @@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, mipi_dsi_dcs_write_buffer(dsi_device, data, 2); break; case MIPI_DSI_DCS_READ: - drm_dbg(&dev_priv->drm, - "DCS Read not yet implemented or used\n"); + drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n"); break; case MIPI_DSI_DCS_LONG_WRITE: mipi_dsi_dcs_write_buffer(dsi_device, data, len); break; } - if (DISPLAY_VER(dev_priv) < 11) + if (DISPLAY_VER(display) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: @@ -182,10 +180,10 @@ out: static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); u32 delay = *((const u32 *) data); - drm_dbg_kms(&i915->drm, "%d usecs\n", delay); + drm_dbg_kms(display->drm, "%d usecs\n", delay); usleep_range(delay, delay + 10); data += 4; @@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index, const char *con_id, u8 idx, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); /* XXX: this table is a quick ugly hack. 
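As in the backlight and DSI hunks above, struct intel_display is now derived straight from the nearest display object via to_intel_display() instead of detouring through to_i915(...->base.dev). A sketch with a hypothetical helper name; the debug call mirrors the converted ones:

static void example_log_connector(struct intel_connector *connector)
{
	/* Old form: struct drm_i915_private *i915 = to_i915(connector->base.dev); */
	struct intel_display *display = to_intel_display(connector);

	drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);
}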
*/ static struct gpio_desc *soc_gpio_table[U8_MAX + 1]; struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index]; @@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index, if (gpio_desc) { gpiod_set_value(gpio_desc, value); } else { - gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx, + gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx, value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW); if (IS_ERR(gpio_desc)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "GPIO index %u request failed (%pe)\n", gpio_index, gpio_desc); return; @@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector, static void vlv_gpio_set_value(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ if (connector->panel.vbt.dsi.seq_version < 3) { if (gpio_source == 1) { - drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n"); + drm_dbg_kms(display->drm, "SC gpio not supported\n"); return; } if (gpio_source > 1) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "unknown gpio source %u\n", gpio_source); return; } @@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector, static void chv_gpio_set_value(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); if (connector->panel.vbt.dsi.seq_version >= 3) { if (gpio_index >= CHV_GPIO_IDX_START_SE) { @@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector, } else { /* XXX: The spec is unclear about CHV GPIO on seq v2 */ if (gpio_source != 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "unknown gpio source %u\n", gpio_source); return; } if (gpio_index >= CHV_GPIO_IDX_START_E) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "invalid gpio index %u for GPIO N\n", gpio_index); return; @@ -320,13 +318,13 @@ enum { MIPI_VIO_EN_2, }; -static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, +static void icl_native_gpio_set_value(struct intel_display *display, int gpio, bool value) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); int index; - if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) + if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2)) return; switch (gpio) { @@ -344,7 +342,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, * modifications in irq setup and handling. */ spin_lock_irq(&dev_priv->irq_lock); - intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, SHOTPLUG_CTL_DDI_HPD_ENABLE(index) | SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index), value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0); @@ -354,14 +352,14 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_AVDD_EN_2: index = gpio == MIPI_AVDD_EN_1 ? 0 : 1; - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON, + intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON, value ? PANEL_POWER_ON : 0); break; case MIPI_BKLT_EN_1: case MIPI_BKLT_EN_2: index = gpio == MIPI_BKLT_EN_1 ? 
0 : 1; - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE, + intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE, value ? EDP_BLC_ENABLE : 0); break; case MIPI_AVEE_EN_1: @@ -389,13 +387,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *i915 = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; u8 gpio_source = 0, gpio_index = 0, gpio_number; bool value; int size; - bool native = DISPLAY_VER(i915) >= 11; + bool native = DISPLAY_VER(display) >= 11; if (connector->panel.vbt.dsi.seq_version >= 3) { size = 3; @@ -416,16 +413,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) gpio_source = (data[1] >> 1) & 3; } - drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", + drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); if (native) - icl_native_gpio_set_value(i915, gpio_number, value); - else if (DISPLAY_VER(i915) >= 9) + icl_native_gpio_set_value(display, gpio_number, value); + else if (DISPLAY_VER(display) >= 9) bxt_gpio_set_value(connector, gpio_index, value); - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) vlv_gpio_set_value(connector, gpio_source, gpio_number, value); - else if (IS_CHERRYVIEW(i915)) + else if (display->platform.cherryview) chv_gpio_set_value(connector, gpio_source, gpio_number, value); return data + size; @@ -463,8 +460,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 target_addr) { - struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct acpi_device *adev = ACPI_COMPANION(display->drm->dev); struct i2c_adapter_lookup lookup = { .target_addr = target_addr, .intel_dsi = intel_dsi, @@ -484,7 +481,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct i2c_adapter *adapter; struct i2c_msg msg; int ret; @@ -494,7 +491,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) u8 payload_size = *(data + 6); u8 *payload_data; - drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n", + drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n", vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7); if (intel_dsi->i2c_bus_num < 0) { @@ -504,7 +501,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); if (!adapter) { - drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n"); + drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n"); goto err_bus; } @@ -522,7 +519,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) ret = i2c_transfer(adapter, &msg, 1); if (ret < 0) - drm_err(&i915->drm, + drm_err(display->drm, 
"Failed to xfer payload of size (%u) to reg (%u)\n", payload_size, reg_offset); @@ -535,16 +532,16 @@ err_bus: static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); - drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n"); + drm_dbg_kms(display->drm, "Skipping SPI element execution\n"); return data + *(data + 5) + 6; } static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); #ifdef CONFIG_PMIC_OPREGION u32 value, mask, reg_address; u16 i2c_address; @@ -560,9 +557,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) reg_address, value, mask); if (ret) - drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret); + drm_err(display->drm, "%s failed, error: %d\n", __func__, ret); #else - drm_err(&i915->drm, + drm_err(display->drm, "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); #endif @@ -612,12 +609,12 @@ static const char *sequence_name(enum mipi_seq seq_id) static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, enum mipi_seq seq_id) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; const u8 *data; fn_mipi_elem_exec mipi_elem_exec; - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence))) return; @@ -625,9 +622,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, if (!data) return; - drm_WARN_ON(&dev_priv->drm, *data != seq_id); + drm_WARN_ON(display->drm, *data != seq_id); - drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n", + drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n", seq_id, sequence_name(seq_id)); /* Skip Sequence Byte. */ @@ -657,19 +654,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, /* Consistency check if we have size. */ if (operation_size && data != next) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Inconsistent operation size\n"); return; } } else if (operation_size) { /* We have size, skip. */ - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Unsupported MIPI operation byte %u\n", operation_byte); data += operation_size; } else { /* No size, can't skip without parsing. */ - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unsupported MIPI operation byte %u\n", operation_byte); return; @@ -695,54 +692,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, void intel_dsi_log_params(struct intel_dsi *intel_dsi) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); - - drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk); - drm_dbg_kms(&i915->drm, "Pixel overlap %d\n", - intel_dsi->pixel_overlap); - drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count); - drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); - drm_dbg_kms(&i915->drm, "Video mode format %s\n", - intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? - "non-burst with sync pulse" : - intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? - "non-burst with sync events" : - intel_dsi->video_mode == BURST_MODE ? 
- "burst" : "<unknown>"); - drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n", - intel_dsi->burst_mode_ratio); - drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val); - drm_dbg_kms(&i915->drm, "Eot %s\n", - str_enabled_disabled(intel_dsi->eotp_pkt)); - drm_dbg_kms(&i915->drm, "Clockstop %s\n", - str_enabled_disabled(!intel_dsi->clock_stop)); - drm_dbg_kms(&i915->drm, "Mode %s\n", - intel_dsi->operation_mode ? "command" : "video"); + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, + "DSI parameters:"); + + drm_printf(&p, "Pclk %d\n", intel_dsi->pclk); + drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap); + drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count); + drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); + drm_printf(&p, "Video mode format %s\n", + intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? + "non-burst with sync pulse" : + intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? + "non-burst with sync events" : + intel_dsi->video_mode == BURST_MODE ? + "burst" : "<unknown>"); + drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); + drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val); + drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt)); + drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop)); + drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) - drm_dbg_kms(&i915->drm, - "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); + drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) - drm_dbg_kms(&i915->drm, - "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); + drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); else - drm_dbg_kms(&i915->drm, "Dual link: NONE\n"); - drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format); - drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div); - drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n", - intel_dsi->lp_rx_timeout); - drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n", - intel_dsi->turn_arnd_val); - drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count); - drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n", - intel_dsi->hs_to_lp_count); - drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); - drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); - drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n", - intel_dsi->clk_lp_to_hs_count); - drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n", - intel_dsi->clk_hs_to_lp_count); - drm_dbg_kms(&i915->drm, "BTA %s\n", - str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); + drm_printf(&p, "Dual link: NONE\n"); + drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format); + drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div); + drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); + drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); + drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count); + drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); + drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); + drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); + drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); + drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); + drm_printf(&p, 
"BTA %s\n", + str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); } static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format) @@ -764,8 +751,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format) bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps; @@ -773,7 +759,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) u16 burst_mode_ratio; enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; @@ -819,7 +805,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) u32 bitrate; if (mipi_config->target_burst_mode_freq == 0) { - drm_err(&dev_priv->drm, "Burst mode target is not set\n"); + drm_err(display->drm, "Burst mode target is not set\n"); return false; } @@ -836,7 +822,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) mipi_config->target_burst_mode_freq = bitrate; if (mipi_config->target_burst_mode_freq < bitrate) { - drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n"); + drm_err(display->drm, "Burst mode freq is less than computed\n"); return false; } @@ -900,8 +886,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = { void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; enum gpiod_flags flags = panel_is_on ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; @@ -911,13 +896,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) struct pinctrl *pinctrl; int ret; - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.valleyview || display->platform.cherryview) && mipi_config->pwm_blc == PPS_BLC_PMIC) { gpiod_lookup_table = &pmic_panel_gpio_table; want_panel_gpio = true; } - if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) { gpiod_lookup_table = &soc_panel_gpio_table; want_panel_gpio = true; want_backlight_gpio = true; @@ -926,12 +911,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, ARRAY_SIZE(soc_pwm_pinctrl_map)); if (ret) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to register pwm0 pinmux mapping\n"); - pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); + pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0"); if (IS_ERR(pinctrl)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to set pinmux to PWM\n"); } @@ -939,9 +924,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) gpiod_add_lookup_table(gpiod_lookup_table); if (want_panel_gpio) { - intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags); + intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags); if (IS_ERR(intel_dsi->gpio_panel)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to own gpio for panel control\n"); intel_dsi->gpio_panel = NULL; } @@ -949,9 +934,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) if (want_backlight_gpio) { intel_dsi->gpio_backlight = - devm_gpiod_get(dev->dev, "backlight", flags); + devm_gpiod_get(display->drm->dev, "backlight", flags); if (IS_ERR(intel_dsi->gpio_backlight)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to own gpio for backlight control\n"); intel_dsi->gpio_backlight = NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index c16fb34b737d..b61520353c92 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -31,10 +31,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_driver.h" @@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector) static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; u32 tmp; - tmp = intel_de_read(i915, DVO(port)); + tmp = intel_de_read(display, DVO(port)); if (!(tmp & DVO_ENABLE)) return false; @@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = 
encoder->port; u32 tmp; - tmp = intel_de_read(i915, DVO(port)); + tmp = intel_de_read(display, DVO(port)); *pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp); @@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); enum port port = encoder->port; u32 tmp, flags = 0; pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO); - tmp = intel_de_read(i915, DVO(port)); + tmp = intel_de_read(display, DVO(port)); if (tmp & DVO_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else @@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); - intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0); - intel_de_posting_read(i915, DVO(port)); + intel_de_rmw(display, DVO(port), DVO_ENABLE, 0); + intel_de_posting_read(display, DVO(port)); } static void intel_enable_dvo(struct intel_atomic_state *state, @@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum port port = encoder->port; @@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state, &pipe_config->hw.mode, &pipe_config->hw.adjusted_mode); - intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE); - intel_de_posting_read(i915, DVO(port)); + intel_de_rmw(display, DVO(port), 0, DVO_ENABLE); + intel_de_posting_read(display, DVO(port)); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } @@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port = encoder->port; @@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, u32 dvo_val; /* Save the active data order, since I don't know what it should be set to. 
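The intel_dsi_log_params() rewrite above replaces a long run of drm_dbg_kms() calls with a single drm_dbg_printer that carries the prefix once and is reused for every line. A minimal sketch of that pattern; example_log_params() and its two fields are hypothetical:

#include <drm/drm_print.h>

static void example_log_params(struct intel_display *display,
			       int pclk, int lane_count)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "DSI parameters:");

	drm_printf(&p, "Pclk %d\n", pclk);
	drm_printf(&p, "Lane count %d\n", lane_count);
}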
*/ - dvo_val = intel_de_read(i915, DVO(port)) & + dvo_val = intel_de_read(display, DVO(port)) & (DVO_DEDICATED_INT_ENABLE | DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK); dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | @@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; - intel_de_write(i915, DVO_SRCDIM(port), + intel_de_write(display, DVO_SRCDIM(port), DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) | DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay)); - intel_de_write(i915, DVO(port), dvo_val); + intel_de_write(display, DVO(port), dvo_val); } static enum drm_connector_status @@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force) { struct intel_display *display = to_intel_display(_connector->dev); struct intel_connector *connector = to_intel_connector(_connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dvo *intel_dvo = intel_attached_dvo(connector); - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id, connector->base.name); if (!intel_display_device_enabled(display)) @@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo) } } -static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, +static bool intel_dvo_init_dev(struct intel_display *display, struct intel_dvo *intel_dvo, const struct intel_dvo_device *dvo) { - struct intel_display *display = &dev_priv->display; struct i2c_adapter *i2c; u32 dpll[I915_MAX_PIPES]; enum pipe pipe; @@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * the clock enabled before we attempt to initialize * the device. 
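intel_de_rmw() returns the pre-modification register value, which is what lets the DVO init path below save the DPLL state, force the 2x clock on for probing, and restore the original values afterwards. A hedged sketch of that save/modify/restore shape; example_probe_with_2x_clock() is hypothetical:

static void example_probe_with_2x_clock(struct intel_display *display)
{
	u32 saved[I915_MAX_PIPES];
	enum pipe pipe;

	/* Save-and-set in one step: intel_de_rmw() returns the old value. */
	for_each_pipe(display, pipe)
		saved[pipe] = intel_de_rmw(display, DPLL(display, pipe),
					   0, DPLL_DVO_2X_MODE);

	/* ... probe the DVO device while the 2x clock runs ... */

	for_each_pipe(display, pipe)
		intel_de_write(display, DPLL(display, pipe), saved[pipe]);
}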
*/ - for_each_pipe(dev_priv, pipe) - dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0, + for_each_pipe(display, pipe) + dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0, DPLL_DVO_2X_MODE); ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); /* restore the DVO 2x clock state to original */ - for_each_pipe(dev_priv, pipe) { - intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]); + for_each_pipe(display, pipe) { + intel_de_write(display, DPLL(display, pipe), dpll[pipe]); } intel_gmbus_force_bit(i2c, false); @@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, return ret; } -static bool intel_dvo_probe(struct drm_i915_private *i915, +static bool intel_dvo_probe(struct intel_display *display, struct intel_dvo *intel_dvo) { int i; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { - if (intel_dvo_init_dev(i915, intel_dvo, + if (intel_dvo_init_dev(display, intel_dvo, &intel_dvo_devices[i])) return true; } @@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915, return false; } -void intel_dvo_init(struct drm_i915_private *i915) +void intel_dvo_init(struct intel_display *display) { - struct intel_display *display = &i915->display; struct intel_connector *connector; struct intel_encoder *encoder; struct intel_dvo *intel_dvo; @@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915) encoder->pre_enable = intel_dvo_pre_enable; connector->get_hw_state = intel_dvo_connector_get_hw_state; - if (!intel_dvo_probe(i915, intel_dvo)) { + if (!intel_dvo_probe(display, intel_dvo)) { kfree(intel_dvo); intel_connector_free(connector); return; @@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915) encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) | BIT(INTEL_OUTPUT_DVO); - drm_encoder_init(&i915->drm, &encoder->base, + drm_encoder_init(display->drm, &encoder->base, &intel_dvo_enc_funcs, intel_dvo_encoder_type(&intel_dvo->dev), "DVO %c", port_name(encoder->port)); - drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n", + drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n", encoder->base.base.id, encoder->base.name, intel_dvo->dev.name); @@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915) DRM_CONNECTOR_POLL_DISCONNECT; connector->base.polled = connector->polled; - drm_connector_init_with_ddc(&i915->drm, &connector->base, + drm_connector_init_with_ddc(display->drm, &connector->base, &intel_dvo_connector_funcs, intel_dvo_connector_type(&intel_dvo->dev), intel_gmbus_get_adapter(display, GMBUS_PIN_DPC)); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h index bf7a356422ab..83776552fc87 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.h +++ b/drivers/gpu/drm/i915/display/intel_dvo.h @@ -6,12 +6,12 @@ #ifndef __INTEL_DVO_H__ #define __INTEL_DVO_H__ -struct drm_i915_private; +struct intel_display; #ifdef I915 -void intel_dvo_init(struct drm_i915_private *dev_priv); +void intel_dvo_init(struct intel_display *display); #else -static inline void intel_dvo_init(struct drm_i915_private *dev_priv) +static inline void intel_dvo_init(struct intel_display *display) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 30ac9b089ad6..c648ab8a93d7 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -12,6 +12,7 @@ #include "i915_drv.h" #include "intel_atomic_plane.h" +#include 
"intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" @@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_gem_object *_obj = intel_fb_bo(fb); struct drm_i915_gem_object *obj = to_intel_bo(_obj); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct i915_gem_ww_ctx ww; struct i915_vma *vma; unsigned int pinctl; @@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. */ - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); atomic_inc(&display->restore.pending_fb_pin); @@ -215,7 +216,7 @@ err: vma = ERR_PTR(ret); atomic_dec(&display->restore.pending_fb_pin); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return vma; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b6978135e8ad..ce5b1e3f1c20 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -55,6 +55,7 @@ #include "intel_cdclk.h" #include "intel_de.h" #include "intel_display_device.h" +#include "intel_display_rpm.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_display_wa.h" @@ -519,6 +520,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc) DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } +static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc, + bool disable) +{ + struct intel_display *display = fbc->display; + + if (display->platform.dg2) + intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS, + disable ? DG2_DPFC_GATING_DIS : 0); + else if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id), + MTL_DPFC_GATING_DIS, + disable ? MTL_DPFC_GATING_DIS : 0); +} + static void ilk_fbc_deactivate(struct intel_fbc *fbc) { struct intel_display *display = fbc->display; @@ -532,6 +547,10 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); + + /* wa_18038517565 Enable DPFC clock gating after FBC disable */ + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) + fbc_compressor_clkgate_disable_wa(fbc, false); } } @@ -921,6 +940,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc) if (DISPLAY_VER(display) >= 11 && !display->platform.dg2) intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); + + /* wa_18038517565 Disable DPFC clock gating before FBC enable */ + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) + fbc_compressor_clkgate_disable_wa(fbc, true); } static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) @@ -1436,7 +1459,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (intel_display_needs_wa_16023588340(i915)) { + if (intel_display_needs_wa_16023588340(display)) { plane_state->no_fbc_reason = "Wa_16023588340"; return 0; } @@ -1464,14 +1487,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 * - * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot - * coexist. So if PSR2 selective fetch is supported then mark that - * FBC is not supported. 
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect + * TODO: Implement a logic to select between PSR2 selective fetch and + * FBC based on Bspec: 68881 in xe2lpd onwards. + * + * As we still see some strange underruns in those platforms while + * disabling PSR2, keep FBC disabled in case of selective update is on + * until the selection logic is implemented. */ - if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) && - crtc_state->has_sel_update && !crtc_state->has_panel_replay) { - plane_state->no_fbc_reason = "PSR2 enabled"; + if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) { + plane_state->no_fbc_reason = "Selective update enabled"; return 0; } @@ -2120,13 +2144,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_fbc *fbc = m->private; struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_plane *plane; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; drm_modeset_lock_all(display->drm); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); mutex_lock(&fbc->lock); if (fbc->active) { @@ -2151,7 +2174,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); drm_modeset_unlock_all(display->drm); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index adc19d5607de..369f46286e95 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -50,6 +50,7 @@ #include "i915_drv.h" #include "i915_vma.h" #include "intel_bo.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" @@ -213,7 +214,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct intel_framebuffer *fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); - intel_wakeref_t wakeref; + struct intel_display *display = to_intel_display(dev); + struct ref_tracker *wakeref; struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; @@ -247,7 +249,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, sizes->fb_height = fb->base.height; } - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); /* Pin the GGTT vma for our access via info->screen_base. 
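Besides the explicit get/put pair, the series also uses a scoped form: the intel_hdcp.c hunk below switches to with_intel_display_rpm(), which holds the wakeref only across the statement it covers. A sketch mirroring that hunk; example_power_well_enabled() is a hypothetical name:

static bool example_power_well_enabled(struct intel_display *display,
				       enum i915_power_well_id id)
{
	bool enabled = false;

	/* Wakeref acquired before the block, released after it. */
	with_intel_display_rpm(display)
		enabled = intel_display_power_well_is_enabled(display, id);

	return enabled;
}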
* This also validates that any existing fb inherited from the @@ -299,14 +301,15 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; out_unpin: intel_fb_unpin_vma(vma, flags); out_unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); + return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 7a8fbff39be0..451cd26024f7 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -136,14 +136,13 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display, static void ilk_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; if (enable) - ilk_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(display, bit); else - ilk_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(display, bit); } static void ivb_check_fifo_underruns(struct intel_crtc *crtc) @@ -169,7 +168,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = to_i915(display->drm); if (enable) { intel_de_write(display, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); @@ -177,9 +175,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, if (!ivb_can_enable_err_int(display)) return; - ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_enable_display_irq(display, DE_ERR_INT_IVB); } else { - ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_disable_display_irq(display, DE_ERR_INT_IVB); if (old && intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { @@ -193,26 +191,23 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, static void bdw_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (enable) - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN); else - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pch_transcoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 bit = (pch_transcoder == PIPE_A) ? 
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) - ibx_enable_display_interrupt(dev_priv, bit); + ibx_enable_display_interrupt(display, bit); else - ibx_disable_display_interrupt(dev_priv, bit); + ibx_disable_display_interrupt(display, bit); } static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) @@ -240,8 +235,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pch_transcoder, bool enable, bool old) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (enable) { intel_de_write(display, SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); @@ -249,9 +242,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display, if (!cpt_can_enable_serr_int(display)) return; - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); + ibx_enable_display_interrupt(display, SDE_ERROR_CPT); } else { - ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); + ibx_disable_display_interrupt(display, SDE_ERROR_CPT); if (old && intel_de_read(display, SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { @@ -477,8 +470,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display, struct intel_crtc *crtc, bool enable) { - struct drm_i915_private *i915 = to_i915(display->drm); - crtc->cpu_fifo_underrun_disabled = !enable; /* @@ -490,6 +481,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display, * PCH transcoders B and C would prevent enabling the south * error interrupt (see cpt_can_enable_serr_int()). */ - if (intel_has_pch_trancoder(i915, crtc->pipe)) + if (intel_has_pch_trancoder(display, crtc->pipe)) crtc->pch_fifo_underrun_disabled = !enable; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 1bf424a822f3..411f17655f89 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -22,7 +22,9 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_dp_mst.h" #include "intel_hdcp.h" #include "intel_hdcp_gsc.h" #include "intel_hdcp_regs.h" @@ -136,7 +138,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state, data->k++; /* if there is only one active stream */ - if (dig_port->dp.mst.active_links <= 1) + if (intel_dp_mst_active_streams(&dig_port->dp) <= 1) break; } drm_connector_list_iter_end(&conn_iter); @@ -334,9 +336,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, static bool hdcp_key_loadable(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); enum i915_power_well_id id; - intel_wakeref_t wakeref; bool enabled = false; /* @@ -349,7 +349,7 @@ static bool hdcp_key_loadable(struct intel_display *display) id = SKL_DISP_PW_1; /* PG1 (power well #1) needs to be enabled */ - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) enabled = intel_display_power_well_is_enabled(display, id); /* diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 33b8d5229db0..f9fa17e1f584 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -64,6 +64,7 @@ #include "intel_panel.h" #include "intel_pfit.h" #include "intel_snps_phy.h" +#include "intel_vrr.h" static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) @@ -2384,6 +2385,8 @@ int 
intel_hdmi_compute_config(struct intel_encoder *encoder, } } + intel_vrr_compute_config(pipe_config, conn_state); + intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 00d7b1ccf190..6885e5a09079 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -30,6 +30,7 @@ #include "i915_irq.h" #include "intel_connector.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -118,7 +119,7 @@ intel_connector_hpd_pin(struct intel_connector *connector) /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin - * @dev_priv: private driver data pointer + * @display: display device * @pin: the pin to gather stats on * @long_hpd: whether the HPD IRQ was long or short * @@ -127,13 +128,13 @@ intel_connector_hpd_pin(struct intel_connector *connector) * responsible for further action. * * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is - * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to + * stored in @display->hotplug.hpd_storm_threshold which defaults to * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and * short IRQs count as +1. If this threshold is exceeded, it's considered an * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. * * By default, most systems will only count long IRQs towards - * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also + * &display->hotplug.hpd_storm_threshold. However, some older systems also * suffer from short IRQ storms and must also track these. Because short IRQ * storms are naturally caused by sideband interactions with DP MST devices, * short IRQ detection is only enabled for systems without DP MST support. @@ -145,10 +146,10 @@ intel_connector_hpd_pin(struct intel_connector *connector) * * Return true if an IRQ storm was detected on @pin. */ -static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, +static bool intel_hpd_irq_storm_detect(struct intel_display *display, enum hpd_pin pin, bool long_hpd) { - struct intel_hotplug *hpd = &dev_priv->display.hotplug; + struct intel_hotplug *hpd = &display->hotplug; unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); const int increment = long_hpd ? 
10 : 1; @@ -156,7 +157,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, bool storm = false; if (!threshold || - (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) + (!long_hpd && !display->hotplug.hpd_short_storm_enabled)) return false; if (!time_in_range(jiffies, start, end)) { @@ -167,11 +168,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, hpd->stats[pin].count += increment; if (hpd->stats[pin].count > threshold) { hpd->stats[pin].state = HPD_MARK_DISABLED; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "HPD interrupt storm detected on PIN %d\n", pin); storm = true; } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Received HPD interrupt on PIN %d - cnt: %d\n", pin, hpd->stats[pin].count); @@ -180,56 +181,65 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, return storm; } -static bool detection_work_enabled(struct drm_i915_private *i915) +static bool detection_work_enabled(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - return i915->display.hotplug.detection_work_enabled; + return display->hotplug.detection_work_enabled; } static bool -mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return mod_delayed_work(i915->unordered_wq, work, delay); } static bool -queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return queue_delayed_work(i915->unordered_wq, work, delay); } static bool -queue_detection_work(struct drm_i915_private *i915, struct work_struct *work) +queue_detection_work(struct intel_display *display, struct work_struct *work) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return queue_work(i915->unordered_wq, work); } static void -intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) +intel_hpd_irq_storm_switch_to_polling(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; bool hpd_disabled = false; lockdep_assert_held(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -238,15 +248,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) + display->hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; - drm_info(&dev_priv->drm, + drm_info(display->drm, "HPD interrupt storm detected on connector %s: " "switching from 
hotplug detection to polling\n", connector->base.name); - dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; + display->hotplug.stats[pin].state = HPD_DISABLED; connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; @@ -255,36 +265,36 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_reschedule(&dev_priv->drm); - mod_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.reenable_work, + drm_kms_helper_poll_reschedule(display->drm); + mod_delayed_detection_work(display, + &display->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), - display.hotplug.reenable_work.work); + struct intel_display *display = + container_of(work, typeof(*display), hotplug.reenable_work.work); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; enum hpd_pin pin; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); spin_lock_irq(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) + display->hotplug.stats[pin].state != HPD_DISABLED) continue; if (connector->base.polled != connector->polled) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Reenabling HPD on connector %s\n", connector->base.name); connector->base.polled = connector->polled; @@ -292,15 +302,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); for_each_hpd_pin(pin) { - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) - dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; + if (display->hotplug.stats[pin].state == HPD_DISABLED) + display->hotplug.stats[pin].state = HPD_ENABLED; } - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } static enum intel_hotplug_state @@ -349,32 +359,75 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) enc_to_dig_port(encoder)->hpd_pulse != NULL; } +static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(display->drm, encoder) { + if (encoder->hpd_pin != pin) + continue; + + if (intel_encoder_has_hpd_pulse(encoder)) + return true; + } + + return false; +} + +static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + lockdep_assert_held(&i915->irq_lock); + + return display->hotplug.stats[pin].blocked_count; +} + +static u32 get_blocked_hpd_pin_mask(struct intel_display *display) +{ + enum hpd_pin pin; + u32 hpd_pin_mask = 0; + + for_each_hpd_pin(pin) { + if (hpd_pin_is_blocked(display, pin)) + hpd_pin_mask |= BIT(pin); + } + + return hpd_pin_mask; +} + static void i915_digport_work_func(struct 
work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); - u32 long_port_mask, short_port_mask; + struct intel_display *display = + container_of(work, struct intel_display, hotplug.dig_port_work); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + u32 long_hpd_pin_mask, short_hpd_pin_mask; struct intel_encoder *encoder; + u32 blocked_hpd_pin_mask; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); - long_port_mask = dev_priv->display.hotplug.long_port_mask; - dev_priv->display.hotplug.long_port_mask = 0; - short_port_mask = dev_priv->display.hotplug.short_port_mask; - dev_priv->display.hotplug.short_port_mask = 0; + + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask; + hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask; + short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask; + hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask; + spin_unlock_irq(&dev_priv->irq_lock); - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { struct intel_digital_port *dig_port; - enum port port = encoder->port; + enum hpd_pin pin = encoder->hpd_pin; bool long_hpd, short_hpd; enum irqreturn ret; if (!intel_encoder_has_hpd_pulse(encoder)) continue; - long_hpd = long_port_mask & BIT(port); - short_hpd = short_port_mask & BIT(port); + long_hpd = long_hpd_pin_mask & BIT(pin); + short_hpd = short_hpd_pin_mask & BIT(pin); if (!long_hpd && !short_hpd) continue; @@ -384,15 +437,15 @@ static void i915_digport_work_func(struct work_struct *work) ret = dig_port->hpd_pulse(dig_port, long_hpd); if (ret == IRQ_NONE) { /* fall back to old school hpd */ - old_bits |= BIT(encoder->hpd_pin); + old_bits |= BIT(pin); } } if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.event_bits |= old_bits; - queue_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, 0); + display->hotplug.event_bits |= old_bits; + queue_delayed_detection_work(display, + &display->hotplug.hotplug_work, 0); spin_unlock_irq(&dev_priv->irq_lock); } } @@ -406,13 +459,18 @@ static void i915_digport_work_func(struct work_struct *work) */ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + struct intel_encoder *encoder = &dig_port->base; spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); - spin_unlock_irq(&i915->irq_lock); - queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); + hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin); + if (!hpd_pin_is_blocked(display, encoder->hpd_pin)) + queue_work(hotplug->dp_wq, &hotplug->dig_port_work); + + spin_unlock_irq(&i915->irq_lock); } /* @@ -420,9 +478,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) */ static void i915_hotplug_work_func(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - display.hotplug.hotplug_work.work); + struct intel_display *display = + container_of(work, struct intel_display, hotplug.hotplug_work.work); + struct drm_i915_private *dev_priv = 
to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; u32 changed = 0, retry = 0; @@ -430,30 +489,32 @@ static void i915_hotplug_work_func(struct work_struct *work) u32 hpd_retry_bits; struct drm_connector *first_changed_connector = NULL; int changed_connectors = 0; + u32 blocked_hpd_pin_mask; - mutex_lock(&dev_priv->drm.mode_config.mutex); - drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); + mutex_lock(&display->drm->mode_config.mutex); + drm_dbg_kms(display->drm, "running encoder hotplug functions\n"); spin_lock_irq(&dev_priv->irq_lock); - hpd_event_bits = dev_priv->display.hotplug.event_bits; - dev_priv->display.hotplug.event_bits = 0; - hpd_retry_bits = dev_priv->display.hotplug.retry_bits; - dev_priv->display.hotplug.retry_bits = 0; + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask; + hotplug->event_bits &= ~hpd_event_bits; + hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask; + hotplug->retry_bits &= ~hpd_retry_bits; /* Enable polling for connectors which had HPD IRQ storms */ - intel_hpd_irq_storm_switch_to_polling(dev_priv); + intel_hpd_irq_storm_switch_to_polling(display); spin_unlock_irq(&dev_priv->irq_lock); /* Skip calling encoder hotplug handlers if ignore long HPD set */ - if (dev_priv->display.hotplug.ignore_long_hpd) { - drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + if (display->hotplug.ignore_long_hpd) { - drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); + drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); return; } - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; u32 hpd_bit; @@ -472,7 +533,7 @@ static void i915_hotplug_work_func(struct work_struct *work) else connector->hotplug_retries++; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Connector %s (pin %i) received hotplug event. 
(retry %d)\n", connector->base.name, pin, connector->hotplug_retries); @@ -495,12 +556,12 @@ static void i915_hotplug_work_func(struct work_struct *work) } } drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (changed_connectors == 1) drm_kms_helper_connector_hotplug_event(first_changed_connector); else if (changed_connectors > 0) - drm_kms_helper_hotplug_event(&dev_priv->drm); + drm_kms_helper_hotplug_event(display->drm); if (first_changed_connector) drm_connector_put(first_changed_connector); @@ -509,10 +570,10 @@ static void i915_hotplug_work_func(struct work_struct *work) retry &= ~changed; if (retry) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.retry_bits |= retry; + display->hotplug.retry_bits |= retry; - mod_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, + mod_delayed_detection_work(display, + &display->hotplug.hotplug_work, msecs_to_jiffies(HPD_RETRY_DELAY)); spin_unlock_irq(&dev_priv->irq_lock); } @@ -521,7 +582,7 @@ static void i915_hotplug_work_func(struct work_struct *work) /** * intel_hpd_irq_handler - main hotplug irq handler - * @dev_priv: drm_i915_private + * @display: display device * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * @@ -535,9 +596,10 @@ static void i915_hotplug_work_func(struct work_struct *work) * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. */ -void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, +void intel_hpd_irq_handler(struct intel_display *display, u32 pin_mask, u32 long_mask) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; @@ -556,8 +618,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * as each pin may have up to two encoders (HDMI and DP) and * only the one of them (DP) will have ->hpd_pulse(). */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - enum port port = encoder->port; + for_each_intel_encoder(display->drm, encoder) { bool long_hpd; pin = encoder->hpd_pin; @@ -569,18 +630,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, long_hpd = long_mask & BIT(pin); - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "digital hpd on [ENCODER:%d:%s] - %s\n", encoder->base.base.id, encoder->base.name, long_hpd ? "long" : "short"); - queue_dig = true; + + if (!hpd_pin_is_blocked(display, pin)) + queue_dig = true; if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.long_port_mask |= BIT(port); + display->hotplug.long_hpd_pin_mask |= BIT(pin); } else { short_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.short_port_mask |= BIT(port); + display->hotplug.short_hpd_pin_mask |= BIT(pin); } } @@ -591,20 +654,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { + if (display->hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. 
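
As an aside for readers following the storm logic: intel_hpd_irq_storm_detect() above boils down to counting IRQs per pin inside a time window and flagging the pin once a threshold is crossed. A standalone C model of that accounting (a simple arithmetic window replaces the driver's time_in_range()/jiffies bookkeeping; all names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative model of per-pin storm accounting: a pin is marked
     * for disabling once more than `threshold` IRQs land inside one
     * `window` of time. Not the driver code. */
    struct hpd_pin_stats {
            unsigned long last_jiffies;     /* start of the current window */
            int count;                      /* IRQs seen in the window */
            bool mark_disabled;
    };

    static bool storm_detect(struct hpd_pin_stats *s, unsigned long now,
                             unsigned long window, int threshold, int increment)
    {
            if (!threshold)
                    return false;

            /* Outside the window: restart accounting from this IRQ. */
            if (now - s->last_jiffies > window) {
                    s->last_jiffies = now;
                    s->count = 0;
            }

            s->count += increment;
            if (s->count > threshold) {
                    s->mark_disabled = true;  /* HPD_MARK_DISABLED in the driver */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct hpd_pin_stats s = {0};

            for (int i = 0; i < 8; i++)
                    if (storm_detect(&s, 100 + i, 1000, 5, 1))
                            printf("storm detected at IRQ %d\n", i);
            return 0;
    }

In the driver, crossing the threshold flips the pin to HPD_MARK_DISABLED; the reenable work shown earlier restores it after HPD_STORM_REENABLE_DELAY.
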
*/ - drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), + drm_WARN_ONCE(display->drm, !HAS_GMCH(display), "Received HPD interrupt on pin %d although disabled\n", pin); continue; } - if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) + if (display->hotplug.stats[pin].state != HPD_ENABLED) continue; /* @@ -615,13 +678,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { long_hpd = long_hpd_pulse_mask & BIT(pin); } else { - dev_priv->display.hotplug.event_bits |= BIT(pin); + display->hotplug.event_bits |= BIT(pin); long_hpd = true; - queue_hp = true; + + if (!hpd_pin_is_blocked(display, pin)) + queue_hp = true; } - if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { - dev_priv->display.hotplug.event_bits &= ~BIT(pin); + if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) { + display->hotplug.event_bits &= ~BIT(pin); storm_detected = true; queue_hp = true; } @@ -632,7 +697,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * happens later in our hotplug work. */ if (storm_detected) - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); /* * Our hotplug handler can grab modeset locks (by calling down into the @@ -641,17 +706,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * deadlock. */ if (queue_dig) - queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); + queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work); if (queue_hp) - queue_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, 0); + queue_delayed_detection_work(display, + &display->hotplug.hotplug_work, 0); spin_unlock(&dev_priv->irq_lock); } /** * intel_hpd_init - initializes and enables hpd support - * @dev_priv: i915 device instance + * @display: display device instance * * This function enables the hotplug support. It requires that interrupts have * already been enabled with intel_irq_init_hw(). From this point on hotplug and @@ -663,16 +728,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). */ -void intel_hpd_init(struct drm_i915_private *dev_priv) +void intel_hpd_init(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int i; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; for_each_hpd_pin(i) { - dev_priv->display.hotplug.stats[i].count = 0; - dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; + display->hotplug.stats[i].count = 0; + display->hotplug.stats[i].state = HPD_ENABLED; } /* @@ -680,23 +746,23 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) * just to make the assert_spin_locked checks happy. 
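
The comment above describes a common kernel idiom: the callee asserts its locking contract (lockdep_assert_held()/assert_spin_locked()), so even a single-threaded init path takes the lock purely to keep that assertion satisfied. A user-space sketch of the same contract, with a pthread mutex and a manual "held" flag standing in for lockdep (names invented for illustration):

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int irq_lock_held;       /* stand-in for lockdep bookkeeping */

    static void irq_setup(void)
    {
            assert(irq_lock_held);  /* lockdep_assert_held() equivalent */
            /* ... program hotplug interrupt registers ... */
    }

    static void hpd_init(void)
    {
            /* No concurrency exists yet at init time; the lock is taken
             * solely to honor the callee's locking contract. */
            pthread_mutex_lock(&irq_lock);
            irq_lock_held = 1;
            irq_setup();
            irq_lock_held = 0;
            pthread_mutex_unlock(&irq_lock);
    }

    int main(void) { hpd_init(); return 0; }
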
*/ spin_lock_irq(&dev_priv->irq_lock); - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); spin_unlock_irq(&dev_priv->irq_lock); } -static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) +static void i915_hpd_poll_detect_connectors(struct intel_display *display) { struct drm_connector_list_iter conn_iter; struct intel_connector *connector; struct intel_connector *first_changed_connector = NULL; int changed = 0; - mutex_lock(&i915->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); - if (!i915->drm.mode_config.poll_enabled) + if (!display->drm->mode_config.poll_enabled) goto out; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD)) continue; @@ -714,7 +780,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); out: - mutex_unlock(&i915->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!changed) return; @@ -722,25 +788,24 @@ out: if (changed == 1) drm_kms_helper_connector_hotplug_event(&first_changed_connector->base); else - drm_kms_helper_hotplug_event(&i915->drm); + drm_kms_helper_hotplug_event(display->drm); drm_connector_put(&first_changed_connector->base); } static void i915_hpd_poll_init_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - display.hotplug.poll_init_work); - struct intel_display *display = &dev_priv->display; + struct intel_display *display = + container_of(work, typeof(*display), hotplug.poll_init_work); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; intel_wakeref_t wakeref; bool enabled; - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); - enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); + enabled = READ_ONCE(display->hotplug.poll_enabled); /* * Prevent taking a power reference from this sequence of * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() -> @@ -750,14 +815,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (!enabled) { wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE); - drm_WARN_ON(&dev_priv->drm, - READ_ONCE(dev_priv->display.hotplug.poll_enabled)); - cancel_work(&dev_priv->display.hotplug.poll_init_work); + drm_WARN_ON(display->drm, + READ_ONCE(display->hotplug.poll_enabled)); + cancel_work(&display->hotplug.poll_init_work); } spin_lock_irq(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -765,7 +830,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (pin == HPD_NONE) continue; - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) + if (display->hotplug.stats[pin].state == HPD_DISABLED) continue; connector->base.polled = connector->polled; @@ -779,16 +844,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); if (enabled) - drm_kms_helper_poll_reschedule(&dev_priv->drm); + drm_kms_helper_poll_reschedule(display->drm); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); /* * We might have 
missed any hotplugs that happened while we were * in the middle of disabling polling */ if (!enabled) { - i915_hpd_poll_detect_connectors(dev_priv); + i915_hpd_poll_detect_connectors(display); intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, @@ -798,7 +863,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) /** * intel_hpd_poll_enable - enable polling for connectors with hpd - * @dev_priv: i915 device instance + * @display: display device instance * * This function enables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, @@ -812,15 +877,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work) * * Also see: intel_hpd_init() and intel_hpd_poll_disable(). */ -void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) +void intel_hpd_poll_enable(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - if (!HAS_DISPLAY(dev_priv) || - !intel_display_device_enabled(display)) + if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display)) return; - WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); + WRITE_ONCE(display->hotplug.poll_enabled, true); /* * We might already be holding dev->mode_config.mutex, so do this in a @@ -829,14 +893,14 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * this worker anyway */ spin_lock_irq(&dev_priv->irq_lock); - queue_detection_work(dev_priv, - &dev_priv->display.hotplug.poll_init_work); + queue_detection_work(display, + &display->hotplug.poll_init_work); spin_unlock_irq(&dev_priv->irq_lock); } /** * intel_hpd_poll_disable - disable polling for connectors with hpd - * @dev_priv: i915 device instance + * @display: display device instance * * This function disables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, @@ -853,26 +917,28 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * * Also see: intel_hpd_init() and intel_hpd_poll_enable(). */ -void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) +void intel_hpd_poll_disable(struct intel_display *display) { - if (!HAS_DISPLAY(dev_priv)) + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (!HAS_DISPLAY(display)) return; - WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); + WRITE_ONCE(display->hotplug.poll_enabled, false); spin_lock_irq(&dev_priv->irq_lock); - queue_detection_work(dev_priv, - &dev_priv->display.hotplug.poll_init_work); + queue_detection_work(display, + &display->hotplug.poll_init_work); spin_unlock_irq(&dev_priv->irq_lock); } -void intel_hpd_poll_fini(struct drm_i915_private *i915) +void intel_hpd_poll_fini(struct intel_display *display) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; /* Kill all the work that may have been queued by hpd. 
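
The !enabled branch above exists because disabling polling races with real hotplugs: anything that fires while the worker is reconfiguring connectors would otherwise be lost. A compact model of that handshake, with C11 atomics standing in for WRITE_ONCE()/READ_ONCE() and a stub detect pass (names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool poll_enabled;

    static void detect_connectors(void)
    {
            printf("one last detect pass\n");
    }

    static void poll_init_work(void)
    {
            _Bool enabled = atomic_load(&poll_enabled);

            /* ... reprogram connector->polled for every connector ... */

            if (!enabled) {
                    /* Polling just went away: run one explicit detect pass
                     * to catch hotplugs that slipped in mid-teardown. */
                    detect_connectors();
            }
    }

    static void hpd_poll_disable(void)
    {
            atomic_store(&poll_enabled, false);
            poll_init_work();       /* queued to a workqueue in the driver */
    }

    int main(void)
    {
            atomic_store(&poll_enabled, true);
            hpd_poll_disable();
            return 0;
    }
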
*/ - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { intel_connector_cancel_modeset_retry_work(connector); intel_hdcp_cancel_works(connector); @@ -880,141 +946,257 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); } -void intel_hpd_init_early(struct drm_i915_private *i915) +void intel_hpd_init_early(struct intel_display *display) { - INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, + INIT_DELAYED_WORK(&display->hotplug.hotplug_work, i915_hotplug_work_func); - INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); - INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); - INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, + INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func); + INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work); + INIT_DELAYED_WORK(&display->hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); - i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; + display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; /* If we have MST support, we want to avoid doing short HPD IRQ storm * detection, as short HPD storms will occur as a natural part of * sideband messaging with MST. * On older platforms however, IRQ storms can occur with both long and * short pulses, as seen on some G4x systems. */ - i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); + display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display); } -static bool cancel_all_detection_work(struct drm_i915_private *i915) +static bool cancel_all_detection_work(struct intel_display *display) { bool was_pending = false; - if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work)) + if (cancel_delayed_work_sync(&display->hotplug.hotplug_work)) was_pending = true; - if (cancel_work_sync(&i915->display.hotplug.poll_init_work)) + if (cancel_work_sync(&display->hotplug.poll_init_work)) was_pending = true; - if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work)) + if (cancel_delayed_work_sync(&display->hotplug.reenable_work)) was_pending = true; return was_pending; } -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +void intel_hpd_cancel_work(struct intel_display *display) { - if (!HAS_DISPLAY(dev_priv)) + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (!HAS_DISPLAY(display)) return; spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.long_port_mask = 0; - dev_priv->display.hotplug.short_port_mask = 0; - dev_priv->display.hotplug.event_bits = 0; - dev_priv->display.hotplug.retry_bits = 0; + drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display)); + + display->hotplug.long_hpd_pin_mask = 0; + display->hotplug.short_hpd_pin_mask = 0; + display->hotplug.event_bits = 0; + display->hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); - cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); + cancel_work_sync(&display->hotplug.dig_port_work); /* * All other work triggered by hotplug events should be canceled by * now. 
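
Teardown here follows a clear-then-cancel order: the pending-event masks are zeroed under the lock so nothing can re-arm the workers, then each worker is cancelled synchronously outside the lock, and a debug message reports if anything was still pending at that point. A minimal stand-in model of that ordering (plain booleans replace the workqueue machinery; all names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct hotplug_state {
            unsigned int event_bits, retry_bits;
            bool hotplug_work_pending, poll_init_pending, reenable_pending;
    };

    /* Mirrors cancel_all_detection_work(): cancel everything, remember
     * whether any of it was still pending. */
    static bool cancel_all(struct hotplug_state *h)
    {
            bool was_pending = h->hotplug_work_pending |
                               h->poll_init_pending |
                               h->reenable_pending;

            h->hotplug_work_pending = false;
            h->poll_init_pending = false;
            h->reenable_pending = false;
            return was_pending;
    }

    static void hpd_cancel_work(struct hotplug_state *h)
    {
            /* lock held in the real code */
            h->event_bits = 0;
            h->retry_bits = 0;
            /* lock dropped */

            if (cancel_all(h))
                    printf("Hotplug detection work still active\n");
    }

    int main(void)
    {
            struct hotplug_state h = { .hotplug_work_pending = true };

            hpd_cancel_work(&h);
            return 0;
    }
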
*/ - if (cancel_all_detection_work(dev_priv)) - drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); + if (cancel_all_detection_work(display)) + drm_dbg_kms(display->drm, "Hotplug detection work still active\n"); } -bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) +static void queue_work_for_missed_irqs(struct intel_display *display) { - bool ret = false; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + bool queue_hp_work = false; + u32 blocked_hpd_pin_mask; + enum hpd_pin pin; - if (pin == HPD_NONE) - return false; + lockdep_assert_held(&i915->irq_lock); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { - dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; - ret = true; + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask) + queue_hp_work = true; + + for_each_hpd_pin(pin) { + switch (display->hotplug.stats[pin].state) { + case HPD_MARK_DISABLED: + queue_hp_work = true; + break; + case HPD_DISABLED: + case HPD_ENABLED: + break; + default: + MISSING_CASE(display->hotplug.stats[pin].state); + } } - spin_unlock_irq(&dev_priv->irq_lock); - return ret; + if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask) + queue_work(hotplug->dp_wq, &hotplug->dig_port_work); + + if (queue_hp_work) + queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0); } -void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) +static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin) { - if (pin == HPD_NONE) - return; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; - spin_unlock_irq(&dev_priv->irq_lock); + lockdep_assert_held(&i915->irq_lock); + + hotplug->stats[pin].blocked_count++; + + return hotplug->stats[pin].blocked_count == 1; } -static void queue_work_for_missed_irqs(struct drm_i915_private *i915) +static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin) { - bool queue_work = false; - enum hpd_pin pin; + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; lockdep_assert_held(&i915->irq_lock); - if (i915->display.hotplug.event_bits || - i915->display.hotplug.retry_bits) - queue_work = true; + if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0)) + return true; - for_each_hpd_pin(pin) { - switch (i915->display.hotplug.stats[pin].state) { - case HPD_MARK_DISABLED: - queue_work = true; - break; - case HPD_ENABLED: - break; - default: - MISSING_CASE(i915->display.hotplug.stats[pin].state); - } + hotplug->stats[pin].blocked_count--; + + return hotplug->stats[pin].blocked_count == 0; +} + +/** + * intel_hpd_block - Block handling of HPD IRQs on an HPD pin + * @encoder: Encoder to block the HPD handling for + * + * Blocks the handling of HPD IRQs on the HPD pin of @encoder. + * + * On return: + * + * - It's guaranteed that the blocked encoders' HPD pulse handler + * (via intel_digital_port::hpd_pulse()) is not running. + * - The hotplug event handling (via intel_encoder::hotplug()) of an + * HPD IRQ pending at the time this function is called may be still + * running. 
+ * - Detection on the encoder's connector (via + * drm_connector_helper_funcs::detect_ctx(), + * drm_connector_funcs::detect()) remains allowed, for instance as part of + * userspace connector probing, or DRM core's connector polling. + * + * The call must be followed by calling intel_hpd_unblock(), or + * intel_hpd_clear_and_unblock(). + * + * Note that the handling of HPD IRQs for another encoder using the same HPD + * pin as that of @encoder will be also blocked. + */ +void intel_hpd_block(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + bool do_flush = false; + + if (encoder->hpd_pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (block_hpd_pin(display, encoder->hpd_pin)) + do_flush = true; + + spin_unlock_irq(&i915->irq_lock); + + if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin)) + flush_work(&hotplug->dig_port_work); +} + +/** + * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin + * @encoder: Encoder to unblock the HPD handling for + * + * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was + * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the + * HPD pin while it was blocked will be handled for @encoder and for any + * other encoder sharing the same HPD pin. + */ +void intel_hpd_unblock(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (encoder->hpd_pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (unblock_hpd_pin(display, encoder->hpd_pin)) + queue_work_for_missed_irqs(display); + + spin_unlock_irq(&i915->irq_lock); +} + +/** + * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin + * @encoder: Encoder to unblock the HPD handling for + * + * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was + * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the + * HPD pin while it was blocked will be cleared, handling only new IRQs. 
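
Summarizing the new blocking API before its third entry point below: blocked_count is a per-pin nesting counter, and the two unblock variants differ only in whether IRQs latched while the pin was blocked are replayed or dropped. A simplified user-space model of the scheme (locking and work flushing elided; field names mirror the driver's but the types are stand-ins):

    #include <assert.h>

    #define BIT(n) (1u << (n))

    struct hpd {
            int blocked_count[8];
            unsigned int event_bits, retry_bits;
            unsigned int short_pin_mask, long_pin_mask;
    };

    static void hpd_block(struct hpd *h, int pin)
    {
            h->blocked_count[pin]++;
            /* the first blocker would also flush the in-flight pulse
             * handler, as intel_hpd_block() does */
    }

    static int last_unblock(struct hpd *h, int pin)
    {
            assert(h->blocked_count[pin] > 0);
            return --h->blocked_count[pin] == 0;
    }

    static void hpd_unblock(struct hpd *h, int pin)
    {
            if (last_unblock(h, pin)) {
                    /* replay: queue detection work for any IRQ that
                     * arrived while the pin was blocked */
            }
    }

    static void hpd_clear_and_unblock(struct hpd *h, int pin)
    {
            if (last_unblock(h, pin)) {
                    /* discard anything that arrived while blocked */
                    h->event_bits &= ~BIT(pin);
                    h->retry_bits &= ~BIT(pin);
                    h->short_pin_mask &= ~BIT(pin);
                    h->long_pin_mask &= ~BIT(pin);
            }
    }

    int main(void)
    {
            struct hpd h = { .event_bits = BIT(2) };

            hpd_block(&h, 2);
            hpd_block(&h, 2);               /* nesting is allowed */
            hpd_unblock(&h, 2);             /* still blocked */
            hpd_clear_and_unblock(&h, 2);   /* last blocker: pending IRQ dropped */
            return (int)h.event_bits;       /* 0 */
    }
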
+ */ +void intel_hpd_clear_and_unblock(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + enum hpd_pin pin = encoder->hpd_pin; + + if (pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (unblock_hpd_pin(display, pin)) { + hotplug->event_bits &= ~BIT(pin); + hotplug->retry_bits &= ~BIT(pin); + hotplug->short_hpd_pin_mask &= ~BIT(pin); + hotplug->long_hpd_pin_mask &= ~BIT(pin); } - if (queue_work) - queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); + spin_unlock_irq(&i915->irq_lock); } -void intel_hpd_enable_detection_work(struct drm_i915_private *i915) +void intel_hpd_enable_detection_work(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.detection_work_enabled = true; - queue_work_for_missed_irqs(i915); + display->hotplug.detection_work_enabled = true; + queue_work_for_missed_irqs(display); spin_unlock_irq(&i915->irq_lock); } -void intel_hpd_disable_detection_work(struct drm_i915_private *i915) +void intel_hpd_disable_detection_work(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.detection_work_enabled = false; + display->hotplug.detection_work_enabled = false; spin_unlock_irq(&i915->irq_lock); - cancel_all_detection_work(i915); + cancel_all_detection_work(display); } -bool intel_hpd_schedule_detection(struct drm_i915_private *i915) +bool intel_hpd_schedule_detection(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); unsigned long flags; bool ret; spin_lock_irqsave(&i915->irq_lock, flags); - ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); + ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0); spin_unlock_irqrestore(&i915->irq_lock, flags); return ret; @@ -1022,15 +1204,16 @@ bool intel_hpd_schedule_detection(struct drm_i915_private *i915) static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = &dev_priv->display.hotplug; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ intel_synchronize_irq(dev_priv); - flush_work(&dev_priv->display.hotplug.dig_port_work); - flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); + flush_work(&display->hotplug.dig_port_work); + flush_delayed_work(&display->hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", @@ -1044,8 +1227,9 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, loff_t *offp) { struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = &dev_priv->display.hotplug; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; unsigned int new_threshold; int i; char *newline; @@ -1070,11 +1254,11 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, return -EINVAL; if 
(new_threshold > 0) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Setting HPD storm detection threshold to %d\n", new_threshold); else - drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); + drm_dbg_kms(display->drm, "Disabling HPD storm detection\n"); spin_lock_irq(&dev_priv->irq_lock); hotplug->hpd_storm_threshold = new_threshold; @@ -1084,7 +1268,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->display.hotplug.reenable_work); + flush_delayed_work(&display->hotplug.reenable_work); return len; } @@ -1105,10 +1289,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = { static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = m->private; + struct intel_display *display = m->private; seq_printf(m, "Enabled: %s\n", - str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); + str_yes_no(display->hotplug.hpd_short_storm_enabled)); return 0; } @@ -1125,8 +1309,9 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = &dev_priv->display.hotplug; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; char *newline; char tmp[16]; int i; @@ -1147,11 +1332,11 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, /* Reset to the "default" state for this system */ if (strcmp(tmp, "reset") == 0) - new_state = !HAS_DP_MST(dev_priv); + new_state = !HAS_DP_MST(display); else if (kstrtobool(tmp, &new_state) != 0) return -EINVAL; - drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", + drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n", new_state ? 
"En" : "Dis"); spin_lock_irq(&dev_priv->irq_lock); @@ -1162,7 +1347,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->display.hotplug.reenable_work); + flush_delayed_work(&display->hotplug.reenable_work); return len; } @@ -1176,14 +1361,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = { .write = i915_hpd_short_storm_ctl_write, }; -void intel_hpd_debugfs_register(struct drm_i915_private *i915) +void intel_hpd_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, - i915, &i915_hpd_storm_ctl_fops); + display, &i915_hpd_storm_ctl_fops); debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, - i915, &i915_hpd_short_storm_ctl_fops); + display, &i915_hpd_short_storm_ctl_fops); debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, - &i915->display.hotplug.ignore_long_hpd); + &display->hotplug.ignore_long_hpd); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index d6986902b054..edc41c9d3d65 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -8,30 +8,31 @@ #include <linux/types.h> -struct drm_i915_private; +enum port; struct intel_connector; struct intel_digital_port; +struct intel_display; struct intel_encoder; -enum port; -void intel_hpd_poll_enable(struct drm_i915_private *dev_priv); -void intel_hpd_poll_disable(struct drm_i915_private *dev_priv); -void intel_hpd_poll_fini(struct drm_i915_private *i915); +void intel_hpd_poll_enable(struct intel_display *display); +void intel_hpd_poll_disable(struct intel_display *display); +void intel_hpd_poll_fini(struct intel_display *display); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, struct intel_connector *connector); -void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, +void intel_hpd_irq_handler(struct intel_display *display, u32 pin_mask, u32 long_mask); void intel_hpd_trigger_irq(struct intel_digital_port *dig_port); -void intel_hpd_init(struct drm_i915_private *dev_priv); -void intel_hpd_init_early(struct drm_i915_private *i915); -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); +void intel_hpd_init(struct intel_display *display); +void intel_hpd_init_early(struct intel_display *display); +void intel_hpd_cancel_work(struct intel_display *display); enum hpd_pin intel_hpd_pin_default(enum port port); -bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); -void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); -void intel_hpd_debugfs_register(struct drm_i915_private *i915); +void intel_hpd_block(struct intel_encoder *encoder); +void intel_hpd_unblock(struct intel_encoder *encoder); +void intel_hpd_clear_and_unblock(struct intel_encoder *encoder); +void intel_hpd_debugfs_register(struct intel_display *display); -void intel_hpd_enable_detection_work(struct drm_i915_private *i915); -void intel_hpd_disable_detection_work(struct drm_i915_private *i915); -bool intel_hpd_schedule_detection(struct drm_i915_private *i915); +void intel_hpd_enable_detection_work(struct intel_display *display); +void intel_hpd_disable_detection_work(struct intel_display *display); +bool 
intel_hpd_schedule_detection(struct intel_display *display); #endif /* __INTEL_HOTPLUG_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 2137ac7b882a..2463e61e7802 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -131,30 +131,31 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = { [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), }; -static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) +static void intel_hpd_init_pins(struct intel_display *display) { - struct intel_hotplug *hpd = &dev_priv->display.hotplug; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hpd = &display->hotplug; - if (HAS_GMCH(dev_priv)) { - if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) + if (HAS_GMCH(display)) { + if (display->platform.g4x || display->platform.valleyview || + display->platform.cherryview) hpd->hpd = hpd_status_g4x; else hpd->hpd = hpd_status_i915; return; } - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) hpd->hpd = hpd_xelpdp; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) hpd->hpd = hpd_gen11; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) hpd->hpd = hpd_bxt; - else if (DISPLAY_VER(dev_priv) == 9) + else if (DISPLAY_VER(display) == 9) hpd->hpd = NULL; /* no north HPD on SKL */ - else if (DISPLAY_VER(dev_priv) >= 8) + else if (DISPLAY_VER(display) >= 8) hpd->hpd = hpd_bdw; - else if (DISPLAY_VER(dev_priv) >= 7) + else if (DISPLAY_VER(display) >= 7) hpd->hpd = hpd_ivb; else hpd->hpd = hpd_ilk; @@ -180,19 +181,20 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) } /* For display hotplug interrupt */ -void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, +void i915_hotplug_interrupt_update_locked(struct intel_display *display, u32 mask, u32 bits) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, bits & ~mask); + drm_WARN_ON(display->drm, bits & ~mask); - intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask, - bits); + intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits); } /** * i915_hotplug_interrupt_update - update hotplug interrupt enable - * @dev_priv: driver private + * @display: display device * @mask: bits to update * @bits: bits to enable * NOTE: the HPD enable bits are modified both inside and outside @@ -202,12 +204,14 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, * held already, this function acquires the lock itself. A non-locking * version is also available. */ -void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, +void i915_hotplug_interrupt_update(struct intel_display *display, u32 mask, u32 bits) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + spin_lock_irq(&dev_priv->irq_lock); - i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); + i915_hotplug_interrupt_update_locked(display, mask, bits); spin_unlock_irq(&dev_priv->irq_lock); } @@ -339,7 +343,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) * * Note that the caller is expected to zero out the masks initially. 
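
For reference, the decode step documented just below maps latched hardware status bits to logical pin masks through a per-pin table plus a platform callback for the long/short decision. A self-contained sketch of that shape (the table contents and the detect callback are invented, not any real platform's register layout):

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define NUM_PINS 4

    typedef int (*long_detect_fn)(int pin, unsigned int dig_reg);

    /* Each pin owns some hardware status bits (hpd[] table); the
     * callback decides whether the latched value means a long pulse.
     * The caller zeroes both output masks first, as the comment in
     * the hunk requires. */
    static void get_hpd_pins(unsigned int *pin_mask, unsigned int *long_mask,
                             unsigned int trigger, unsigned int dig_reg,
                             const unsigned int hpd[NUM_PINS],
                             long_detect_fn long_pulse_detect)
    {
            for (int pin = 0; pin < NUM_PINS; pin++) {
                    if (!(hpd[pin] & trigger))
                            continue;

                    *pin_mask |= BIT(pin);
                    if (long_pulse_detect(pin, dig_reg))
                            *long_mask |= BIT(pin);
            }
    }

    /* Pretend every even pin latched a long pulse. */
    static int fake_long_detect(int pin, unsigned int dig_reg)
    {
            (void)dig_reg;
            return (pin & 1) == 0;
    }

    int main(void)
    {
            const unsigned int hpd[NUM_PINS] = { 0x01, 0x02, 0x04, 0x08 };
            unsigned int pin_mask = 0, long_mask = 0;

            get_hpd_pins(&pin_mask, &long_mask, 0x05, 0, hpd, fake_long_detect);
            printf("pins 0x%x, long 0x%x\n", pin_mask, long_mask); /* 0x5, 0x5 */
            return 0;
    }
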
*/ -static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, +static void intel_get_hpd_pins(struct intel_display *display, u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS], @@ -359,37 +363,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, *long_mask |= BIT(pin); } - drm_dbg(&dev_priv->drm, - "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); + drm_dbg_kms(display->drm, + "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } -static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, +static u32 intel_hpd_enabled_irqs(struct intel_display *display, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + for_each_intel_encoder(display->drm, encoder) + if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } -static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, +static u32 intel_hpd_hotplug_irqs(struct intel_display *display, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 hotplug_irqs = 0; - for_each_intel_encoder(&dev_priv->drm, encoder) + for_each_intel_encoder(display->drm, encoder) hotplug_irqs |= hpd[encoder->hpd_pin]; return hotplug_irqs; } -static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, +static u32 intel_hpd_hotplug_mask(struct intel_display *display, hotplug_mask_func hotplug_mask) { enum hpd_pin pin; @@ -401,25 +405,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, return hotplug; } -static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, +static u32 intel_hpd_hotplug_enables(struct intel_display *display, hotplug_enables_func hotplug_enables) { struct intel_encoder *encoder; u32 hotplug = 0; - for_each_intel_encoder(&i915->drm, encoder) + for_each_intel_encoder(display->drm, encoder) hotplug |= hotplug_enables(encoder); return hotplug; } -u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) +u32 i9xx_hpd_irq_ack(struct intel_display *display) { u32 hotplug_status = 0, hotplug_status_mask; int i; - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; else @@ -435,53 +439,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) * bits can itself generate a new hotplug interrupt :( */ for (i = 0; i < 10; i++) { - u32 tmp = intel_uncore_read(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask; + u32 tmp = intel_de_read(display, + PORT_HOTPLUG_STAT(display)) & hotplug_status_mask; if (tmp == 0) return hotplug_status; hotplug_status |= tmp; - intel_uncore_write(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv), - hotplug_status); + intel_de_write(display, PORT_HOTPLUG_STAT(display), + hotplug_status); } - drm_WARN_ONCE(&dev_priv->drm, 1, + drm_WARN_ONCE(display->drm, 1, "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", - intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv))); + intel_de_read(display, PORT_HOTPLUG_STAT(display))); return hotplug_status; } -void 
i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) +void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status) { - struct intel_display *display = &dev_priv->display; u32 pin_mask = 0, long_mask = 0; u32 hotplug_trigger; - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; else hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; if (hotplug_trigger) { - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } - if ((IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) && hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) intel_dp_aux_irq_handler(display); } -void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; @@ -491,7 +493,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) * zero. Not acking leads to "The master control interrupt lied (SDE)!" * errors. */ - dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); + dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG); if (!hotplug_trigger) { u32 mask = PORTA_HOTPLUG_STATUS_MASK | PORTD_HOTPLUG_STATUS_MASK | @@ -500,63 +502,62 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) dig_hotplug_reg &= ~mask; } - intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); + intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg); if (!hotplug_trigger) return; - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, pch_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } -void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) +void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir) { - struct intel_display *display = &i915->display; enum hpd_pin pin; u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK); u32 trigger_aux = iir & XELPDP_AUX_TC_MASK; u32 pin_mask = 0, long_mask = 0; - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) trigger_aux |= iir & XE2LPD_AUX_DDI_MASK; for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) { u32 val; - if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger)) + if (!(display->hotplug.hpd[pin] & hotplug_trigger)) continue; pin_mask |= BIT(pin); - val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin)); - intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val); + val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin)); + intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val); if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT)) long_mask |= BIT(pin); } if (pin_mask) { - drm_dbg(&i915->drm, - "pica 
hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, pin_mask, long_mask); + drm_dbg_kms(display->drm, + "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, pin_mask, long_mask); - intel_hpd_irq_handler(i915, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } if (trigger_aux) intel_dp_aux_irq_handler(display); if (!pin_mask && !trigger_aux) - drm_err(&i915->drm, + drm_err(display->drm, "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir); } -void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +void icp_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; @@ -566,36 +567,35 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) /* Locking due to DSI native GPIO sequences */ spin_lock(&dev_priv->irq_lock); - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0); spin_unlock(&dev_priv->irq_lock); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, icp_ddi_port_hotplug_long_detect); } if (tc_hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, icp_tc_port_hotplug_long_detect); } if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_ICP) intel_gmbus_irq_handler(display); } -void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +void spt_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -604,61 +604,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, spt_port_hotplug_long_detect); } if (hotplug2_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, spt_port_hotplug2_long_detect); } if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + 
intel_hpd_irq_handler(display, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) intel_gmbus_irq_handler(display); } -void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, ilk_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } -void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, bxt_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } -void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +void gen11_hpd_irq_handler(struct intel_display *display, u32 iir) { u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; @@ -667,29 +667,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (trigger_tc) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, gen11_port_hotplug_long_detect); } if (trigger_tbt) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, gen11_port_hotplug_long_detect); } if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); else - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unexpected DE HPD interrupt 0x%08x\n", iir); } @@ -735,37 +735,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder) } } -static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void ibx_hpd_detection_setup(struct intel_display *display) { /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec). * The pulse duration bits are reserved on LPT+. 
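
The detection-setup helpers around here all share one idiom: build a mask covering every bit the driver owns across all pins, build the enable bits only for encoders actually present, and apply both in a single read-modify-write so unrelated bits in the shared register survive. A toy model of that idiom (a plain variable stands in for the register, and the two-bits-per-pin layout is invented for illustration):

    #include <stdio.h>

    static unsigned int fake_reg = 0xf000;  /* unrelated bits already set */

    /* One read-modify-write: clear everything we own, set what we want. */
    static unsigned int rmw(unsigned int *reg, unsigned int clear,
                            unsigned int set)
    {
            unsigned int old = *reg;

            *reg = (old & ~clear) | set;
            return old;
    }

    static unsigned int pin_mask(int pin)   { return 0x3u << (2 * pin); }
    static unsigned int pin_enable(int pin) { return 0x1u << (2 * pin); }

    int main(void)
    {
            unsigned int mask = 0, enables = 0;

            for (int pin = 0; pin < 4; pin++)       /* all bits we own */
                    mask |= pin_mask(pin);
            for (int pin = 0; pin < 4; pin += 2)    /* only pins 0 and 2 present */
                    enables |= pin_enable(pin);

            rmw(&fake_reg, mask, enables);
            printf("reg = 0x%x\n", fake_reg);       /* 0xf011: high bits untouched */
            return 0;
    }

Keeping the mask computation (over all pins) separate from the enables computation (over present encoders) is what lets the per-encoder *_hpd_enable_detection() variants reuse the same tables for a single pin.
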
*/ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(display, ibx_hotplug_mask), + intel_hpd_hotplug_enables(display, ibx_hotplug_enables)); } static void ibx_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - ibx_hotplug_mask(encoder->hpd_pin), - ibx_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + ibx_hotplug_mask(encoder->hpd_pin), + ibx_hotplug_enables(encoder)); } -static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void ibx_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); - ibx_hpd_detection_setup(dev_priv); + ibx_hpd_detection_setup(display); } static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin) @@ -806,36 +806,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) return icp_tc_hotplug_mask(encoder->hpd_pin); } -static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void icp_ddi_hpd_detection_setup(struct intel_display *display) { - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, - intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables)); } static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI, - icp_ddi_hotplug_mask(encoder->hpd_pin), - icp_ddi_hotplug_enables(encoder)); + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + icp_ddi_hotplug_mask(encoder->hpd_pin), + icp_ddi_hotplug_enables(encoder)); } -static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void icp_tc_hpd_detection_setup(struct intel_display *display) { - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, - intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask), + intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables)); } static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC, - icp_tc_hotplug_mask(encoder->hpd_pin), - icp_tc_hotplug_enables(encoder)); + intel_de_rmw(display, SHOTPLUG_CTL_TC, + icp_tc_hotplug_mask(encoder->hpd_pin), + icp_tc_hotplug_enables(encoder)); } static void 
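
/*
 * Every *_hpd_detection_setup() above follows one shape: clear all the
 * per-pin control bits the platform defines, then set enable bits only
 * for pins that have an encoder behind them. A hedged sketch of what the
 * two builder helpers are assumed to compute (bodies illustrative only):
 */
static u32 sketch_hotplug_mask(struct intel_display *display,
			       u32 (*hotplug_mask)(enum hpd_pin pin))
{
	enum hpd_pin pin;
	u32 mask = 0;

	for_each_hpd_pin(pin)
		mask |= hotplug_mask(pin);

	return mask;
}

static u32 sketch_hotplug_enables(struct intel_display *display,
				  u32 (*hotplug_enables)(struct intel_encoder *encoder))
{
	struct intel_encoder *encoder;
	u32 enables = 0;

	for_each_intel_encoder(display->drm, encoder)
		enables |= hotplug_enables(encoder);

	return enables;
}
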
icp_hpd_enable_detection(struct intel_encoder *encoder) @@ -844,23 +844,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder) icp_tc_hpd_enable_detection(encoder); } -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void icp_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); /* * We reduce the value to 250us to be able to detect SHPD when an external display * is connected. This is also expected of us as stated in DP1.4a Table 3-4. */ - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); - icp_ddi_hpd_detection_setup(dev_priv); - icp_tc_hpd_detection_setup(dev_priv); + icp_ddi_hpd_detection_setup(display); + icp_tc_hpd_detection_setup(display); } static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin) @@ -883,59 +883,59 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder) return gen11_hotplug_mask(encoder->hpd_pin); } -static void dg1_hpd_invert(struct drm_i915_private *i915) +static void dg1_hpd_invert(struct intel_display *display) { u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | INVERT_DDID_HPD); - intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); + intel_de_rmw(display, SOUTH_CHICKEN1, 0, val); } static void dg1_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - dg1_hpd_invert(i915); + dg1_hpd_invert(display); icp_hpd_enable_detection(encoder); } -static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void dg1_hpd_irq_setup(struct intel_display *display) { - dg1_hpd_invert(dev_priv); - icp_hpd_irq_setup(dev_priv); + dg1_hpd_invert(display); + icp_hpd_irq_setup(display); } -static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void gen11_tc_hpd_detection_setup(struct intel_display *display) { - intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, + intel_hpd_hotplug_mask(display, gen11_hotplug_mask), + intel_hpd_hotplug_enables(display, gen11_hotplug_enables)); } static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL, - gen11_hotplug_mask(encoder->hpd_pin), - gen11_hotplug_enables(encoder)); + intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); } -static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void gen11_tbt_hpd_detection_setup(struct intel_display *display) { - intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - 
intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, + intel_hpd_hotplug_mask(display, gen11_hotplug_mask), + intel_hpd_hotplug_enables(display, gen11_hotplug_enables)); } static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL, - gen11_hotplug_mask(encoder->hpd_pin), - gen11_hotplug_enables(encoder)); + intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); } static void gen11_hpd_enable_detection(struct intel_encoder *encoder) @@ -949,22 +949,23 @@ static void gen11_hpd_enable_detection(struct intel_encoder *encoder) icp_hpd_enable_detection(encoder); } -static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void gen11_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, - ~enabled_irqs & hotplug_irqs); - intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); + intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs, + ~enabled_irqs & hotplug_irqs); + intel_de_posting_read(display, GEN11_DE_HPD_IMR); - gen11_tc_hpd_detection_setup(dev_priv); - gen11_tbt_hpd_detection_setup(dev_priv); + gen11_tc_hpd_detection_setup(display); + gen11_tbt_hpd_detection_setup(display); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(display); } static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin) @@ -1001,39 +1002,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder) return mtp_tc_hotplug_mask(encoder->hpd_pin); } -static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915) +static void mtp_ddi_hpd_detection_setup(struct intel_display *display) { - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, - intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask), - intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables)); } static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, mtp_ddi_hotplug_mask(encoder->hpd_pin), mtp_ddi_hotplug_enables(encoder)); } -static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915) +static void mtp_tc_hpd_detection_setup(struct intel_display *display) { - intel_de_rmw(i915, SHOTPLUG_CTL_TC, - intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask), - intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask), + intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables)); } static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct 
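
/*
 * The IMR write in gen11_hpd_irq_setup() is easy to misread: a set IMR
 * bit *masks* an interrupt. Worked example, assuming hotplug_irqs = 0xf
 * and enabled_irqs = 0x5: the rmw clears all four bits, then sets
 * ~0x5 & 0xf = 0xa, leaving enabled pins unmasked (0) and the remaining
 * pins masked (1) without disturbing unrelated IMR bits:
 */
static void sketch_unmask_hpd(struct intel_display *display,
			      u32 hotplug_irqs, u32 enabled_irqs)
{
	intel_de_rmw(display, GEN11_DE_HPD_IMR,
		     hotplug_irqs,			/* clear all hotplug bits */
		     ~enabled_irqs & hotplug_irqs);	/* keep disabled pins masked */
	intel_de_posting_read(display, GEN11_DE_HPD_IMR); /* flush the write */
}
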
drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, mtp_tc_hotplug_mask(encoder->hpd_pin), mtp_tc_hotplug_enables(encoder)); } -static void mtp_hpd_invert(struct drm_i915_private *i915) +static void mtp_hpd_invert(struct intel_display *display) { u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | @@ -1044,49 +1045,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915) INVERT_TC4_HPD | INVERT_DDID_HPD_MTP | INVERT_DDIE_HPD); - intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val); + intel_de_rmw(display, SOUTH_CHICKEN1, 0, val); } static void mtp_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - mtp_hpd_invert(i915); + mtp_hpd_invert(display); mtp_ddi_hpd_enable_detection(encoder); mtp_tc_hpd_enable_detection(encoder); } -static void mtp_hpd_irq_setup(struct drm_i915_private *i915) +static void mtp_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); /* * Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the * SHPD_FILTER_CNT value should be. */ - intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); - mtp_hpd_invert(i915); - ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); + mtp_hpd_invert(display); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); - mtp_ddi_hpd_detection_setup(i915); - mtp_tc_hpd_detection_setup(i915); + mtp_ddi_hpd_detection_setup(display); + mtp_tc_hpd_detection_setup(display); } -static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915) +static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); - mtp_ddi_hpd_detection_setup(i915); - mtp_tc_hpd_detection_setup(i915); + mtp_ddi_hpd_detection_setup(display); + mtp_tc_hpd_detection_setup(display); } static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) @@ -1094,7 +1095,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4; } -static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, +static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display, enum hpd_pin hpd_pin, bool enable) { u32 mask = XELPDP_TBT_HOTPLUG_ENABLE | @@ -1103,18 +1104,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, if (!is_xelpdp_pica_hpd_pin(hpd_pin)) return; - intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), + intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), mask, enable ? 
mask : 0); } static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true); + _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true); } -static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) +static void xelpdp_pica_hpd_detection_setup(struct intel_display *display) { struct intel_encoder *encoder; u32 available_pins = 0; @@ -1122,11 +1123,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS); - for_each_intel_encoder(&i915->drm, encoder) + for_each_intel_encoder(display->drm, encoder) available_pins |= BIT(encoder->hpd_pin); for_each_hpd_pin(pin) - _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin)); + _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin)); } static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) @@ -1135,23 +1136,24 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) mtp_hpd_enable_detection(encoder); } -static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) +static void xelpdp_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs, + intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); - intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR); + intel_de_posting_read(display, PICAINTERRUPT_IMR); - xelpdp_pica_hpd_detection_setup(i915); + xelpdp_pica_hpd_detection_setup(display); if (INTEL_PCH_TYPE(i915) >= PCH_LNL) - xe2lpd_sde_hpd_irq_setup(i915); + xe2lpd_sde_hpd_irq_setup(display); else if (INTEL_PCH_TYPE(i915) >= PCH_MTL) - mtp_hpd_irq_setup(i915); + mtp_hpd_irq_setup(display); } static u32 spt_hotplug_mask(enum hpd_pin hpd_pin) @@ -1190,57 +1192,61 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder) return spt_hotplug2_mask(encoder->hpd_pin); } -static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void spt_hpd_detection_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(dev_priv)) { - intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, - CHASSIS_CLK_REQ_DURATION(0xf)); + intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); } /* Enable digital hotplug on the PCH */ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(display, spt_hotplug_mask), + intel_hpd_hotplug_enables(display, spt_hotplug_enables)); - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); + intel_de_rmw(display, 
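
/*
 * xelpdp_pica_hpd_detection_setup() above only enables detection on pins
 * that actually have an encoder; the bitmask it builds, in isolation
 * (sketch):
 */
static u32 sketch_available_hpd_pins(struct intel_display *display)
{
	struct intel_encoder *encoder;
	u32 available_pins = 0;

	for_each_intel_encoder(display->drm, encoder)
		available_pins |= BIT(encoder->hpd_pin);

	return available_pins;	/* one bit per populated HPD pin */
}
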
PCH_PORT_HOTPLUG2, + intel_hpd_hotplug_mask(display, spt_hotplug2_mask), + intel_hpd_hotplug_enables(display, spt_hotplug2_enables)); } static void spt_hpd_enable_detection(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(i915)) { - intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, - CHASSIS_CLK_REQ_DURATION_MASK, - CHASSIS_CLK_REQ_DURATION(0xf)); + intel_de_rmw(display, SOUTH_CHICKEN1, + CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); } - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - spt_hotplug_mask(encoder->hpd_pin), - spt_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + spt_hotplug_mask(encoder->hpd_pin), + spt_hotplug_enables(encoder)); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2, - spt_hotplug2_mask(encoder->hpd_pin), - spt_hotplug2_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG2, + spt_hotplug2_mask(encoder->hpd_pin), + spt_hotplug2_enables(encoder)); } -static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void spt_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); - spt_hpd_detection_setup(dev_priv); + spt_hpd_detection_setup(display); } static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin) @@ -1265,44 +1271,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder) } } -static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void ilk_hpd_detection_setup(struct intel_display *display) { /* * Enable digital hotplug on the CPU, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * The pulse duration bits are reserved on HSW+. 
*/ - intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, - intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); + intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, + intel_hpd_hotplug_mask(display, ilk_hotplug_mask), + intel_hpd_hotplug_enables(display, ilk_hotplug_enables)); } static void ilk_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, - ilk_hotplug_mask(encoder->hpd_pin), - ilk_hotplug_enables(encoder)); + intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, + ilk_hotplug_mask(encoder->hpd_pin), + ilk_hotplug_enables(encoder)); ibx_hpd_enable_detection(encoder); } -static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void ilk_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - if (DISPLAY_VER(dev_priv) >= 8) - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + if (DISPLAY_VER(display) >= 8) + bdw_update_port_irq(display, hotplug_irqs, enabled_irqs); else - ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); + ilk_update_display_irq(display, hotplug_irqs, enabled_irqs); - ilk_hpd_detection_setup(dev_priv); + ilk_hpd_detection_setup(display); - ibx_hpd_irq_setup(dev_priv); + ibx_hpd_irq_setup(display); } static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin) @@ -1344,58 +1350,59 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder) } } -static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void bxt_hpd_detection_setup(struct intel_display *display) { - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(display, bxt_hotplug_mask), + intel_hpd_hotplug_enables(display, bxt_hotplug_enables)); } static void bxt_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - bxt_hotplug_mask(encoder->hpd_pin), - bxt_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + bxt_hotplug_mask(encoder->hpd_pin), + bxt_hotplug_enables(encoder)); } -static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void bxt_hpd_irq_setup(struct intel_display *display) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + bdw_update_port_irq(display, hotplug_irqs, enabled_irqs); - bxt_hpd_detection_setup(dev_priv); + bxt_hpd_detection_setup(display); } -static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915) +static void 
g45_hpd_peg_band_gap_wa(struct intel_display *display) { /* * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ - intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd); + intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd); } static void i915_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; - if (IS_G45(i915)) - g45_hpd_peg_band_gap_wa(i915); + if (display->platform.g45) + g45_hpd_peg_band_gap_wa(display); /* HPD sense and interrupt enable are one and the same */ - i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); + i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en); } -static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void i915_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_en; lockdep_assert_held(&dev_priv->irq_lock); @@ -1404,20 +1411,20 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) * Note HDMI and DP share hotplug bits. Enable bits are the same for all * generations. */ - hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); + hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915); /* * Programming the CRT detection parameters tends to generate a spurious * hotplug event about three seconds later. So just do it once. */ - if (IS_G4X(dev_priv)) + if (display->platform.g4x) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; - if (IS_G45(dev_priv)) - g45_hpd_peg_band_gap_wa(dev_priv); + if (display->platform.g45) + g45_hpd_peg_band_gap_wa(display); /* Ignore TV since it's buggy */ - i915_hotplug_interrupt_update_locked(dev_priv, + i915_hotplug_interrupt_update_locked(display, HOTPLUG_INT_EN_MASK | CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | CRT_HOTPLUG_ACTIVATION_PERIOD_64, @@ -1426,7 +1433,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) struct intel_hotplug_funcs { /* Enable HPD sense and interrupts for all present encoders */ - void (*hpd_irq_setup)(struct drm_i915_private *i915); + void (*hpd_irq_setup)(struct intel_display *display); /* Enable HPD sense for a single encoder */ void (*hpd_enable_detection)(struct intel_encoder *encoder); }; @@ -1449,47 +1456,49 @@ HPD_FUNCS(ilk); void intel_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (i915->display.funcs.hotplug) - i915->display.funcs.hotplug->hpd_enable_detection(encoder); + if (display->funcs.hotplug) + display->funcs.hotplug->hpd_enable_detection(encoder); } -void intel_hpd_irq_setup(struct drm_i915_private *i915) +void intel_hpd_irq_setup(struct intel_display *display) { - if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) && - !i915->display.irq.vlv_display_irqs_enabled) + if ((display->platform.valleyview || display->platform.cherryview) && + !display->irq.vlv_display_irqs_enabled) return; - if (i915->display.funcs.hotplug) - i915->display.funcs.hotplug->hpd_irq_setup(i915); + if (display->funcs.hotplug) + display->funcs.hotplug->hpd_irq_setup(display); } -void intel_hotplug_irq_init(struct drm_i915_private *i915) +void intel_hotplug_irq_init(struct intel_display *display) { - 
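
/*
 * Platform selection funnels through the small vtable above. HPD_FUNCS()
 * presumably expands to a "<name>_hpd_funcs" table along these lines
 * (sketch of the assumed macro, not the actual definition):
 */
#define SKETCH_HPD_FUNCS(platform)					\
static const struct intel_hotplug_funcs platform##_hpd_funcs = {	\
	.hpd_irq_setup = platform##_hpd_irq_setup,			\
	.hpd_enable_detection = platform##_hpd_enable_detection,	\
}
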
intel_hpd_init_pins(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + + intel_hpd_init_pins(display); - intel_hpd_init_early(i915); + intel_hpd_init_early(display); - if (HAS_GMCH(i915)) { - if (I915_HAS_HOTPLUG(i915)) - i915->display.funcs.hotplug = &i915_hpd_funcs; + if (HAS_GMCH(display)) { + if (HAS_HOTPLUG(display)) + display->funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(i915)) - i915->display.funcs.hotplug = &icp_hpd_funcs; + display->funcs.hotplug = &icp_hpd_funcs; else if (HAS_PCH_DG1(i915)) - i915->display.funcs.hotplug = &dg1_hpd_funcs; - else if (DISPLAY_VER(i915) >= 14) - i915->display.funcs.hotplug = &xelpdp_hpd_funcs; - else if (DISPLAY_VER(i915) >= 11) - i915->display.funcs.hotplug = &gen11_hpd_funcs; - else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - i915->display.funcs.hotplug = &bxt_hpd_funcs; + display->funcs.hotplug = &dg1_hpd_funcs; + else if (DISPLAY_VER(display) >= 14) + display->funcs.hotplug = &xelpdp_hpd_funcs; + else if (DISPLAY_VER(display) >= 11) + display->funcs.hotplug = &gen11_hpd_funcs; + else if (display->platform.geminilake || display->platform.broxton) + display->funcs.hotplug = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) - i915->display.funcs.hotplug = &icp_hpd_funcs; + display->funcs.hotplug = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_SPT) - i915->display.funcs.hotplug = &spt_hpd_funcs; + display->funcs.hotplug = &spt_hpd_funcs; else - i915->display.funcs.hotplug = &ilk_hpd_funcs; + display->funcs.hotplug = &ilk_hpd_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h index e4db752df096..9063bb02a2e9 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h @@ -8,28 +8,28 @@ #include <linux/types.h> -struct drm_i915_private; +struct intel_display; struct intel_encoder; -u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915); +u32 i9xx_hpd_irq_ack(struct intel_display *display); -void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status); -void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir); -void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir); -void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir); -void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir); +void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status); +void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void gen11_hpd_irq_handler(struct intel_display *display, u32 iir); +void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir); +void icp_irq_handler(struct intel_display *display, u32 pch_iir); +void spt_irq_handler(struct intel_display *display, u32 pch_iir); -void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915, +void i915_hotplug_interrupt_update_locked(struct intel_display *display, u32 mask, u32 bits); -void i915_hotplug_interrupt_update(struct drm_i915_private *i915, +void i915_hotplug_interrupt_update(struct intel_display *display, u32 mask, u32 bits); void 
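
/*
 * Where a converted function still needs PCH-type or uncore access, it
 * recovers the device pointer from the display. The idiom repeated all
 * through this series, in isolation (sketch):
 */
static void sketch_mixed_scope(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (INTEL_PCH_TYPE(i915) >= PCH_ICP)	/* PCH check still needs i915 */
		icp_hpd_irq_setup(display);	/* display-only from here on */
}
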
intel_hpd_enable_detection(struct intel_encoder *encoder); -void intel_hpd_irq_setup(struct drm_i915_private *i915); +void intel_hpd_irq_setup(struct intel_display *display); -void intel_hotplug_irq_init(struct drm_i915_private *i915); +void intel_hotplug_irq_init(struct intel_display *display); #endif /* __INTEL_HOTPLUG_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c index fb6b84f6a81d..dc454420c134 100644 --- a/drivers/gpu/drm/i915/display/intel_hti.c +++ b/drivers/gpu/drm/i915/display/intel_hti.c @@ -4,6 +4,7 @@ */ #include <drm/drm_device.h> +#include <drm/drm_print.h> #include "intel_de.h" #include "intel_display.h" diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c index 86cc03a4413c..aad52d0d83e1 100644 --- a/drivers/gpu/drm/i915/display/intel_load_detect.c +++ b/drivers/gpu/drm/i915/display/intel_load_detect.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_print.h> #include "intel_atomic.h" #include "intel_crtc.h" diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 63c1afa30b05..f94b7eeae20f 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -27,6 +27,7 @@ #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_print.h> #include "i915_reg.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 19f52d1659fa..89d26913e253 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -84,12 +84,13 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder) return container_of(encoder, struct intel_lvds_encoder, base); } -bool intel_lvds_port_enabled(struct drm_i915_private *i915, +bool intel_lvds_port_enabled(struct intel_display *display, i915_reg_t lvds_reg, enum pipe *pipe) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 val; - val = intel_de_read(i915, lvds_reg); + val = intel_de_read(display, lvds_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(i915)) @@ -104,7 +105,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); intel_wakeref_t wakeref; bool ret; @@ -113,7 +113,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, if (!wakeref) return false; - ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe); + ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe); intel_display_power_put(display, encoder->power_domain, wakeref); @@ -123,13 +123,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, static void intel_lvds_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); u32 tmp, flags = 0; crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS); - tmp = intel_de_read(dev_priv, lvds_encoder->reg); + tmp = 
intel_de_read(display, lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) flags |= DRM_MODE_FLAG_NHSYNC; else @@ -141,13 +141,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, crtc_state->hw.adjusted_mode.flags |= flags; - if (DISPLAY_VER(dev_priv) < 5) + if (DISPLAY_VER(display) < 5) crtc_state->gmch_pfit.lvds_border_bits = tmp & LVDS_BORDER_ENABLE; /* gen2/3 store dither state in pfit control, needs to match */ - if (DISPLAY_VER(dev_priv) < 4) { - tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)); + if (DISPLAY_VER(display) < 4) { + tmp = intel_de_read(display, PFIT_CONTROL(display)); crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE; } @@ -155,24 +155,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; } -static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, +static void intel_lvds_pps_get_hw_state(struct intel_display *display, struct intel_lvds_pps *pps) { u32 val; - pps->powerdown_on_reset = intel_de_read(dev_priv, - PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET; + pps->powerdown_on_reset = intel_de_read(display, + PP_CONTROL(display, 0)) & PANEL_POWER_RESET; - val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)); + val = intel_de_read(display, PP_ON_DELAYS(display, 0)); pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val); pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val); pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val); - val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0)); + val = intel_de_read(display, PP_OFF_DELAYS(display, 0)); pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val); pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val); - val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0)); + val = intel_de_read(display, PP_DIVISOR(display, 0)); pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val); val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val); /* @@ -185,12 +185,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, /* Convert from 100ms to 100us units */ pps->delays.power_cycle = val * 1000; - if (DISPLAY_VER(dev_priv) < 5 && + if (DISPLAY_VER(display) < 5 && pps->delays.power_up == 0 && pps->delays.backlight_on == 0 && pps->delays.power_down == 0 && pps->delays.backlight_off == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Panel power timings uninitialized, " "setting defaults\n"); /* Set T2 to 40ms and T5 to 200ms in 100 usec units */ @@ -201,7 +201,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, pps->delays.backlight_off = 200 * 10; } - drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d " + drm_dbg(display->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d " "divider %d port %d powerdown_on_reset %d\n", pps->delays.power_up, pps->delays.power_down, pps->delays.power_cycle, pps->delays.backlight_on, @@ -209,28 +209,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, pps->port, pps->powerdown_on_reset); } -static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, +static void intel_lvds_pps_init_hw(struct intel_display *display, struct intel_lvds_pps *pps) { u32 val; - val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0)); - drm_WARN_ON(&dev_priv->drm, + val = intel_de_read(display, PP_CONTROL(display, 
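
/*
 * The PPS readout above leans on REG_FIELD_GET()/REG_FIELD_PREP(). Their
 * assumed semantics, sketched with plain shifts (the kernel versions are
 * macros with compile-time mask validation):
 */
static inline u32 sketch_field_get(u32 mask, u32 val)
{
	return (val & mask) >> (ffs(mask) - 1);	/* isolate, shift to bit 0 */
}

static inline u32 sketch_field_prep(u32 mask, u32 val)
{
	return (val << (ffs(mask) - 1)) & mask;	/* shift up, clamp to field */
}
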
0)); + drm_WARN_ON(display->drm, (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); if (pps->powerdown_on_reset) val |= PANEL_POWER_RESET; - intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val); + intel_de_write(display, PP_CONTROL(display, 0), val); - intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0), + intel_de_write(display, PP_ON_DELAYS(display, 0), REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on)); - intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0), + intel_de_write(display, PP_OFF_DELAYS(display, 0), REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off)); - intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0), + intel_de_write(display, PP_DIVISOR(display, 0), REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1)); @@ -256,7 +256,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, assert_pll_disabled(display, pipe); } - intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps); + intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps); temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; @@ -296,7 +296,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the TRANSCONF reg. */ - if (DISPLAY_VER(i915) == 4) { + if (DISPLAY_VER(display) == 4) { /* * Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. @@ -312,7 +312,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; - intel_de_write(i915, lvds_encoder->reg, temp); + intel_de_write(display, lvds_encoder->reg, temp); } /* @@ -323,16 +323,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN); + intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN); - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON); - intel_de_posting_read(dev_priv, lvds_encoder->reg); + intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON); + intel_de_posting_read(display, lvds_encoder->reg); - if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000)) - drm_err(&dev_priv->drm, + if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000)) + drm_err(display->drm, "timed out waiting for panel to power on\n"); intel_backlight_enable(crtc_state, conn_state); @@ -343,16 +343,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_display *display = to_intel_display(encoder); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0); - if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 
0), PP_ON, 1000)) - drm_err(&dev_priv->drm, + intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0); + if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000)) + drm_err(display->drm, "timed out waiting for panel to power off\n"); - intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0); - intel_de_posting_read(dev_priv, lvds_encoder->reg); + intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0); + intel_de_posting_read(display, lvds_encoder->reg); } static void gmch_disable_lvds(struct intel_atomic_state *state, @@ -384,10 +384,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state, static void intel_lvds_shutdown(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000)) - drm_err(&dev_priv->drm, + if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000)) + drm_err(display->drm, "timed out waiting for panel power cycle delay\n"); } @@ -420,6 +420,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct intel_connector *connector = lvds_encoder->attached_connector; @@ -429,8 +430,8 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder, int ret; /* Should never happen!! */ - if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) { - drm_err(&i915->drm, "Can't support LVDS on pipe A\n"); + if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) { + drm_err(display->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } @@ -447,7 +448,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder, /* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */ if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "forcing display bpp (was %d) to LVDS (%d)\n", crtc_state->pipe_bpp, lvds_bpp); crtc_state->pipe_bpp = lvds_bpp; @@ -775,11 +776,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) +struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display) { struct intel_encoder *encoder; - for_each_intel_encoder(&i915->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { if (encoder->type == INTEL_OUTPUT_LVDS) return encoder; } @@ -787,15 +788,16 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) return NULL; } -bool intel_is_dual_link_lvds(struct drm_i915_private *i915) +bool intel_is_dual_link_lvds(struct intel_display *display) { - struct intel_encoder *encoder = intel_get_lvds_encoder(i915); + struct intel_encoder *encoder = intel_get_lvds_encoder(display); return encoder && to_lvds_encoder(encoder)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { + struct intel_display *display = to_intel_display(&lvds_encoder->base); struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev); struct intel_connector *connector = lvds_encoder->attached_connector; const struct drm_display_mode *fixed_mode = @@ -803,8 +805,8 
@@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) unsigned int val; /* use the module option value if specified */ - if (i915->display.params.lvds_channel_mode > 0) - return i915->display.params.lvds_channel_mode == 2; + if (display->params.lvds_channel_mode > 0) + return display->params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (fixed_mode->clock > 112999) @@ -819,7 +821,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) * we need to check "the value to be set" in VBT when LVDS * register is uninitialized. */ - val = intel_de_read(i915, lvds_encoder->reg); + val = intel_de_read(display, lvds_encoder->reg); if (HAS_PCH_CPT(i915)) val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); else @@ -837,14 +839,14 @@ static void intel_lvds_add_properties(struct drm_connector *connector) /** * intel_lvds_init - setup LVDS connectors on this device - * @i915: i915 device + * @display: display device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ -void intel_lvds_init(struct drm_i915_private *i915) +void intel_lvds_init(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_lvds_encoder *lvds_encoder; struct intel_connector *connector; const struct drm_edid *drm_edid; @@ -855,13 +857,13 @@ void intel_lvds_init(struct drm_i915_private *i915) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support, + drm_WARN(display->drm, !display->vbt.int_lvds_support, "Useless DMI match. Internal LVDS support disabled by VBT\n"); return; } - if (!i915->display.vbt.int_lvds_support) { - drm_dbg_kms(&i915->drm, + if (!display->vbt.int_lvds_support) { + drm_dbg_kms(display->drm, "Internal LVDS support disabled by VBT\n"); return; } @@ -871,7 +873,7 @@ void intel_lvds_init(struct drm_i915_private *i915) else lvds_reg = LVDS; - lvds = intel_de_read(i915, lvds_reg); + lvds = intel_de_read(display, lvds_reg); if (HAS_PCH_SPLIT(i915)) { if ((lvds & LVDS_DETECTED) == 0) @@ -881,11 +883,11 @@ void intel_lvds_init(struct drm_i915_private *i915) ddc_pin = GMBUS_PIN_PANEL; if (!intel_bios_is_lvds_present(display, &ddc_pin)) { if ((lvds & LVDS_PORT_EN) == 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "LVDS is not present in VBT\n"); return; } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "LVDS is not present in VBT, but enabled anyway\n"); } @@ -902,12 +904,12 @@ void intel_lvds_init(struct drm_i915_private *i915) lvds_encoder->attached_connector = connector; encoder = &lvds_encoder->base; - drm_connector_init_with_ddc(&i915->drm, &connector->base, + drm_connector_init_with_ddc(display->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS, intel_gmbus_get_adapter(display, ddc_pin)); - drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); encoder->enable = intel_enable_lvds; @@ -931,7 +933,7 @@ void intel_lvds_init(struct drm_i915_private *i915) encoder->power_domain = POWER_DOMAIN_PORT_OTHER; encoder->port = PORT_NONE; encoder->cloneable = 0; - if (DISPLAY_VER(i915) < 4) + if (DISPLAY_VER(display) < 4) encoder->pipe_mask = BIT(PIPE_B); else encoder->pipe_mask = ~0; @@ -943,7 +945,7 @@ void 
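
/*
 * The dual-link decision above reduces to a small predicate once the
 * register/VBT fallback is set aside (sketch; the real helper also
 * consults the value latched in the LVDS register, per the hunk above):
 */
static bool sketch_is_dual_link_lvds(struct intel_display *display,
				     const struct drm_display_mode *fixed_mode)
{
	/* module option wins: 1 forces single channel, 2 forces dual */
	if (display->params.lvds_channel_mode > 0)
		return display->params.lvds_channel_mode == 2;

	/* single channel LVDS tops out at 112 MHz pixel clock */
	return fixed_mode->clock > 112999;
}
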
intel_lvds_init(struct drm_i915_private *i915) intel_lvds_add_properties(&connector->base); - intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps); + intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps); lvds_encoder->init_lvds_val = lvds; /* @@ -958,7 +960,7 @@ void intel_lvds_init(struct drm_i915_private *i915) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - mutex_lock(&i915->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc); else @@ -991,7 +993,7 @@ void intel_lvds_init(struct drm_i915_private *i915) if (!intel_panel_preferred_fixed_mode(connector)) intel_panel_add_encoder_fixed_mode(connector, encoder); - mutex_unlock(&i915->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); /* If we still don't have a mode after all that, give up. */ if (!intel_panel_preferred_fixed_mode(connector)) @@ -1002,7 +1004,7 @@ void intel_lvds_init(struct drm_i915_private *i915) intel_backlight_setup(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); - drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n", + drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; @@ -1010,7 +1012,7 @@ void intel_lvds_init(struct drm_i915_private *i915) return; failed: - drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n"); + drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n"); drm_connector_cleanup(&connector->base); drm_encoder_cleanup(&encoder->base); kfree(lvds_encoder); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h index 7ad5fa9c0434..a6db1706a97c 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.h +++ b/drivers/gpu/drm/i915/display/intel_lvds.h @@ -11,28 +11,28 @@ #include "i915_reg_defs.h" enum pipe; -struct drm_i915_private; +struct intel_display; #ifdef I915 -bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, +bool intel_lvds_port_enabled(struct intel_display *display, i915_reg_t lvds_reg, enum pipe *pipe); -void intel_lvds_init(struct drm_i915_private *dev_priv); -struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv); -bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv); +void intel_lvds_init(struct intel_display *display); +struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display); +bool intel_is_dual_link_lvds(struct intel_display *display); #else -static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, +static inline bool intel_lvds_port_enabled(struct intel_display *display, i915_reg_t lvds_reg, enum pipe *pipe) { return false; } -static inline void intel_lvds_init(struct drm_i915_private *dev_priv) +static inline void intel_lvds_init(struct intel_display *display) { } -static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) +static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display) { return NULL; } -static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) +static inline bool intel_is_dual_link_lvds(struct intel_display *display) { return false; } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c 
b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 312b21b1ab59..9e963bce340f 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -155,9 +155,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc) static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_pmdemand_state *pmdemand_state = - to_intel_pmdemand_state(i915->display.pmdemand.obj.state); + to_intel_pmdemand_state(display->pmdemand.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum pipe pipe = crtc->pipe; @@ -169,7 +168,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) reset_crtc_encoder_state(crtc); intel_fbc_disable(crtc); - intel_update_watermarks(i915); + intel_update_watermarks(display); intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains); @@ -821,18 +820,18 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; - if (crtc_state->hw.active) { - /* - * The initial mode needs to be set in order to keep - * the atomic core happy. It wants a valid mode if the - * crtc's enabled, so we do the above call. - * - * But we don't set all the derived state fully, hence - * set a flag to indicate that a full recalculation is - * needed on the next commit. - */ - crtc_state->inherited = true; + /* + * The initial mode needs to be set in order to keep + * the atomic core happy. It wants a valid mode if the + * crtc's enabled, so we do the above call. + * + * But we don't set all the derived state fully, hence + * set a flag to indicate that a full recalculation is + * needed on the next commit. + */ + crtc_state->inherited = true; + if (crtc_state->hw.active) { intel_crtc_update_active_timings(crtc_state, crtc_state->vrr.enable); @@ -874,7 +873,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) /* TODO move here (or even earlier?) on all platforms */ if (DISPLAY_VER(display) >= 9) - intel_wm_get_hw_state(i915); + intel_wm_get_hw_state(display); intel_bw_update_hw_state(display); intel_cdclk_update_hw_state(display); @@ -947,7 +946,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, /* HW state is read out, now we need to sanitize this mess. 
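
/*
 * The readout hunk above hoists the inherited flag out of the hw.active
 * branch: even a CRTC read out as inactive must be flagged so that the
 * first commit recomputes the full derived state. The resulting flow:
 *
 *	crtc_state->inherited = true;	// always, active or not
 *
 *	if (crtc_state->hw.active)
 *		intel_crtc_update_active_timings(crtc_state,
 *						 crtc_state->vrr.enable);
 */
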
*/ get_encoder_power_domains(i915); - intel_pch_sanitize(i915); + intel_pch_sanitize(display); intel_cmtg_sanitize(display); @@ -988,8 +987,8 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, /* TODO move earlier on all platforms */ if (DISPLAY_VER(display) < 9) - intel_wm_get_hw_state(i915); - intel_wm_sanitize(i915); + intel_wm_get_hw_state(display); + intel_wm_sanitize(display); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 99f6d6f53fa7..b909ed18a5b2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -20,9 +20,11 @@ #include "intel_pps.h" #include "intel_sdvo.h" -bool intel_has_pch_trancoder(struct drm_i915_private *i915, +bool intel_has_pch_trancoder(struct intel_display *display, enum pipe pch_transcoder) { + struct drm_i915_private *i915 = to_i915(display->drm); + return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) || (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A); } @@ -37,11 +39,11 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) return crtc->pipe; } -static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, +static void assert_pch_dp_disabled(struct intel_display *display, enum pipe pipe, enum port port, i915_reg_t dp_reg) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe port_pipe; bool state; @@ -57,11 +59,11 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, port_name(port)); } -static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, +static void assert_pch_hdmi_disabled(struct intel_display *display, enum pipe pipe, enum port port, i915_reg_t hdmi_reg) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe port_pipe; bool state; @@ -77,15 +79,14 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, port_name(port)); } -static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, +static void assert_pch_ports_disabled(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; enum pipe port_pipe; - assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); - assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); - assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); + assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B); + assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C); + assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D); INTEL_DISPLAY_STATE_WARN(display, intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe, @@ -93,20 +94,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); INTEL_DISPLAY_STATE_WARN(display, - intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe, + intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe, "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); /* PCH SDVOB multiplex with HDMIB */ - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); + assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB); + assert_pch_hdmi_disabled(display, pipe, PORT_C, 
PCH_HDMIC); + assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID); } -static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, +static void assert_pch_transcoder_disabled(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; u32 val; bool enabled; @@ -117,45 +117,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } -static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, +static void ibx_sanitize_pch_hdmi_port(struct intel_display *display, enum port port, i915_reg_t hdmi_reg) { - u32 val = intel_de_read(dev_priv, hdmi_reg); + u32 val = intel_de_read(display, hdmi_reg); if (val & SDVO_ENABLE || (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) return; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Sanitizing transcoder select for HDMI %c\n", port_name(port)); val &= ~SDVO_PIPE_SEL_MASK; val |= SDVO_PIPE_SEL(PIPE_A); - intel_de_write(dev_priv, hdmi_reg, val); + intel_de_write(display, hdmi_reg, val); } -static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, +static void ibx_sanitize_pch_dp_port(struct intel_display *display, enum port port, i915_reg_t dp_reg) { - u32 val = intel_de_read(dev_priv, dp_reg); + u32 val = intel_de_read(display, dp_reg); if (val & DP_PORT_EN || (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) return; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Sanitizing transcoder select for DP %c\n", port_name(port)); val &= ~DP_PIPE_SEL_MASK; val |= DP_PIPE_SEL(PIPE_A); - intel_de_write(dev_priv, dp_reg, val); + intel_de_write(display, dp_reg, val); } -static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) +static void ibx_sanitize_pch_ports(struct intel_display *display) { /* * The BIOS may select transcoder B on some of the PCH @@ -168,14 +168,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) * (see. intel_dp_link_down(), intel_disable_hdmi(), * intel_disable_sdvo()). 
*/ - ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B); - ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C); - ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D); + ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B); + ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C); + ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D); /* PCH SDVOB multiplex with HDMIB */ - ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB); - ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC); - ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID); + ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB); + ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC); + ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID); } static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc, @@ -225,31 +225,30 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), - intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder))); - - intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), - intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), - intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), - intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), - intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder), + intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder), + intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder), + intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder))); + + intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder), + intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder), + intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder), + intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder))); + intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder), + intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder))); } static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; @@ -326,18 +325,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) assert_fdi_rx_disabled(display, pipe); /* Ports 
must be off as well */ - assert_pch_ports_disabled(dev_priv, pipe); + assert_pch_ports_disabled(display, pipe); reg = PCH_TRANSCONF(pipe); - intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0); + intel_de_rmw(display, reg, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", + if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50)) + drm_err(display->drm, "failed to disable transcoder %c\n", pipe_name(pipe)); if (HAS_PCH_CPT(dev_priv)) /* Workaround: Clear the timing override chicken bit again. */ - intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe), + intel_de_rmw(display, TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } @@ -366,14 +365,14 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state, void ilk_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct intel_display *display = to_intel_display(state); + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; u32 temp; - assert_pch_transcoder_disabled(dev_priv, pipe); + assert_pch_transcoder_disabled(display, pipe); /* For PCH output, training FDI link */ intel_fdi_link_train(crtc, crtc_state); @@ -459,23 +458,28 @@ void ilk_pch_disable(struct intel_atomic_state *state, void ilk_pch_post_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; ilk_disable_pch_transcoder(crtc); if (HAS_PCH_CPT(dev_priv)) { /* disable TRANS_DP_CTL */ - intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe), + intel_de_rmw(display, TRANS_DP_CTL(pipe), TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK, TRANS_DP_PORT_SEL_NONE); /* disable DPLL_SEL */ - intel_de_rmw(dev_priv, PCH_DPLL_SEL, + intel_de_rmw(display, PCH_DPLL_SEL, TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0); } ilk_fdi_pll_disable(crtc); + + intel_disable_shared_dpll(old_crtc_state); } static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) @@ -497,8 +501,8 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) void ilk_pch_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum pipe pipe = crtc->pipe; @@ -550,8 +554,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state) static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val, pipeconf_val; @@ -559,49 +561,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder); assert_fdi_rx_enabled(display, PIPE_A); - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + 
val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A)); /* Workaround: set timing override bit. */ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; /* Configure frame start delay to match the CPU */ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, - TRANSCONF(dev_priv, cpu_transcoder)); + pipeconf_val = intel_de_read(display, + TRANSCONF(display, cpu_transcoder)); if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK) val |= TRANS_INTERLACE_INTERLACED; else val |= TRANS_INTERLACE_PROGRESSIVE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); - if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, + intel_de_write(display, LPT_TRANSCONF, val); + if (intel_de_wait_for_set(display, LPT_TRANSCONF, TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); + drm_err(display->drm, "Failed to enable PCH transcoder\n"); } -static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +static void lpt_disable_pch_transcoder(struct intel_display *display) { - intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0); + intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, + if (intel_de_wait_for_clear(display, LPT_TRANSCONF, TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); + drm_err(display->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. */ - intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); + intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void lpt_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - assert_pch_transcoder_disabled(dev_priv, PIPE_A); + assert_pch_transcoder_disabled(display, PIPE_A); lpt_program_iclkip(crtc_state); @@ -614,36 +616,38 @@ void lpt_pch_enable(struct intel_atomic_state *state, void lpt_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - lpt_disable_pch_transcoder(dev_priv); + lpt_disable_pch_transcoder(display); - lpt_disable_iclkip(dev_priv); + lpt_disable_iclkip(display); } void lpt_pch_get_config(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) + if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0) return; crtc_state->has_pch_encoder = true; - tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A)); crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, &crtc_state->fdi_m_n); - crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); + 
crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display); } -void intel_pch_sanitize(struct drm_i915_private *i915) +void intel_pch_sanitize(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + if (HAS_PCH_IBX(i915)) - ibx_sanitize_pch_ports(i915); + ibx_sanitize_pch_ports(display); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index 35f8288af3d1..cd6b3ed05887 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -9,14 +9,14 @@ #include <linux/types.h> enum pipe; -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_link_m_n; #ifdef I915 -bool intel_has_pch_trancoder(struct drm_i915_private *i915, +bool intel_has_pch_trancoder(struct intel_display *display, enum pipe pch_transcoder); enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); @@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n); -void intel_pch_sanitize(struct drm_i915_private *i915); +void intel_pch_sanitize(struct intel_display *display); #else -static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915, +static inline bool intel_has_pch_trancoder(struct intel_display *display, enum pipe pch_transcoder) { return false; @@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { } -static inline void intel_pch_sanitize(struct drm_i915_private *i915) +static inline void intel_pch_sanitize(struct intel_display *display) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 33467de3d115..1307a478861a 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -11,27 +11,28 @@ #include "intel_pch_refclk.h" #include "intel_sbi.h" -static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) +static void lpt_fdi_reset_mphy(struct intel_display *display) { - intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); + intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); - if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & + if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); + drm_err(display->drm, "FDI mPHY reset assert timeout\n"); - intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); + intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); - if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & + if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); + drm_err(display->drm, "FDI mPHY reset de-assert timeout\n"); } /* WaMPhyProgramming:hsw */ -static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) +static void lpt_fdi_program_mphy(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 tmp; - lpt_fdi_reset_mphy(dev_priv); + lpt_fdi_reset_mphy(display); tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); @@ -103,11 +104,12 @@ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) 
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); } -void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +void lpt_disable_iclkip(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 temp; - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); + intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE); intel_sbi_lock(dev_priv); @@ -175,24 +177,25 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state) /* Program iCLKIP clock to the desired frequency */ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int clock = crtc_state->hw.adjusted_mode.crtc_clock; struct iclkip_params p; u32 temp; - lpt_disable_iclkip(dev_priv); + lpt_disable_iclkip(display); lpt_compute_iclkip(&p, clock); - drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock); + drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock); /* This should not happen with any sane values */ - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) & + drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) & ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) & + drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) & ~SBI_SSCDIVINTPHASE_INCVAL_MASK); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc); @@ -224,15 +227,16 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) /* Wait for initialization time */ udelay(24); - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); + intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE); } -int lpt_get_iclkip(struct drm_i915_private *dev_priv) +int lpt_get_iclkip(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct iclkip_params p; u32 temp; - if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) return 0; iclkip_params_init(&p); @@ -268,15 +272,16 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) * - Sequence to enable CLKOUT_DP without spread * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O */ -static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, +static void lpt_enable_clkout_dp(struct intel_display *display, bool with_spread, bool with_fdi) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 reg, tmp; - if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, + if (drm_WARN(display->drm, with_fdi && !with_spread, "FDI requires downspread\n")) with_spread = true; - if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && + if (drm_WARN(display->drm, HAS_PCH_LPT_LP(dev_priv) && with_fdi, "LP PCH doesn't have FDI\n")) with_fdi = false; @@ -295,7 +300,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); if (with_fdi) - lpt_fdi_program_mphy(dev_priv); + lpt_fdi_program_mphy(display); } reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; @@ -307,8 +312,9 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, } /* Sequence to disable CLKOUT_DP */ -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +void lpt_disable_clkout_dp(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 reg, tmp; intel_sbi_lock(dev_priv); @@ -364,15 +370,16 @@ static const u16 sscdivintphase[] = { * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) * change in clock period = -(steps / 10) * 5.787 ps */ -static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) +static void lpt_bend_clkout_dp(struct intel_display *display, int steps) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 tmp; int idx = BEND_IDX(steps); - if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) + if (drm_WARN_ON(display->drm, steps % 5 != 0)) return; - if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) + if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase))) return; intel_sbi_lock(dev_priv); @@ -393,10 +400,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) #undef BEND_IDX -static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) +static bool spll_uses_pch_ssc(struct intel_display *display) { - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, SPLL_CTL); + u32 fuse_strap = intel_de_read(display, FUSE_STRAP); + u32 ctl = intel_de_read(display, SPLL_CTL); if ((ctl & SPLL_PLL_ENABLE) == 0) return false; @@ -405,18 +412,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; - if (IS_BROADWELL(dev_priv) && + if (display->platform.broadwell && (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) return true; return false; } -static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, - enum intel_dpll_id id) +static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id) { - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); + u32 fuse_strap = intel_de_read(display, FUSE_STRAP); + u32 ctl = intel_de_read(display, WRPLL_CTL(id)); if ((ctl & WRPLL_PLL_ENABLE) == 0) return false; @@ -424,7 +430,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) return true; - if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) && + if ((display->platform.broadwell || display->platform.haswell_ult) && (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; @@ -432,12 +438,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, return false; } -static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) +static void lpt_init_pch_refclk(struct intel_display *display) { struct intel_encoder *encoder; bool has_fdi = false; - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_ANALOG: has_fdi = true; @@ -462,37 +468,37 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) * clock hierarchy. That would also allow us to do * clock bending finally. 
*/ - dev_priv->display.dpll.pch_ssc_use = 0; + display->dpll.pch_ssc_use = 0; - if (spll_uses_pch_ssc(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); - dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL); + if (spll_uses_pch_ssc(display)) { + drm_dbg_kms(display->drm, "SPLL using PCH SSC\n"); + display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL); } - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); - dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1); + if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) { + drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n"); + display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1); } - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); - dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2); + if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) { + drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n"); + display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2); } - if (dev_priv->display.dpll.pch_ssc_use) + if (display->dpll.pch_ssc_use) return; if (has_fdi) { - lpt_bend_clkout_dp(dev_priv, 0); - lpt_enable_clkout_dp(dev_priv, true, true); + lpt_bend_clkout_dp(display, 0); + lpt_enable_clkout_dp(display, true, true); } else { - lpt_disable_clkout_dp(dev_priv); + lpt_disable_clkout_dp(display); } } -static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) +static void ilk_init_pch_refclk(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; struct intel_shared_dpll *pll; int i; @@ -607,7 +613,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* SSC must be turned on before enabling the CPU output */ if (intel_panel_use_ssc(display) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); + drm_dbg_kms(display->drm, "Using SSC on panel\n"); val |= DREF_SSC1_ENABLE; } else { val &= ~DREF_SSC1_ENABLE; @@ -623,7 +629,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { if (intel_panel_use_ssc(display) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Using SSC on eDP\n"); val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; } else { @@ -670,10 +676,12 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* * Initialize reference clocks when the driver loads */ -void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +void intel_init_pch_refclk(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ilk_init_pch_refclk(dev_priv); + ilk_init_pch_refclk(display); else if (HAS_PCH_LPT(dev_priv)) - lpt_init_pch_refclk(dev_priv); + lpt_init_pch_refclk(display); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h index ae3403c0ced8..25cc53c568bc 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -8,25 +8,25 @@ #include <linux/types.h> -struct drm_i915_private; struct intel_crtc_state; +struct intel_display; #ifdef I915 void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); -void lpt_disable_iclkip(struct drm_i915_private *dev_priv); -int lpt_get_iclkip(struct drm_i915_private *dev_priv); +void lpt_disable_iclkip(struct intel_display 
*display); +int lpt_get_iclkip(struct intel_display *display); int lpt_iclkip(const struct intel_crtc_state *crtc_state); -void intel_init_pch_refclk(struct drm_i915_private *dev_priv); -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); +void intel_init_pch_refclk(struct intel_display *display); +void lpt_disable_clkout_dp(struct intel_display *display); #else static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { } -static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +static inline void lpt_disable_iclkip(struct intel_display *display) { } -static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv) +static inline int lpt_get_iclkip(struct intel_display *display) { return 0; } @@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state) { return 0; } -static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +static inline void intel_init_pch_refclk(struct intel_display *display) { } -static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +static inline void lpt_disable_clkout_dp(struct intel_display *display) { } #endif diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 10e26c3db946..6182f484b5bd 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, +static void i9xx_pipe_crc_auto_source(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source) { @@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, *source = INTEL_PIPE_CRC_SOURCE_PIPE; - drm_modeset_lock_all(&dev_priv->drm); - for_each_intel_encoder(&dev_priv->drm, encoder) { + drm_modeset_lock_all(display->drm); + for_each_intel_encoder(display->drm, encoder) { if (!encoder->base.crtc) continue; @@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, *source = INTEL_PIPE_CRC_SOURCE_DP_D; break; default: - drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n", + drm_WARN(display->drm, 1, "nonexisting DP port %c\n", port_name(dig_port->base.port)); break; } @@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, break; } } - drm_modeset_unlock_all(&dev_priv->drm); + drm_modeset_unlock_all(display->drm); } -static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, +static int vlv_pipe_crc_ctl_reg(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) @@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, bool need_stable_symbols = false; if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - i9xx_pipe_crc_auto_source(dev_priv, pipe, source); + i9xx_pipe_crc_auto_source(display, pipe, source); switch (*source) { case INTEL_PIPE_CRC_SOURCE_PIPE: @@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, need_stable_symbols = true; break; case INTEL_PIPE_CRC_SOURCE_DP_D: - if (!IS_CHERRYVIEW(dev_priv)) + if (!display->platform.cherryview) return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; need_stable_symbols = true; @@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, * - DisplayPort scrambling: used for EMI reduction */ if 
(need_stable_symbols) { - u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv)); + u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display)); tmp |= DC_BALANCE_RESET_VLV; switch (pipe) { @@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, default: return -EINVAL; } - intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp); + intel_de_write(display, PORT_DFT2_G4X(display), tmp); } return 0; } -static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, +static int i9xx_pipe_crc_ctl_reg(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - i9xx_pipe_crc_auto_source(dev_priv, pipe, source); + i9xx_pipe_crc_auto_source(display, pipe, source); switch (*source) { case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; break; case INTEL_PIPE_CRC_SOURCE_TV: - if (!SUPPORTS_TV(dev_priv)) + if (!SUPPORTS_TV(display)) return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; break; @@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return 0; } -static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, +static void vlv_undo_pipe_scramble_reset(struct intel_display *display, enum pipe pipe) { - u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv)); + u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display)); switch (pipe) { case PIPE_A: @@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, } if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) tmp &= ~DC_BALANCE_RESET_VLV; - intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp); + intel_de_write(display, PORT_DFT2_G4X(display), tmp); } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, @@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, static void intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; int ret; - if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) - i915gm_irq_cstate_wa(dev_priv, enable); + if (display->platform.i945gm || display->platform.i915gm) + i915gm_irq_cstate_wa(display, enable); drm_modeset_acquire_init(&ctx, 0); - state = drm_atomic_state_alloc(&dev_priv->drm); + state = drm_atomic_state_alloc(display->drm); if (!state) { ret = -ENOMEM; goto unlock; @@ -311,7 +311,7 @@ retry: pipe_config->uapi.mode_changed = pipe_config->has_psr; pipe_config->crc_enabled = enable; - if (IS_HASWELL(dev_priv) && + if (display->platform.haswell && pipe_config->hw.active && crtc->pipe == PIPE_A && pipe_config->cpu_transcoder == TRANSCODER_EDP) pipe_config->uapi.mode_changed = true; @@ -327,13 +327,13 @@ put_state: drm_atomic_state_put(state); unlock: - drm_WARN(&dev_priv->drm, ret, + drm_WARN(display->drm, ret, "Toggling workaround to %i returns %i\n", enable, ret); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } -static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, +static int ivb_pipe_crc_ctl_reg(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) @@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return 0; } -static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, +static int 
skl_pipe_crc_ctl_reg(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) @@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return 0; } -static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, +static int get_new_crc_ctl_reg(struct intel_display *display, enum pipe pipe, enum intel_pipe_crc_source *source, u32 *val) { - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(display) == 2) return i8xx_pipe_crc_ctl_reg(source, val); - else if (DISPLAY_VER(dev_priv) < 5) - return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val); - else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv)) + else if (DISPLAY_VER(display) < 5) + return i9xx_pipe_crc_ctl_reg(display, pipe, source, val); + else if (display->platform.valleyview || display->platform.cherryview) + return vlv_pipe_crc_ctl_reg(display, pipe, source, val); + else if (display->platform.ironlake || display->platform.sandybridge) return ilk_pipe_crc_ctl_reg(source, val); - else if (DISPLAY_VER(dev_priv) < 9) - return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val); + else if (DISPLAY_VER(display) < 9) + return ivb_pipe_crc_ctl_reg(display, pipe, source, val); else - return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val); + return skl_pipe_crc_ctl_reg(display, pipe, source, val); } static int @@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc) spin_lock_init(&pipe_crc->lock); } -static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv, +static int i8xx_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv, } } -static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv, +static int i9xx_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv, } } -static int vlv_crc_source_valid(struct drm_i915_private *dev_priv, +static int vlv_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv, } } -static int ilk_crc_source_valid(struct drm_i915_private *dev_priv, +static int ilk_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv, } } -static int ivb_crc_source_valid(struct drm_i915_private *dev_priv, +static int ivb_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv, } } -static int skl_crc_source_valid(struct drm_i915_private *dev_priv, +static int skl_crc_source_valid(struct intel_display *display, const enum intel_pipe_crc_source source) { switch (source) { @@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv, } static int -intel_is_valid_crc_source(struct drm_i915_private *dev_priv, +intel_is_valid_crc_source(struct intel_display *display, const enum intel_pipe_crc_source source) { - if (DISPLAY_VER(dev_priv) == 2) - return i8xx_crc_source_valid(dev_priv, source); - 
else if (DISPLAY_VER(dev_priv) < 5) - return i9xx_crc_source_valid(dev_priv, source); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return vlv_crc_source_valid(dev_priv, source); - else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv)) - return ilk_crc_source_valid(dev_priv, source); - else if (DISPLAY_VER(dev_priv) < 9) - return ivb_crc_source_valid(dev_priv, source); + if (DISPLAY_VER(display) == 2) + return i8xx_crc_source_valid(display, source); + else if (DISPLAY_VER(display) < 5) + return i9xx_crc_source_valid(display, source); + else if (display->platform.valleyview || display->platform.cherryview) + return vlv_crc_source_valid(display, source); + else if (display->platform.ironlake || display->platform.sandybridge) + return ilk_crc_source_valid(display, source); + else if (DISPLAY_VER(display) < 9) + return ivb_crc_source_valid(display, source); else - return skl_crc_source_valid(dev_priv, source); + return skl_crc_source_valid(display, source); } const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, @@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); enum intel_pipe_crc_source source; if (display_crc_ctl_parse_source(source_name, &source) < 0) { - drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); + drm_dbg_kms(display->drm, "unknown source %s\n", source_name); return -EINVAL; } if (source == INTEL_PIPE_CRC_SOURCE_AUTO || - intel_is_valid_crc_source(dev_priv, source) == 0) { + intel_is_valid_crc_source(display, source) == 0) { *values_cnt = 5; return 0; } @@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name) { struct intel_crtc *crtc = to_intel_crtc(_crtc); struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; @@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name) bool enable; if (display_crc_ctl_parse_source(source_name, &source) < 0) { - drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); + drm_dbg_kms(display->drm, "unknown source %s\n", source_name); return -EINVAL; } power_domain = POWER_DOMAIN_PIPE(pipe); wakeref = intel_display_power_get_if_enabled(display, power_domain); if (!wakeref) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Trying to capture CRC while pipe is off\n"); return -EIO; } @@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name) if (enable) intel_crtc_crc_setup_workarounds(crtc, true); - ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val); + ret = get_new_crc_ctl_reg(display, pipe, &source, &val); if (ret != 0) goto out; pipe_crc->source = source; - intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val); - intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe)); + intel_de_write(display, PIPE_CRC_CTL(display, pipe), val); + intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe)); if (!source) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_undo_pipe_scramble_reset(dev_priv, pipe); + if (display->platform.valleyview || display->platform.cherryview) + 
vlv_undo_pipe_scramble_reset(display, pipe); } pipe_crc->skipped = 0; @@ -636,7 +635,7 @@ out: void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum pipe pipe = crtc->pipe; u32 val = 0; @@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) if (!crtc->base.crc.opened) return; - if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0) + if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0) return; /* Don't need pipe_crc->lock here, IRQs are not generated. */ pipe_crc->skipped = 0; - intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val); - intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe)); + intel_de_write(display, PIPE_CRC_CTL(display, pipe), val); + intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe)); } void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; enum pipe pipe = crtc->pipe; @@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc) pipe_crc->skipped = INT_MIN; spin_unlock_irq(&pipe_crc->lock); - intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0); - intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe)); + intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0); + intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe)); intel_synchronize_irq(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index b1675b46e06c..b0c4892775ce 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -52,44 +52,55 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this, return false; } +static enum intel_memory_type +initial_plane_memory_type(struct drm_i915_private *i915) +{ + if (IS_DGFX(i915)) + return INTEL_MEMORY_LOCAL; + else if (HAS_LMEMBAR_SMEM_STOLEN(i915)) + return INTEL_MEMORY_STOLEN_LOCAL; + else + return INTEL_MEMORY_STOLEN_SYSTEM; +} + static bool -initial_plane_phys_lmem(struct intel_display *display, - struct intel_initial_plane_config *plane_config) +initial_plane_phys(struct intel_display *display, + struct intel_initial_plane_config *plane_config) { struct drm_i915_private *i915 = to_i915(display->drm); - gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm; + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; struct intel_memory_region *mem; + enum intel_memory_type mem_type; + bool is_present, is_local; dma_addr_t dma_addr; - gen8_pte_t pte; u32 base; + mem_type = initial_plane_memory_type(i915); + mem = intel_memory_region_by_type(i915, mem_type); + if (!mem) { + drm_dbg_kms(display->drm, + "Initial plane memory region (type %s) not initialized\n", + intel_memory_type_str(mem_type)); + return false; + } + base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); - gte += base / I915_GTT_PAGE_SIZE; + dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local); - pte = ioread64(gte); - if (!(pte & GEN12_GGTT_PTE_LM)) { + if (!is_present) { drm_err(display->drm, - "Initial plane programming missing PTE_LM bit\n"); + "Initial plane FB PTE not present\n"); return false; } - dma_addr = pte & 
GEN12_GGTT_PTE_ADDR_MASK; - - if (IS_DGFX(i915)) - mem = i915->mm.regions[INTEL_REGION_LMEM_0]; - else - mem = i915->mm.stolen_region; - if (!mem) { - drm_dbg_kms(display->drm, - "Initial plane memory region not initialized\n"); + if (intel_memory_type_is_local(mem->type) != is_local) { + drm_err(display->drm, + "Initial plane FB PTE unsuitable for %s\n", + mem->region.name); return false; } - /* - * On lmem we don't currently expect this to - * ever be placed in the stolen portion. - */ if (dma_addr < mem->region.start || dma_addr > mem->region.end) { drm_err(display->drm, "Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n", @@ -107,42 +118,6 @@ initial_plane_phys_lmem(struct intel_display *display, return true; } -static bool -initial_plane_phys_smem(struct intel_display *display, - struct intel_initial_plane_config *plane_config) -{ - struct drm_i915_private *i915 = to_i915(display->drm); - struct intel_memory_region *mem; - u32 base; - - base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT); - - mem = i915->mm.stolen_region; - if (!mem) { - drm_dbg_kms(display->drm, - "Initial plane memory region not initialized\n"); - return false; - } - - /* FIXME get and validate the dma_addr from the PTE */ - plane_config->phys_base = base; - plane_config->mem = mem; - - return true; -} - -static bool -initial_plane_phys(struct intel_display *display, - struct intel_initial_plane_config *plane_config) -{ - struct drm_i915_private *i915 = to_i915(display->drm); - - if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915)) - return initial_plane_phys_lmem(display, plane_config); - else - return initial_plane_phys_smem(display, plane_config); -} - static struct i915_vma * initial_plane_vma(struct intel_display *display, struct intel_initial_plane_config *plane_config) diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index 63301a01906c..d22b5469672d 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -5,6 +5,8 @@ #include <linux/bitops.h> +#include <drm/drm_print.h> + #include "i915_reg.h" #include "i915_utils.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 617ce4993172..4d4e2b9f5f2d 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -91,7 +91,6 @@ static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps.vlv_pps_pipe; bool pll_enabled, release_cl_override = false; @@ -134,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) release_cl_override = display->platform.cherryview && !chv_phy_powergate_ch(display, phy, ch, true); - if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) { + if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) { drm_err(display->drm, "Failed to force on PLL for pipe %c!\n", pipe_name(pipe)); @@ -158,7 +157,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) intel_de_posting_read(display, intel_dp->output_reg); if (!pll_enabled) { - vlv_force_pll_off(dev_priv, pipe); + vlv_force_pll_off(display, pipe); if (release_cl_override) chv_phy_powergate_ch(display, phy, ch, false); @@ -744,11 +743,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp 
*intel_dp) i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->pps.want_panel_vdd; - lockdep_assert_held(&display->pps.mutex); - if (!intel_dp_is_edp(intel_dp)) return false; + lockdep_assert_held(&display->pps.mutex); + cancel_delayed_work(&intel_dp->pps.panel_vdd_work); intel_dp->pps.want_panel_vdd = true; @@ -925,11 +924,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct intel_display *display = to_intel_display(intel_dp); - lockdep_assert_held(&display->pps.mutex); - if (!intel_dp_is_edp(intel_dp)) return; + lockdep_assert_held(&display->pps.mutex); + INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on", dp_to_dig_port(intel_dp)->base.base.base.id, @@ -1855,7 +1854,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) switch (port_sel) { case PANEL_PORT_SELECT_LVDS: - intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); + intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe); break; case PANEL_PORT_SELECT_DPA: g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe); @@ -1883,7 +1882,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe) drm_WARN_ON(display->drm, port_sel != PANEL_PORT_SELECT_LVDS); - intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); + intel_lvds_port_enabled(display, LVDS, &panel_pipe); } val = intel_de_read(display, pp_reg); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 4e938bad808c..eef48c014112 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -36,6 +36,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_irq.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -463,8 +464,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) if (DISPLAY_VER(display) >= 9) { u32 val; - val = intel_de_rmw(dev_priv, - PSR_EVENT(dev_priv, cpu_transcoder), + val = intel_de_rmw(display, + PSR_EVENT(display, cpu_transcoder), 0, 0); psr_event_print(display, val, intel_dp->psr.sel_update_enabled); @@ -689,7 +690,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 aux_clock_divider, aux_ctl; /* write DP_SET_POWER=D0 */ @@ -704,7 +704,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) - intel_de_write(dev_priv, + intel_de_write(display, psr_aux_data_reg(display, cpu_transcoder, i >> 2), intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i)); @@ -839,7 +839,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 val = 0; if (DISPLAY_VER(display) >= 11) @@ -873,7 +872,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) * WA 0479: hsw,bdw * "Do not skip both TP1 and TP2/TP3" */ - if (DISPLAY_VER(dev_priv) < 9 && + if (DISPLAY_VER(display) < 9 && connector->panel.vbt.psr.tp1_wakeup_time_us == 0 && connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) val |= EDP_PSR_TP2_TP3_TIME_100us; @@ 
-909,7 +908,6 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) static void hsw_activate_psr1(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; @@ -919,7 +917,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (DISPLAY_VER(display) < 20) val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time); - if (IS_HASWELL(dev_priv)) + if (display->platform.haswell) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; if (intel_dp->psr.link_standby) @@ -1013,14 +1011,13 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp) static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; u32 psr_val = 0; val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); - if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv)) + if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p) val |= EDP_SU_TRACK_ENABLE; if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13) @@ -1038,7 +1035,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) } /* Wa_22012278275:adl-p */ - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) { + if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) { static const u8 map[] = { 2, /* 5 lines */ 1, /* 6 lines */ @@ -1103,9 +1100,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) + if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B; else if (DISPLAY_VER(display) >= 12) return cpu_transcoder == TRANSCODER_A; @@ -1183,10 +1178,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp, struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - struct drm_i915_private *dev_priv = to_i915(display->drm); enum port port = dig_port->base.port; - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) + if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) return pipe <= PIPE_B && port <= PORT_B; else return pipe == PIPE_A && port == PORT_A; @@ -1197,7 +1191,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; struct i915_power_domains *power_domains = &display->power.domains; u32 exit_scanlines; @@ -1223,7 +1216,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, return; /* Wa_16011303918:adl-p */ - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) + if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) return; /* @@ -1264,7 +1257,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp, struct intel_crtc_state 
*crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; @@ -1286,7 +1278,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp, * For other platforms with SW tracking we can adjust the y coordinates * to match sink requirement if multiple of 4. */ - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) + if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) y_granularity = intel_dp->psr.su_y_granularity; else if (intel_dp->psr.su_y_granularity <= 2) y_granularity = 4; @@ -1412,7 +1404,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; @@ -1421,20 +1412,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; /* JSL and EHL only supports eDP 1.3 */ - if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) { + if (display->platform.jasperlake || display->platform.elkhartlake) { drm_dbg_kms(display->drm, "PSR2 not supported by phy\n"); return false; } /* Wa_16011181250 */ - if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || - IS_DG2(dev_priv)) { + if (display->platform.rocketlake || display->platform.alderlake_s || + display->platform.dg2) { drm_dbg_kms(display->drm, "PSR2 is defeatured for this platform\n"); return false; } - if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { + if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { drm_dbg_kms(display->drm, "PSR2 not completely functional in this stepping\n"); return false; @@ -1453,7 +1444,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * over PSR2. 
*/ if (crtc_state->dsc.compression_enable && - (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) { + (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) { drm_dbg_kms(display->drm, "PSR2 cannot be enabled since DSC is enabled\n"); return false; @@ -1486,7 +1477,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, /* Wa_16011303918:adl-p */ if (crtc_state->vrr.enable && - IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { + display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) { drm_dbg_kms(display->drm, "PSR2 not enabled, not compatible with HW stepping + VRR\n"); return false; @@ -1604,6 +1595,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, return false; } + if (crtc_state->crc_enabled) { + drm_dbg_kms(display->drm, + "Panel Replay not enabled because it would inhibit pipe CRC calculation\n"); + return false; + } + if (!intel_dp_is_edp(intel_dp)) return true; @@ -1634,12 +1631,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp, if (!alpm_config_valid(intel_dp, crtc_state, true)) return false; - if (crtc_state->crc_enabled) { - drm_dbg_kms(display->drm, - "Panel Replay not enabled because it would inhibit pipe CRC calculation\n"); - return false; - } - return true; } @@ -1827,7 +1818,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask = 0; @@ -1866,7 +1856,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * As a workaround leave LPSP unmasked to prevent PSR entry * when external displays are active. */ - if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv)) + if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult) mask |= EDP_PSR_DEBUG_MASK_LPSP; if (DISPLAY_VER(display) < 20) @@ -1880,7 +1870,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; /* allow PSR with sprite enabled */ - if (IS_HASWELL(dev_priv)) + if (display->platform.haswell) mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE; } @@ -1925,7 +1915,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, */ if (!intel_dp->psr.panel_replay_enabled && (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || - IS_ALDERLAKE_P(dev_priv))) + display->platform.alderlake_p)) intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); @@ -1936,7 +1926,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), 0, MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS); - else if (IS_ALDERLAKE_P(dev_priv)) + else if (display->platform.alderlake_p) intel_de_rmw(display, CLKGATE_DIS_MISC, 0, CLKGATE_DIS_MISC_DMASC_GATING_DIS); } @@ -2024,7 +2014,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_psr_enable_source(intel_dp, crtc_state); intel_dp->psr.enabled = true; - intel_dp->psr.paused = false; + intel_dp->psr.pause_counter = 0; /* * Link_ok is sticky and set here on PSR enable. 
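The hunks below replace the psr.paused flag with a pause counter so that intel_psr_pause()/intel_psr_resume() can nest. A minimal compilable model of the counting semantics, with a hypothetical psr_state struct standing in for the driver's types:

#include <assert.h>

struct psr_state {
        int pause_counter;
        int active;
};

static void psr_pause(struct psr_state *psr)
{
        if (psr->pause_counter++ == 0)  /* only the first pause deactivates */
                psr->active = 0;
}

static void psr_resume(struct psr_state *psr)
{
        assert(psr->pause_counter > 0); /* unbalanced pause/resume */

        if (--psr->pause_counter == 0)  /* only the last resume reactivates */
                psr->active = 1;
}

int main(void)
{
        struct psr_state psr = { .pause_counter = 0, .active = 1 };

        psr_pause(&psr);        /* counter 1, deactivated */
        psr_pause(&psr);        /* counter 2, nesting is now legal */
        psr_resume(&psr);       /* counter 1, still paused */
        psr_resume(&psr);       /* counter 0, active again */
        assert(psr.active == 1);
        return 0;
}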
We can assume link @@ -2104,7 +2094,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp) static void intel_psr_disable_locked(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; lockdep_assert_held(&intel_dp->psr.lock); @@ -2136,7 +2125,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_de_rmw(display, MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder), MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0); - else if (IS_ALDERLAKE_P(dev_priv)) + else if (display->platform.alderlake_p) intel_de_rmw(display, CLKGATE_DIS_MISC, CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); } @@ -2210,7 +2199,6 @@ void intel_psr_disable(struct intel_dp *intel_dp, */ void intel_psr_pause(struct intel_dp *intel_dp) { - struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) @@ -2223,12 +2211,10 @@ void intel_psr_pause(struct intel_dp *intel_dp) return; } - /* If we ever hit this, we will need to add refcount to pause/resume */ - drm_WARN_ON(display->drm, psr->paused); - - intel_psr_exit(intel_dp); - intel_psr_wait_exit_locked(intel_dp); - psr->paused = true; + if (intel_dp->psr.pause_counter++ == 0) { + intel_psr_exit(intel_dp); + intel_psr_wait_exit_locked(intel_dp); + } mutex_unlock(&psr->lock); @@ -2244,6 +2230,7 @@ void intel_psr_pause(struct intel_dp *intel_dp) */ void intel_psr_resume(struct intel_dp *intel_dp) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) @@ -2251,13 +2238,18 @@ void intel_psr_resume(struct intel_dp *intel_dp) mutex_lock(&psr->lock); - if (!psr->paused) - goto unlock; + if (!psr->enabled) + goto out; - psr->paused = false; - intel_psr_activate(intel_dp); + if (!psr->pause_counter) { + drm_warn(display->drm, "Unbalanced PSR pause/resume!\n"); + goto out; + } -unlock: + if (--intel_dp->psr.pause_counter == 0) + intel_psr_activate(intel_dp); + +out: mutex_unlock(&psr->lock); } @@ -2314,35 +2306,27 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb, static u32 man_trk_ctl_enable_bit_get(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 : + return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 : PSR2_MAN_TRK_CTL_ENABLE; } static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? + return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; } static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? + return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE : PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; } static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME : PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME; } @@ -2405,8 +2389,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, bool full_update) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = man_trk_ctl_enable_bit_get(display); /* SF partial frame enable has to be set even on full update */ @@ -2420,7 +2402,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, if (crtc_state->psr2_su_area.y1 == -1) goto exit; - if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) { + if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) { val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1); val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1); } else { @@ -2474,13 +2456,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area, static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; u16 y_alignment; /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ if (crtc_state->dsc.compression_enable && - (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)) + (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)) y_alignment = vdsc_cfg->slice_height; else y_alignment = crtc_state->su_y_granularity; @@ -2601,12 +2582,11 @@ static void intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); /* Wa_14014971492 */ if (!crtc_state->has_panel_replay && ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) || - IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) && + display->platform.alderlake_p || display->platform.tigerlake)) && crtc_state->splitter.enable) crtc_state->psr2_su_area.y1 = 0; @@ -2807,7 +2787,6 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = @@ -2839,7 +2818,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, new_crtc_state->has_sel_update != psr->sel_update_enabled || new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled || new_crtc_state->has_panel_replay != psr->panel_replay_enabled || - (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled)) + (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled)) intel_psr_disable_locked(intel_dp); else if (new_crtc_state->wm_level_disabled) /* Wa_14015648006 */ @@ -3322,7 +3301,7 @@ void intel_psr_flush(struct intel_display *display, * we have to ensure that the PSR is not activated until * intel_psr_resume() is called. 
*/ - if (intel_dp->psr.paused) + if (intel_dp->psr.pause_counter) goto unlock; if (origin == ORIGIN_FLIP || @@ -3634,8 +3613,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) const char *status = "unknown"; u32 val, status_val; - if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || - intel_dp->psr.panel_replay_enabled)) { + if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) && + (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) { static const char * const live_status[] = { "IDLE", "CAPTURE", @@ -3728,10 +3707,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp, static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; bool enabled; u32 val, psr2_ctl; @@ -3740,7 +3718,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) if (!(psr->sink_support || psr->sink_panel_replay_support)) return 0; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); mutex_lock(&psr->lock); intel_psr_print_mode(intel_dp, m); @@ -3822,7 +3800,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) unlock: mutex_unlock(&psr->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } @@ -3853,9 +3831,7 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct intel_display *display = data; - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - intel_wakeref_t wakeref; int ret = -ENODEV; if (!HAS_PSR(display)) @@ -3866,12 +3842,9 @@ i915_edp_psr_debug_set(void *data, u64 val) drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - // TODO: split to each transcoder's PSR debug state - ret = intel_psr_debug_set(intel_dp, val); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + with_intel_display_rpm(display) + ret = intel_psr_debug_set(intel_dp, val); } return ret; diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 6e2d9929b4d7..757b9ce7e3b1 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2036,7 +2036,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) struct intel_display *display = to_intel_display(&intel_sdvo->base); u16 hotplug; - if (!I915_HAS_HOTPLUG(display)) + if (!HAS_HOTPLUG(display)) return 0; /* diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index b9acd9fe160c..2b53ac9f4935 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -5,6 +5,8 @@ #include <linux/math.h> +#include <drm/drm_print.h> + #include "i915_reg.h" #include "i915_utils.h" #include "intel_ddi.h" diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index b8d14ed8a56e..c1014e74791f 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -3,8 +3,10 @@ * Copyright © 2019 Intel Corporation */ -#include "i915_drv.h" +#include <drm/drm_print.h> + #include 
"i915_reg.h" +#include "i915_utils.h" #include "intel_atomic.h" #include "intel_cx0_phy_regs.h" #include "intel_ddi.h" @@ -92,11 +94,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port) return dig_port->tc; } -static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) -{ - return to_i915(tc->dig_port->base.base.dev); -} - static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode) { @@ -219,10 +216,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma static void tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) { + struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port); enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); + drm_WARN_ON(display->drm, tc->lock_power_domain != domain); #endif __tc_cold_unblock(tc, domain, wakeref); } @@ -266,13 +264,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc) static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); u32 lane_mask; - lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); + lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia)); - drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); + drm_WARN_ON(display->drm, lane_mask == 0xffffffff); assert_tc_cold_blocked(tc); lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); @@ -281,13 +279,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); u32 pin_mask; - pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); + pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia)); - drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); + drm_WARN_ON(display->drm, pin_mask == 0xffffffff); assert_tc_cold_blocked(tc); return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> @@ -297,13 +295,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) { struct intel_display *display = to_intel_display(dig_port); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); intel_wakeref_t wakeref; u32 val, pin_assignment; with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) - val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); + val = intel_de_read(display, TCSS_DDI_STATUS(tc_port)); pin_assignment = REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val); @@ -369,7 +366,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) @@ -377,10 +374,10 @@ int 
intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) assert_tc_cold_blocked(tc); - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) return lnl_tc_port_get_max_lane_count(dig_port); - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) return mtl_tc_port_get_max_lane_count(dig_port); return intel_tc_port_get_max_lane_count(dig_port); @@ -389,20 +386,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); bool lane_reversal = dig_port->lane_reversal; u32 val; - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) return; - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, lane_reversal && tc->mode != TC_PORT_LEGACY); assert_tc_cold_blocked(tc); - val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); + val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); switch (required_lanes) { @@ -423,16 +420,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, MISSING_CASE(required_lanes); } - intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); + intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); } static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, u32 live_status_mask) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 valid_hpd_mask; - drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); + drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED); if (hweight32(live_status_mask) != 1) return; @@ -447,7 +444,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, return; /* If live status mismatches the VBT flag, trust the live status. 
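tc_port_fixup_legacy_flag() above only acts when exactly one live-status bit is set; hweight32() is the kernel's 32-bit population count. A standalone equivalent of that single-bit test, for illustration:

#include <stdbool.h>
#include <stdint.h>

/* True when exactly one bit of mask is set, i.e. hweight32(mask) == 1:
 * clearing the lowest set bit leaves zero only for powers of two. */
static bool exactly_one_bit_set(uint32_t mask)
{
        return mask != 0 && (mask & (mask - 1)) == 0;
}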
*/ - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n", tc->port_name, live_status_mask, valid_hpd_mask); @@ -490,21 +487,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct intel_display *display = to_intel_display(tc->dig_port); - struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; - u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; + u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin]; intel_wakeref_t wakeref; u32 fia_isr; u32 pch_isr; u32 mask = 0; with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) { - fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); - pch_isr = intel_de_read(i915, SDEISR); + fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia)); + pch_isr = intel_de_read(display, SDEISR); } if (fia_isr == 0xffffffff) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY in TCCOLD, nothing connected\n", tc->port_name); return mask; @@ -531,14 +527,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) */ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); + val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia)); if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY in TCCOLD, assuming not ready\n", tc->port_name); return false; @@ -550,14 +546,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); + val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY in TCCOLD, can't %s ownership\n", tc->port_name, take ? 
"take" : "release"); @@ -568,21 +564,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, if (take) val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); - intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); + intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); return true; } static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); + val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY in TCCOLD, assume not owned\n", tc->port_name); return false; @@ -619,30 +615,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, int required_lanes) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); struct intel_digital_port *dig_port = tc->dig_port; int max_lanes; max_lanes = intel_tc_port_max_lane_count(dig_port); if (tc->mode == TC_PORT_LEGACY) { - drm_WARN_ON(&i915->drm, max_lanes != 4); + drm_WARN_ON(display->drm, max_lanes != 4); return true; } - drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); + drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT); /* * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. */ if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { - drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n", + drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n", tc->port_name); return false; } if (max_lanes < required_lanes) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY max lanes %d < required lanes %d\n", tc->port_name, max_lanes, required_lanes); @@ -655,7 +651,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, static bool icl_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); tc->lock_wakeref = tc_cold_block(tc); @@ -664,8 +660,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, if ((!tc_phy_is_ready(tc) || !icl_tc_phy_take_ownership(tc, true)) && - !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { - drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n", + !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n", tc->port_name, str_yes_no(tc_phy_is_ready(tc))); goto out_unblock_tc_cold; @@ -733,14 +729,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) static void tgl_tc_phy_init(struct intel_tc_port *tc) { struct intel_display *display = to_intel_display(tc->dig_port); - struct drm_i915_private *i915 = tc_to_i915(tc); intel_wakeref_t wakeref; u32 val; with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) - val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1)); + val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1)); - drm_WARN_ON(&i915->drm, val == 0xffffffff); + drm_WARN_ON(display->drm, val == 0xffffffff); tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK); } @@ -775,19 +770,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) static u32 adlp_tc_phy_hpd_live_status(struct 
intel_tc_port *tc) { struct intel_display *display = to_intel_display(tc->dig_port); - struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum hpd_pin hpd_pin = dig_port->base.hpd_pin; - u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin]; - u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; + u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin]; + u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin]; intel_wakeref_t wakeref; u32 cpu_isr; u32 pch_isr; u32 mask = 0; with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) { - cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR); - pch_isr = intel_de_read(i915, SDEISR); + cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR); + pch_isr = intel_de_read(display, SDEISR); } if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK)) @@ -810,15 +804,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) */ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); u32 val; assert_display_core_power_enabled(tc); - val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); + val = intel_de_read(display, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY in TCCOLD, assuming not ready\n", tc->port_name); return false; @@ -830,12 +824,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum port port = tc->dig_port->base.port; assert_tc_port_power_enabled(tc); - intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, + intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, take ? 
DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); return true; @@ -843,13 +837,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum port port = tc->dig_port->base.port; u32 val; assert_tc_port_power_enabled(tc); - val = intel_de_read(i915, DDI_BUF_CTL(port)); + val = intel_de_read(display, DDI_BUF_CTL(port)); return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; } @@ -872,7 +866,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { struct intel_display *display = to_intel_display(tc->dig_port); - struct drm_i915_private *i915 = tc_to_i915(tc); enum intel_display_power_domain port_power_domain = tc_port_power_domain(tc); intel_wakeref_t port_wakeref; @@ -885,15 +878,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) port_wakeref = intel_display_power_get(display, port_power_domain); if (!adlp_tc_phy_take_ownership(tc, true) && - !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { - drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n", + !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n", tc->port_name); goto out_put_port_power; } if (!tc_phy_is_ready(tc) && - !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", + !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(display->drm, "Port %s: PHY not ready\n", tc->port_name); goto out_release_phy; } @@ -965,19 +958,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = { static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) { struct intel_display *display = to_intel_display(tc->dig_port); - struct drm_i915_private *i915 = tc_to_i915(tc); struct intel_digital_port *dig_port = tc->dig_port; enum hpd_pin hpd_pin = dig_port->base.hpd_pin; - u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin]; - u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; + u32 pica_isr_bits = display->hotplug.hpd[hpd_pin]; + u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin]; intel_wakeref_t wakeref; u32 pica_isr; u32 pch_isr; u32 mask = 0; with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) { - pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR); - pch_isr = intel_de_read(i915, SDEISR); + pica_isr = intel_de_read(display, PICAINTERRUPT_ISR); + pch_isr = intel_de_read(display, SDEISR); } if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK)) @@ -994,22 +986,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) static bool xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum port port = tc->dig_port->base.port; - i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port); assert_tc_cold_blocked(tc); - return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE; + return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE; } static bool xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) 
== enabled, 5)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: timeout waiting for TCSS power to get %s\n", str_enabled_disabled(enabled), tc->port_name); @@ -1069,7 +1061,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); __xelpdp_tc_phy_enable_tcss_power(tc, enable); @@ -1082,7 +1074,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl return true; out_disable: - if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) + if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) return false; if (!enable) @@ -1096,35 +1088,35 @@ out_disable: static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum port port = tc->dig_port->base.port; - i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port); u32 val; assert_tc_cold_blocked(tc); - val = intel_de_read(i915, reg); + val = intel_de_read(display, reg); if (take) val |= XELPDP_TC_PHY_OWNERSHIP; else val &= ~XELPDP_TC_PHY_OWNERSHIP; - intel_de_write(i915, reg, val); + intel_de_write(display, reg, val); } static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum port port = tc->dig_port->base.port; - i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port); + i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port); assert_tc_cold_blocked(tc); - return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP; + return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP; } static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); intel_wakeref_t tc_cold_wref; enum intel_display_power_domain domain; @@ -1134,7 +1126,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) if (tc->mode != TC_PORT_DISCONNECTED) tc->lock_wakeref = tc_cold_block(tc); - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); @@ -1207,13 +1199,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc) static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 mask; mask = tc->phy_ops->hpd_live_status(tc); /* The sink can be connected only in a single mode. 
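The timeout handling here (and in tc_phy_wait_for_ready() further down) is built on i915's wait_for(COND, timeout_ms) polling macro. A rough userspace analogue of the pattern, assuming ~1 ms polling granularity; the kernel macro differs in detail:

#include <stdbool.h>
#include <time.h>

/* Poll cond(ctx) until it holds or roughly timeout_ms has elapsed. */
static bool poll_until(bool (*cond)(void *ctx), void *ctx, int timeout_ms)
{
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000000 };

        for (int elapsed_ms = 0; elapsed_ms <= timeout_ms; elapsed_ms++) {
                if (cond(ctx))
                        return true;
                nanosleep(&delay, NULL);        /* ~1 ms between polls */
        }
        return false;
}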
*/ - drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1); + drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1); return mask; } @@ -1236,9 +1228,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc) static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, bool phy_is_ready, bool phy_is_owned) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); - drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); + drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); return phy_is_ready && phy_is_owned; } @@ -1246,8 +1238,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, static bool tc_phy_is_connected(struct intel_tc_port *tc, enum icl_port_dpll_id port_pll_type) { - struct intel_encoder *encoder = &tc->dig_port->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(tc->dig_port); bool phy_is_ready = tc_phy_is_ready(tc); bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; @@ -1257,7 +1248,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", tc->port_name, str_yes_no(is_connected), @@ -1270,10 +1261,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); if (wait_for(tc_phy_is_ready(tc), 500)) { - drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", + drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n", tc->port_name); return false; @@ -1343,7 +1334,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc, static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); bool phy_is_ready; bool phy_is_owned; @@ -1363,11 +1354,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc) if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { - drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); + drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT); mode = get_tc_mode_in_phy_owned_state(tc, live_mode); } - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", tc->port_name, tc_port_mode_name(mode), @@ -1407,7 +1398,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc) static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); u32 live_status_mask = tc_phy_hpd_live_status(tc); bool connected; @@ -1421,7 +1412,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) connected = tc->phy_ops->connect(tc, required_lanes); } - drm_WARN_ON(&i915->drm, !connected); + drm_WARN_ON(display->drm, !connected); } static void tc_phy_disconnect(struct intel_tc_port *tc) @@ -1491,12 +1482,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc) static bool tc_port_is_enabled(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct 
intel_display *display = to_intel_display(tc->dig_port); struct intel_digital_port *dig_port = tc->dig_port; assert_tc_port_power_enabled(tc); - return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & + return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) & DDI_BUF_CTL_ENABLE; } @@ -1509,15 +1500,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc) */ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); bool update_mode = false; mutex_lock(&tc->lock); - drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); - drm_WARN_ON(&i915->drm, tc->lock_wakeref); - drm_WARN_ON(&i915->drm, tc->link_refcount); + drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED); + drm_WARN_ON(display->drm, tc->lock_wakeref); + drm_WARN_ON(display->drm, tc->link_refcount); tc_phy_get_hw_state(tc); /* @@ -1540,8 +1531,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) if (!tc_port_is_enabled(tc)) { update_mode = true; } else if (tc->mode == TC_PORT_DISCONNECTED) { - drm_WARN_ON(&i915->drm, !tc->legacy_port); - drm_err(&i915->drm, + drm_WARN_ON(display->drm, !tc->legacy_port); + drm_err(display->drm, "Port %s: PHY disconnected on enabled port, connecting it\n", tc->port_name); update_mode = true; @@ -1556,28 +1547,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) mutex_unlock(&tc->lock); } -static bool tc_port_has_active_links(struct intel_tc_port *tc, - const struct intel_crtc_state *crtc_state) +static bool tc_port_has_active_streams(struct intel_tc_port *tc, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); struct intel_digital_port *dig_port = tc->dig_port; enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; - int active_links = 0; + int active_streams = 0; if (dig_port->dp.is_mst) { /* TODO: get the PLL type for MST, once HW readout is done for it. 
*/ - active_links = intel_dp_mst_encoder_active_links(dig_port); + active_streams = intel_dp_mst_active_streams(&dig_port->dp); } else if (crtc_state && crtc_state->hw.active) { pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state); - active_links = 1; + active_streams = 1; } - if (active_links && !tc_phy_is_connected(tc, pll_type)) - drm_err(&i915->drm, - "Port %s: PHY disconnected with %d active link(s)\n", - tc->port_name, active_links); + if (active_streams && !tc_phy_is_connected(tc, pll_type)) + drm_err(display->drm, + "Port %s: PHY disconnected with %d active stream(s)\n", + tc->port_name, active_streams); - return active_links; + return active_streams; } /** @@ -1595,13 +1586,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc, void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); mutex_lock(&tc->lock); - drm_WARN_ON(&i915->drm, tc->link_refcount != 1); - if (!tc_port_has_active_links(tc, crtc_state)) { + drm_WARN_ON(display->drm, tc->link_refcount != 1); + if (!tc_port_has_active_streams(tc, crtc_state)) { /* * TBT-alt is the default mode in any case the PHY ownership is not * held (regardless of the sink's connected live state), so @@ -1610,7 +1601,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, */ if (tc->init_mode != TC_PORT_TBT_ALT && tc->init_mode != TC_PORT_DISCONNECTED) - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", tc->port_name, tc_port_mode_name(tc->init_mode)); @@ -1618,7 +1609,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, __intel_tc_port_put_link(tc); } - drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", + drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n", tc->port_name, tc_port_mode_name(tc->mode)); @@ -1637,12 +1628,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, */ bool intel_tc_port_connected(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_tc_port *tc = to_tc_port(dig_port); u32 mask = ~0; - drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); + drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port)); if (tc->mode != TC_PORT_DISCONNECTED) mask = BIT(tc->mode); @@ -1677,14 +1668,14 @@ static int reset_link_commit(struct intel_tc_port *tc, struct intel_atomic_state *state, struct drm_modeset_acquire_ctx *ctx) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); struct intel_digital_port *dig_port = tc->dig_port; struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base); struct intel_crtc *crtc; u8 pipe_mask; int ret; - ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx); + ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx); if (ret) return ret; @@ -1695,7 +1686,7 @@ static int reset_link_commit(struct intel_tc_port *tc, if (!pipe_mask) return 0; - for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) { struct intel_crtc_state *crtc_state; crtc_state = 
intel_atomic_get_crtc_state(&state->base, crtc); @@ -1713,13 +1704,13 @@ static int reset_link_commit(struct intel_tc_port *tc, static int reset_link(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *_state; struct intel_atomic_state *state; int ret; - _state = drm_atomic_state_alloc(&i915->drm); + _state = drm_atomic_state_alloc(display->drm); if (!_state) return -ENOMEM; @@ -1738,21 +1729,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work) { struct intel_tc_port *tc = container_of(work, struct intel_tc_port, link_reset_work.work); - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); int ret; if (!__intel_tc_port_link_needs_reset(tc)) return; - mutex_lock(&i915->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Port %s: TypeC DP-alt sink disconnected, resetting link\n", tc->port_name); ret = reset_link(tc); - drm_WARN_ON(&i915->drm, ret); + drm_WARN_ON(display->drm, ret); - mutex_unlock(&i915->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); } bool intel_tc_port_link_reset(struct intel_digital_port *dig_port) @@ -1780,7 +1771,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port) static void __intel_tc_port_lock(struct intel_tc_port *tc, int required_lanes) { - struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_display *display = to_intel_display(tc->dig_port); mutex_lock(&tc->lock); @@ -1790,9 +1781,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc, intel_tc_port_update_mode(tc, required_lanes, false); - drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); - drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && - !tc_phy_is_owned(tc)); + drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED); + drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc)); } void intel_tc_port_lock(struct intel_digital_port *dig_port) @@ -1885,12 +1875,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port) int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc; enum port port = dig_port->base.port; enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); - if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) + if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE)) return -EINVAL; tc = kzalloc(sizeof(*tc), GFP_KERNEL); @@ -1900,11 +1890,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) dig_port->tc = tc; tc->dig_port = dig_port; - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) tc->phy_ops = &xelpdp_tc_phy_ops; - else if (DISPLAY_VER(i915) >= 13) + else if (DISPLAY_VER(display) >= 13) tc->phy_ops = &adlp_tc_phy_ops; - else if (DISPLAY_VER(i915) >= 12) + else if (DISPLAY_VER(display) >= 12) tc->phy_ops = &tgl_tc_phy_ops; else tc->phy_ops = &icl_tc_phy_ops; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 5dbe857ea85b..2e3f3f0207e8 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1594,7 +1594,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, 
/* Disable TV interrupts around load detect or we'll recurse */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); - i915_disable_pipestat(dev_priv, 0, + i915_disable_pipestat(display, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); @@ -1669,7 +1669,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, 0, + i915_enable_pipestat(display, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 7b240ce681a0..139fa5deba80 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -224,12 +224,13 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) */ if (DISPLAY_VER(display) >= 20 || display->platform.battlemage) return 1; - else if (DISPLAY_VER(display) == 2) - return -1; - else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return 2; - else + else if (DISPLAY_VER(display) >= 9 || + display->platform.broadwell || display->platform.haswell) + return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1; + else if (DISPLAY_VER(display) >= 3) return 1; + else + return -1; } /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 3ed64c17bdff..8e799e225af1 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -9,6 +9,7 @@ #include <drm/display/drm_dsc_helper.h> #include <drm/drm_fixed.h> +#include <drm/drm_print.h> #include "i915_utils.h" #include "intel_crtc.h" @@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config return 0; } +static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state) +{ + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + + return vdsc_cfg->dsc_version_major == 1 && + vdsc_cfg->dsc_version_minor == 1 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI); +} + int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(pipe_config); @@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) * From XE_LPD onwards we support compression bpps in steps of 1 * up to uncompressed bpp-1, hence add calculations for all the rc * parameters + * + * We don't want to calculate all rc parameters when the panel + * is MIPI DSI and it's using DSC 1.1. The reason is that some + * DSI panel vendors have hardcoded PPS params in the VBT, causing + * the parameters sent from the source, which are derived through + * interpolation, to differ from the params the panel expects. + * This causes noise in the display. + * Furthermore, for DSI panels we are currently using bits_per_pixel + * (compressed bpp) hardcoded from the VBT (unlike other encoders, where we + * find the optimum compressed bpp), so we don't need to rely on interpolation, + * as we can get the required rc parameters from the tables.
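The condition applied just below gates the interpolated rc-parameter path on both the display version and the new is_dsi_dsc_1_1() check. A sketch of that decision with plain scalar inputs (the driver reads these from the crtc state):

#include <stdbool.h>

/*
 * True when rc parameters should be interpolated rather than taken from
 * the fixed DSC tables, per the reasoning in the comment above.
 */
static bool use_interpolated_rc_params(int display_ver, bool is_dsi,
                                       int dsc_major, int dsc_minor)
{
        bool dsi_dsc_1_1 = is_dsi && dsc_major == 1 && dsc_minor == 1;

        /* XE_LPD+ interpolates, except DSI with DSC 1.1, which must
         * match the VBT-provided PPS and so uses the tables. */
        return display_ver >= 13 && !dsi_dsc_1_1;
}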
*/ - if (DISPLAY_VER(display) >= 13) { + if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) { calculate_rc_params(vdsc_cfg); } else { if ((compressed_bpp == 8 || diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index cac49319026d..633a66f6b73b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -4,6 +4,8 @@ * */ +#include <drm/drm_print.h> + #include "i915_reg.h" #include "intel_de.h" #include "intel_display_types.h" @@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector) return false; fallthrough; case DRM_MODE_CONNECTOR_DisplayPort: + if (connector->mst.dp) + return false; intel_dp = intel_attached_dp(connector); if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd)) @@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state) int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - if (!HAS_CMRR(display)) + /* Avoid CMRR for now until we have VRR with fixed timings working */ + if (!HAS_CMRR(display) || true) return false; actual_refresh_k = @@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required) return vtotal; } +static +void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state) +{ + crtc_state->cmrr.enable = true; + /* + * TODO: Compute precise target refresh rate to determine + * if video_mode_required should be true. Currently set to + * false due to uncertainty about the precise target + * refresh rate. + */ + crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false); + crtc_state->vrr.vmin = crtc_state->vrr.vmax; + crtc_state->vrr.flipline = crtc_state->vrr.vmin; + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; +} + +static +void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) +{ + crtc_state->vrr.enable = true; + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; +} + +/* + * For fixed refresh rate mode, Vmin, Vmax and Flipline are all set to + * the Vtotal value.
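The helpers that follow collapse Vmin, Vmax and Flipline onto one vtotal for fixed refresh rate. A standalone sketch of the relationship, with plain ints standing in for the crtc state (vblank_delay models intel_vrr_real_vblank_delay(), flipline_offset models intel_vrr_flipline_offset()):

struct fixed_rr_regs {
        int vmin, vmax, flipline;
};

static struct fixed_rr_regs
compute_fixed_rr_regs(int display_ver, int crtc_vtotal,
                      int vblank_delay, int flipline_offset)
{
        /* Pre display ver 13 hardware counts vtotal minus vblank delay. */
        int vtotal = display_ver >= 13 ? crtc_vtotal
                                       : crtc_vtotal - vblank_delay;

        return (struct fixed_rr_regs) {
                .vmin = vtotal - flipline_offset, /* flipline >= vmin + offset */
                .vmax = vtotal,
                .flipline = vtotal,
        };
}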
+ */ +static +int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal; + + if (DISPLAY_VER(display) >= 13) + return crtc_vtotal; + else + return crtc_vtotal - + intel_vrr_real_vblank_delay(crtc_state); +} + +static +int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_fixed_rr_vtotal(crtc_state); +} + +static +int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + return intel_vrr_fixed_rr_vtotal(crtc_state) - + intel_vrr_flipline_offset(display); +} + +static +int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_fixed_rr_vtotal(crtc_state); +} + +void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!intel_vrr_possible(crtc_state)) + return; + + intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), + intel_vrr_fixed_rr_vmin(crtc_state) - 1); + intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), + intel_vrr_fixed_rr_vmax(crtc_state) - 1); + intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), + intel_vrr_fixed_rr_flipline(crtc_state) - 1); +} + +static +void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state) +{ + /* + * For fixed rr, vmin = vmax = flipline. + * vmin is already set to crtc_vtotal; set vmax and flipline the same. + */ + crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal; + crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal; +} + +static +int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state) +{ + /* + * To make fixed rr and vrr work seamlessly, the guardband/pipeline full + * should be set such that it satisfies both the fixed and variable + * timings. + * For this, set vmin to crtc_vtotal. With this we never need to + * change anything to do with the guardband. + */ + return crtc_state->hw.adjusted_mode.crtc_vtotal; +} + +static +int intel_vrr_compute_vmax(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_info *info = &connector->base.display_info; + int vmax; + + vmax = adjusted_mode->crtc_clock * 1000 / + (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq); + vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal); + + return vmax; +} + void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) @@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct intel_dp *intel_dp = intel_attached_dp(connector); bool is_edp = intel_dp_is_edp(intel_dp); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - const struct drm_display_info *info = &connector->base.display_info; int vmin, vmax; - /* - * FIXME all joined pipes share the same transcoder. - * Need to account for that during VRR toggle/push/etc.
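A worked example of intel_vrr_compute_vmax() above, assuming a CEA 1080p60 mode (crtc_clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal = 1125) and a sink advertising min_vfreq = 48 Hz:

/* crtc_clock is in kHz, hence the factor of 1000. */
int vmax = 148500 * 1000 / (2200 * 48);         /* = 1406 scanlines */

/*
 * max_t(int, 1406, 1125) keeps vmax no shorter than the base frame;
 * 148500000 / (2200 * 1406) ~= 48.0 Hz at the longest allowed frame.
 */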
- */ - if (crtc_state->joiner_pipes) + if (!HAS_VRR(display)) return; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, crtc_state->vrr.in_range = intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode)); - if (!crtc_state->vrr.in_range) - return; - - if (HAS_LRR(display)) - crtc_state->update_lrr = true; - vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, - adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); - vmax = adjusted_mode->crtc_clock * 1000 / - (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq); + /* + * Allow fixed refresh rate with VRR Timing Generator. + * For now set the vrr.in_range to 0, to allow fixed_rr but skip actual + * VRR and LRR. + * #TODO For actual VRR with joiner, we need to figure out how to + * correctly sequence transcoder level stuff vs. pipe level stuff + * in the commit. + */ + if (crtc_state->joiner_pipes) + crtc_state->vrr.in_range = false; - vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal); - vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal); + vmin = intel_vrr_compute_vmin(crtc_state); - if (vmin >= vmax) - return; + if (crtc_state->vrr.in_range) { + if (HAS_LRR(display)) + crtc_state->update_lrr = true; + vmax = intel_vrr_compute_vmax(connector, adjusted_mode); + } else { + vmax = vmin; + } crtc_state->vrr.vmin = vmin; crtc_state->vrr.vmax = vmax; crtc_state->vrr.flipline = crtc_state->vrr.vmin; + if (crtc_state->uapi.vrr_enabled && vmin < vmax) + intel_vrr_compute_vrr_timings(crtc_state); + else if (is_cmrr_frac_required(crtc_state) && is_edp) + intel_vrr_compute_cmrr_timings(crtc_state); + else + intel_vrr_compute_fixed_rr_timings(crtc_state); + /* * flipline determines the min vblank length the hardware will * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce @@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - /* - * When panel is VRR capable and userspace has - * not enabled adaptive sync mode then Fixed Average - * Vtotal mode should be enabled. - */ - if (crtc_state->uapi.vrr_enabled) { - crtc_state->vrr.enable = true; - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; - } else if (is_cmrr_frac_required(crtc_state) && is_edp) { - crtc_state->vrr.enable = true; - crtc_state->cmrr.enable = true; - /* - * TODO: Compute precise target refresh rate to determine - * if video_mode_required should be true. Currently set to - * false due to uncertainty about the precise target - * refresh Rate. 
- */ - crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false); - crtc_state->vrr.vmin = crtc_state->vrr.vmax; - crtc_state->vrr.flipline = crtc_state->vrr.vmin; - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; - } - if (HAS_AS_SDP(display)) { crtc_state->vrr.vsync_start = (crtc_state->hw.adjusted_mode.crtc_vtotal - @@ -380,14 +484,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state) lower_32_bits(crtc_state->cmrr.cmrr_n)); } - intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), - crtc_state->vrr.vmin - 1); - intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), - crtc_state->vrr.vmax - 1); - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - trans_vrr_ctl(crtc_state)); - intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), - crtc_state->vrr.flipline - 1); + intel_vrr_set_fixed_rr_timings(crtc_state); + + if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable) + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), + trans_vrr_ctl(crtc_state)); if (HAS_AS_SDP(display)) intel_de_write(display, @@ -461,6 +562,17 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND; } +bool intel_vrr_always_use_vrr_tg(struct intel_display *display) +{ + if (!HAS_VRR(display)) + return false; + + if (DISPLAY_VER(display) >= 30) + return true; + + return false; +} + void intel_vrr_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -469,16 +581,25 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state) if (!crtc_state->vrr.enable) return; + intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), + crtc_state->vrr.vmin - 1); + intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), + crtc_state->vrr.vmax - 1); + intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), + crtc_state->vrr.flipline - 1); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN); - if (crtc_state->cmrr.enable) { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE | - trans_vrr_ctl(crtc_state)); - } else { - intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), - VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); + if (!intel_vrr_always_use_vrr_tg(display)) { + if (crtc_state->cmrr.enable) { + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), + VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE | + trans_vrr_ctl(crtc_state)); + } else { + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), + VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state)); + } } } @@ -490,24 +611,77 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) if (!old_crtc_state->vrr.enable) return; + if (!intel_vrr_always_use_vrr_tg(display)) { + intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), + trans_vrr_ctl(old_crtc_state)); + intel_de_wait_for_clear(display, + TRANS_VRR_STATUS(display, cpu_transcoder), + VRR_STATUS_VRR_EN_LIVE, 1000); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); + } + + intel_vrr_set_fixed_rr_timings(old_crtc_state); +} + +void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!HAS_VRR(display)) + return; + + if 
(!intel_vrr_possible(crtc_state))
+                return;
+
+        if (!intel_vrr_always_use_vrr_tg(display)) {
+                intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+                               trans_vrr_ctl(crtc_state));
+                return;
+        }
+
+        intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+                       TRANS_PUSH_EN);
+
         intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
-                       trans_vrr_ctl(old_crtc_state));
-        intel_de_wait_for_clear(display,
-                                TRANS_VRR_STATUS(display, cpu_transcoder),
+                       VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+        struct intel_display *display = to_intel_display(crtc_state);
+        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+        if (!HAS_VRR(display))
+                return;
+
+        if (!intel_vrr_possible(crtc_state))
+                return;
+
+        intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+        intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
                                 VRR_STATUS_VRR_EN_LIVE, 1000);
         intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
 }
 
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+        return crtc_state->vrr.flipline &&
+               crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+               crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
+
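Read together with intel_vrr_set_fixed_rr_timings() earlier in the patch, the predicate is straightforward: fixed refresh rate means the variable window has collapsed, so flipline, vmax and the vmin-derived flipline all name the same scanline. A small stand-alone sketch of that check (illustrative only; the struct below is a stand-in for the relevant intel_crtc_state fields, and the plain vmin comparison ignores the ICL/TGL flipline offset that intel_vrr_vmin_flipline() accounts for):

        #include <stdbool.h>
        #include <stdio.h>

        struct vrr_timings {
                int vmin, vmax, flipline;
        };

        static bool is_fixed_rr(const struct vrr_timings *t)
        {
                /* flipline == vmax == vmin-derived flipline: no variable range left */
                return t->flipline &&
                       t->flipline == t->vmax &&
                       t->flipline == t->vmin; /* stand-in for vmin_flipline() */
        }

        int main(void)
        {
                struct vrr_timings fixed = { 1125, 1125, 1125 };    /* all = vtotal */
                struct vrr_timings variable = { 1125, 1406, 1125 }; /* vmax stretched */

                /* prints "fixed: 1, variable: 0" */
                printf("fixed: %d, variable: %d\n",
                       is_fixed_rr(&fixed), is_fixed_rr(&variable));
                return 0;
        }

This is what lets the readout path below distinguish "VRR timing generator running a fixed rate" from genuinely variable timings.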
 void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
 {
         struct intel_display *display = to_intel_display(crtc_state);
         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
         u32 trans_vrr_ctl, trans_vrr_vsync;
+        bool vrr_enable;
 
         trans_vrr_ctl = intel_de_read(display,
                                       TRANS_VRR_CTL(display, cpu_transcoder));
 
-        crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
 
         if (HAS_CMRR(display))
                 crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +710,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
                 crtc_state->vrr.vmin = intel_de_read(display,
                                                      TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
 
+        /*
+         * For platforms that always use the VRR Timing Generator, the
+         * VTOTAL.Vtotal bits are not filled. Since for these platforms
+         * TRANS_VRR_VMIN is always filled with crtc_vtotal, use
+         * TRANS_VRR_VMIN to get the vtotal for adjusted_mode.
+         */
+        if (intel_vrr_always_use_vrr_tg(display))
+                crtc_state->hw.adjusted_mode.crtc_vtotal =
+                        intel_vrr_vmin_vtotal(crtc_state);
+
         if (HAS_AS_SDP(display)) {
                 trans_vrr_vsync =
                         intel_de_read(display,
@@ -547,6 +731,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
                 }
         }
 
+        vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+        if (intel_vrr_always_use_vrr_tg(display))
+                crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+        else
+                crtc_state->vrr.enable = vrr_enable;
+
+        /*
+         * TODO: For both VRR and CMRR, the flag I915_MODE_FLAG_VRR is set in
+         * mode_flags. Since CMRR is currently disabled, set this flag for VRR
+         * for now. Need to keep this in mind while re-enabling CMRR.
+         */
         if (crtc_state->vrr.enable)
                 crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
 struct intel_connector;
 struct intel_crtc_state;
 struct intel_dsb;
+struct intel_display;
 
 bool intel_vrr_is_capable(struct intel_connector *connector);
 bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
 int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
 int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
 int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
 
 #endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
 
 #include <linux/debugfs.h>
 
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
 #include "i9xx_wm.h"
+#include "intel_display_core.h"
 #include "intel_display_types.h"
 #include "intel_wm.h"
 #include "skl_watermark.h"
 
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
  *
  * Calculate watermark values for the various WM regs based on current mode
  * and plane configuration.
@@ -44,10 +47,10 @@
  * We don't use the sprite, so we can ignore that. And on Crestline we have
  * to set the non-SR watermarks to 8.
*/ -void intel_update_watermarks(struct drm_i915_private *i915) +void intel_update_watermarks(struct intel_display *display) { - if (i915->display.funcs.wm->update_wm) - i915->display.funcs.wm->update_wm(i915); + if (display->funcs.wm->update_wm) + display->funcs.wm->update_wm(display); } int intel_wm_compute(struct intel_atomic_state *state, @@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state, bool intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - if (i915->display.funcs.wm->initial_watermarks) { - i915->display.funcs.wm->initial_watermarks(state, crtc); + if (display->funcs.wm->initial_watermarks) { + display->funcs.wm->initial_watermarks(state, crtc); return true; } @@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state, void intel_atomic_update_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - if (i915->display.funcs.wm->atomic_update_watermarks) - i915->display.funcs.wm->atomic_update_watermarks(state, crtc); + if (display->funcs.wm->atomic_update_watermarks) + display->funcs.wm->atomic_update_watermarks(state, crtc); } void intel_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - if (i915->display.funcs.wm->optimize_watermarks) - i915->display.funcs.wm->optimize_watermarks(state, crtc); + if (display->funcs.wm->optimize_watermarks) + display->funcs.wm->optimize_watermarks(state, crtc); } int intel_compute_global_watermarks(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); - if (i915->display.funcs.wm->compute_global_watermarks) - return i915->display.funcs.wm->compute_global_watermarks(state); + if (display->funcs.wm->compute_global_watermarks) + return display->funcs.wm->compute_global_watermarks(state); return 0; } -void intel_wm_get_hw_state(struct drm_i915_private *i915) +void intel_wm_get_hw_state(struct intel_display *display) { - if (i915->display.funcs.wm->get_hw_state) - return i915->display.funcs.wm->get_hw_state(i915); + if (display->funcs.wm->get_hw_state) + return display->funcs.wm->get_hw_state(display); } -void intel_wm_sanitize(struct drm_i915_private *i915) +void intel_wm_sanitize(struct intel_display *display) { - if (i915->display.funcs.wm->sanitize) - return i915->display.funcs.wm->sanitize(i915); + if (display->funcs.wm->sanitize) + return display->funcs.wm->sanitize(display); } bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, @@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, return plane_state->uapi.visible; } -void intel_print_wm_latency(struct drm_i915_private *dev_priv, +void intel_print_wm_latency(struct intel_display *display, const char *name, const u16 wm[]) { int level; - for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + for (level = 0; level < display->wm.num_levels; level++) { unsigned int latency = wm[level]; if (latency == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "%s WM%d latency not provided\n", name, level); continue; @@ -156,43 +159,43 @@ void 
intel_print_wm_latency(struct drm_i915_private *dev_priv,
          * - latencies are in us on gen9.
          * - before then, WM1+ latency values are in 0.5us units
          */
-                if (DISPLAY_VER(dev_priv) >= 9)
+                if (DISPLAY_VER(display) >= 9)
                         latency *= 10;
                 else if (level > 0)
                         latency *= 5;
 
-                drm_dbg_kms(&dev_priv->drm,
+                drm_dbg_kms(display->drm,
                             "%s WM%d latency %u (%u.%u usec)\n", name, level,
                             wm[level], latency / 10, latency % 10);
         }
 }
 
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
 {
-        if (DISPLAY_VER(i915) >= 9)
-                skl_wm_init(i915);
+        if (DISPLAY_VER(display) >= 9)
+                skl_wm_init(display);
         else
-                i9xx_wm_init(i915);
+                i9xx_wm_init(display);
 }
 
 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
 {
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         int level;
 
-        drm_modeset_lock_all(&dev_priv->drm);
+        drm_modeset_lock_all(display->drm);
 
-        for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+        for (level = 0; level < display->wm.num_levels; level++) {
                 unsigned int latency = wm[level];
 
                 /*
                  * - WM1+ latency values in 0.5us units
                  * - latencies are in us on gen9/vlv/chv
                  */
-                if (DISPLAY_VER(dev_priv) >= 9 ||
-                    IS_VALLEYVIEW(dev_priv) ||
-                    IS_CHERRYVIEW(dev_priv) ||
-                    IS_G4X(dev_priv))
+                if (DISPLAY_VER(display) >= 9 ||
+                    display->platform.valleyview ||
+                    display->platform.cherryview ||
+                    display->platform.g4x)
                         latency *= 10;
                 else if (level > 0)
                         latency *= 5;
 
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
                            level, wm[level], latency / 10, latency % 10);
         }
 
-        drm_modeset_unlock_all(&dev_priv->drm);
+        drm_modeset_unlock_all(display->drm);
 }
 
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         const u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.pri_latency;
+                latencies = display->wm.pri_latency;
 
         wm_latency_show(m, latencies);
 
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         const u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.spr_latency;
+                latencies = display->wm.spr_latency;
 
         wm_latency_show(m, latencies);
 
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         const u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.cur_latency;
+                latencies = display->wm.cur_latency;
 
         wm_latency_show(m, latencies);
 
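All three show() hooks funnel into wm_latency_show(), whose real job is unit conversion before printing: raw latencies are kept in 1 us units on gen9+/vlv/chv/g4x and in 0.5 us units for WM1+ on older platforms, so they are scaled to 0.1 us units for the "%u.%u usec" output. A compilable sketch of just that scaling (illustrative only, not from the patch; that pre-gen9 WM0 values are already in 0.1 us units is an assumption inferred from the unscaled else-path, not something the patch states):

        #include <stdbool.h>
        #include <stdio.h>

        static void print_latency(int level, unsigned int raw, bool gen9_plus)
        {
                unsigned int tenths;

                if (gen9_plus)
                        tenths = raw * 10;      /* 1 us units -> 0.1 us units */
                else if (level > 0)
                        tenths = raw * 5;       /* 0.5 us units -> 0.1 us units */
                else
                        tenths = raw;           /* assumed already 0.1 us units */

                printf("WM%d latency %u (%u.%u usec)\n",
                       level, raw, tenths / 10, tenths % 10);
        }

        int main(void)
        {
                print_latency(1, 4, false);     /* pre-gen9 WM1: 4 -> 2.0 usec */
                print_latency(1, 4, true);      /* gen9+   WM1: 4 -> 4.0 usec */
                return 0;
        }

The same raw value thus reads back as a different physical latency depending on platform, which is exactly why the conversion lives in one shared helper.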
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
 
 static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
-        struct drm_i915_private *dev_priv = inode->i_private;
+        struct intel_display *display = inode->i_private;
 
-        if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+        if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
                 return -ENODEV;
 
-        return single_open(file, pri_wm_latency_show, dev_priv);
+        return single_open(file, pri_wm_latency_show, display);
 }
 
 static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
-        struct drm_i915_private *dev_priv = inode->i_private;
+        struct intel_display *display = inode->i_private;
 
-        if (HAS_GMCH(dev_priv))
+        if (HAS_GMCH(display))
                 return -ENODEV;
 
-        return single_open(file, spr_wm_latency_show, dev_priv);
+        return single_open(file, spr_wm_latency_show, display);
 }
 
 static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
-        struct drm_i915_private *dev_priv = inode->i_private;
+        struct intel_display *display = inode->i_private;
 
-        if (HAS_GMCH(dev_priv))
+        if (HAS_GMCH(display))
                 return -ENODEV;
 
-        return single_open(file, cur_wm_latency_show, dev_priv);
+        return single_open(file, cur_wm_latency_show, display);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                 size_t len, loff_t *offp, u16 wm[8])
 {
         struct seq_file *m = file->private_data;
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         u16 new[8] = {};
         int level;
         int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
         ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                      &new[0], &new[1], &new[2], &new[3],
                      &new[4], &new[5], &new[6], &new[7]);
-        if (ret != dev_priv->display.wm.num_levels)
+        if (ret != display->wm.num_levels)
                 return -EINVAL;
 
-        drm_modeset_lock_all(&dev_priv->drm);
+        drm_modeset_lock_all(display->drm);
 
-        for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+        for (level = 0; level < display->wm.num_levels; level++)
                 wm[level] = new[level];
 
-        drm_modeset_unlock_all(&dev_priv->drm);
+        drm_modeset_unlock_all(display->drm);
 
         return len;
 }
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
 {
         struct seq_file *m = file->private_data;
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.pri_latency;
+                latencies = display->wm.pri_latency;
 
         return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
 {
         struct seq_file *m = file->private_data;
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.spr_latency;
+                latencies = display->wm.spr_latency;
 
         return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
 {
         struct seq_file *m = file->private_data;
-        struct drm_i915_private *dev_priv = m->private;
+        struct intel_display *display = m->private;
         u16 *latencies;
 
-        if (DISPLAY_VER(dev_priv) >= 9)
-                latencies = dev_priv->display.wm.skl_latency;
+        if (DISPLAY_VER(display) >= 9)
+                latencies = display->wm.skl_latency;
         else
-                latencies = dev_priv->display.wm.cur_latency;
+
latencies = display->wm.cur_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = { .write = cur_wm_latency_write }; -void intel_wm_debugfs_register(struct drm_i915_private *i915) +void intel_wm_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, - i915, &i915_pri_wm_latency_fops); + display, &i915_pri_wm_latency_fops); debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, - i915, &i915_spr_wm_latency_fops); + display, &i915_spr_wm_latency_fops); debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, - i915, &i915_cur_wm_latency_fops); + display, &i915_cur_wm_latency_fops); - skl_watermark_debugfs_register(i915); + skl_watermark_debugfs_register(display); } diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h index 7d3a447054b3..9ad4e9eae5ca 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.h +++ b/drivers/gpu/drm/i915/display/intel_wm.h @@ -8,13 +8,13 @@ #include <linux/types.h> -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_plane_state; -void intel_update_watermarks(struct drm_i915_private *i915); +void intel_update_watermarks(struct intel_display *display); int intel_wm_compute(struct intel_atomic_state *state, struct intel_crtc *crtc); bool intel_initial_watermarks(struct intel_atomic_state *state, @@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state, void intel_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_compute_global_watermarks(struct intel_atomic_state *state); -void intel_wm_get_hw_state(struct drm_i915_private *i915); -void intel_wm_sanitize(struct drm_i915_private *i915); +void intel_wm_get_hw_state(struct intel_display *display); +void intel_wm_sanitize(struct intel_display *display); bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_print_wm_latency(struct drm_i915_private *i915, +void intel_print_wm_latency(struct intel_display *display, const char *name, const u16 wm[]); -void intel_wm_init(struct drm_i915_private *i915); -void intel_wm_debugfs_register(struct drm_i915_private *i915); +void intel_wm_init(struct intel_display *display); +void intel_wm_debugfs_register(struct intel_display *display); #endif /* __INTEL_WM_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 70e550539bb2..8739195aba69 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -2689,22 +2689,24 @@ static const struct drm_plane_funcs tgl_plane_funcs = { static void skl_plane_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } static void skl_plane_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = 
to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 621e97943542..8080f777910a 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -19,6 +19,7 @@ #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fixed.h" @@ -34,7 +35,7 @@ */ #define DSB_EXE_TIME 100 -static void skl_sagv_disable(struct drm_i915_private *i915); +static void skl_sagv_disable(struct intel_display *display); /* Stores plane specific WM parameters */ struct skl_wm_params { @@ -69,23 +70,21 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display) * FIXME: We still don't have the proper code detect if we need to apply the WA, * so assume we'll always need it in order to avoid underruns. */ -static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) +static bool skl_needs_memory_bw_wa(struct intel_display *display) { - return DISPLAY_VER(i915) == 9; + return DISPLAY_VER(display) == 9; } bool -intel_has_sagv(struct drm_i915_private *i915) +intel_has_sagv(struct intel_display *display) { - struct intel_display *display = &i915->display; - return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED; } static u32 -intel_sagv_block_time(struct drm_i915_private *i915) +intel_sagv_block_time(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); if (DISPLAY_VER(display) >= 14) { u32 val; @@ -115,10 +114,8 @@ intel_sagv_block_time(struct drm_i915_private *i915) } } -static void intel_sagv_init(struct drm_i915_private *i915) +static void intel_sagv_init(struct intel_display *display) { - struct intel_display *display = &i915->display; - if (!HAS_SAGV(display)) display->sagv.status = I915_SAGV_NOT_CONTROLLED; @@ -127,14 +124,14 @@ static void intel_sagv_init(struct drm_i915_private *i915) * For icl+ this was already determined by intel_bw_init_hw(). */ if (DISPLAY_VER(display) < 11) - skl_sagv_disable(i915); + skl_sagv_disable(display); drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN); - display->sagv.block_time_us = intel_sagv_block_time(i915); + display->sagv.block_time_us = intel_sagv_block_time(display); drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n", - str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us); + str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us); /* avoid overflow when adding with wm0 latency/etc. 
*/ if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX, @@ -142,7 +139,7 @@ static void intel_sagv_init(struct drm_i915_private *i915) display->sagv.block_time_us)) display->sagv.block_time_us = 0; - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) display->sagv.block_time_us = 0; } @@ -157,17 +154,18 @@ static void intel_sagv_init(struct drm_i915_private *i915) * - All planes can enable watermarks for latencies >= SAGV engine block time * - We're not using an interlaced display configuration */ -static void skl_sagv_enable(struct drm_i915_private *i915) +static void skl_sagv_enable(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) return; - if (i915->display.sagv.status == I915_SAGV_ENABLED) + if (display->sagv.status == I915_SAGV_ENABLED) return; - drm_dbg_kms(&i915->drm, "Enabling SAGV\n"); + drm_dbg_kms(display->drm, "Enabling SAGV\n"); ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); @@ -177,29 +175,30 @@ static void skl_sagv_enable(struct drm_i915_private *i915) * Some skl systems, pre-release machines in particular, * don't actually have SAGV. */ - if (IS_SKYLAKE(i915) && ret == -ENXIO) { - drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + if (display->platform.skylake && ret == -ENXIO) { + drm_dbg(display->drm, "No SAGV found on system, ignoring\n"); + display->sagv.status = I915_SAGV_NOT_CONTROLLED; return; } else if (ret < 0) { - drm_err(&i915->drm, "Failed to enable SAGV\n"); + drm_err(display->drm, "Failed to enable SAGV\n"); return; } - i915->display.sagv.status = I915_SAGV_ENABLED; + display->sagv.status = I915_SAGV_ENABLED; } -static void skl_sagv_disable(struct drm_i915_private *i915) +static void skl_sagv_disable(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); int ret; - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) return; - if (i915->display.sagv.status == I915_SAGV_DISABLED) + if (display->sagv.status == I915_SAGV_DISABLED) return; - drm_dbg_kms(&i915->drm, "Disabling SAGV\n"); + drm_dbg_kms(display->drm, "Disabling SAGV\n"); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, @@ -209,47 +208,47 @@ static void skl_sagv_disable(struct drm_i915_private *i915) * Some skl systems, pre-release machines in particular, * don't actually have SAGV. 
*/ - if (IS_SKYLAKE(i915) && ret == -ENXIO) { - drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + if (display->platform.skylake && ret == -ENXIO) { + drm_dbg(display->drm, "No SAGV found on system, ignoring\n"); + display->sagv.status = I915_SAGV_NOT_CONTROLLED; return; } else if (ret < 0) { - drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret); + drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret); return; } - i915->display.sagv.status = I915_SAGV_DISABLED; + display->sagv.status = I915_SAGV_DISABLED; } static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); if (!new_bw_state) return; - if (!intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_disable(i915); + if (!intel_can_enable_sagv(display, new_bw_state)) + skl_sagv_disable(display); } static void skl_sagv_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *new_bw_state = intel_atomic_get_new_bw_state(state); if (!new_bw_state) return; - if (intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_enable(i915); + if (intel_can_enable_sagv(display, new_bw_state)) + skl_sagv_enable(display); } static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = @@ -267,7 +266,7 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) WARN_ON(!new_bw_state->base.changed); - drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n", + drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n", old_mask, new_mask); /* @@ -276,12 +275,12 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) * time. Also masking should be done before updating the configuration * and unmasking afterwards. */ - icl_pcode_restrict_qgv_points(i915, new_mask); + icl_pcode_restrict_qgv_points(display, new_mask); } static void icl_sagv_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = @@ -299,7 +298,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state) WARN_ON(!new_bw_state->base.changed); - drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", + drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", old_mask, new_mask); /* @@ -308,12 +307,12 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state) * time. Also masking should be done before updating the configuration * and unmasking afterwards. 
*/ - icl_pcode_restrict_qgv_points(i915, new_mask); + icl_pcode_restrict_qgv_points(display, new_mask); } void intel_sagv_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); /* * Just return if we can't control SAGV or don't have it. @@ -322,10 +321,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state) * disabled in a BIOS, we are not even allowed to send a PCode request, * as it will throw an error. So have to check it here. */ - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) return; - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) icl_sagv_pre_plane_update(state); else skl_sagv_pre_plane_update(state); @@ -333,7 +332,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state) void intel_sagv_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); /* * Just return if we can't control SAGV or don't have it. @@ -342,10 +341,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state) * disabled in a BIOS, we are not even allowed to send a PCode request, * as it will throw an error. So have to check it here. */ - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) return; - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) icl_sagv_post_plane_update(state); else skl_sagv_post_plane_update(state); @@ -353,12 +352,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state) static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum plane_id plane_id; int max_level = INT_MAX; - if (!intel_has_sagv(i915)) + if (!intel_has_sagv(display)) return false; if (!crtc_state->hw.active) @@ -377,7 +376,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) continue; /* Find the highest enabled wm level for this plane */ - for (level = i915->display.wm.num_levels - 1; + for (level = display->wm.num_levels - 1; !wm->wm[level].enable; --level) { } @@ -423,104 +422,37 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) return true; } -static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); - if (!i915->display.params.enable_sagv) + if (!display->params.enable_sagv) return false; - if (DISPLAY_VER(i915) >= 12) + /* + * SAGV is initially forced off because its current + * state can't be queried from pcode. Allow SAGV to + * be enabled upon the first real commit. 
+ */ + if (crtc_state->inherited) + return false; + + if (DISPLAY_VER(display) >= 12) return tgl_crtc_can_enable_sagv(crtc_state); else return skl_crtc_can_enable_sagv(crtc_state); } -bool intel_can_enable_sagv(struct drm_i915_private *i915, +bool intel_can_enable_sagv(struct intel_display *display, const struct intel_bw_state *bw_state) { - if (DISPLAY_VER(i915) < 11 && + if (DISPLAY_VER(display) < 11 && bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) return false; return bw_state->pipe_sagv_reject == 0; } -static int intel_compute_sagv_mask(struct intel_atomic_state *state) -{ - struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); - int ret; - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - struct intel_bw_state *new_bw_state = NULL; - const struct intel_bw_state *old_bw_state = NULL; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; - - new_bw_state = intel_atomic_get_bw_state(state); - if (IS_ERR(new_bw_state)) - return PTR_ERR(new_bw_state); - - old_bw_state = intel_atomic_get_old_bw_state(state); - - /* - * We store use_sagv_wm in the crtc state rather than relying on - * that bw state since we have no convenient way to get at the - * latter from the plane commit hooks (especially in the legacy - * cursor case). - * - * drm_atomic_check_only() gets upset if we pull more crtcs - * into the state, so we have to calculate this based on the - * individual intel_crtc_can_enable_sagv() rather than - * the overall intel_can_enable_sagv(). Otherwise the - * crtcs not included in the commit would not switch to the - * SAGV watermarks when we are about to enable SAGV, and that - * would lead to underruns. This does mean extra power draw - * when only a subset of the crtcs are blocking SAGV as the - * other crtcs can't be allowed to use the more optimal - * normal (ie. non-SAGV) watermarks. 
- */ - pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) && - DISPLAY_VER(i915) >= 12 && - intel_crtc_can_enable_sagv(new_crtc_state); - - if (intel_crtc_can_enable_sagv(new_crtc_state)) - new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); - else - new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); - } - - if (!new_bw_state) - return 0; - - new_bw_state->active_pipes = - intel_calc_active_pipes(state, old_bw_state->active_pipes); - - if (new_bw_state->active_pipes != old_bw_state->active_pipes) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - if (intel_can_enable_sagv(i915, new_bw_state) != - intel_can_enable_sagv(i915, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - return 0; -} - static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, u16 start, u16 end) { @@ -530,17 +462,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, return end; } -static int intel_dbuf_slice_size(struct drm_i915_private *i915) +static int intel_dbuf_slice_size(struct intel_display *display) { - return DISPLAY_INFO(i915)->dbuf.size / - hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask); + return DISPLAY_INFO(display)->dbuf.size / + hweight8(DISPLAY_INFO(display)->dbuf.slice_mask); } static void -skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, +skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask, struct skl_ddb_entry *ddb) { - int slice_size = intel_dbuf_slice_size(i915); + int slice_size = intel_dbuf_slice_size(display); if (!slice_mask) { ddb->start = 0; @@ -552,10 +484,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask, ddb->end = fls(slice_mask) * slice_size; WARN_ON(ddb->start >= ddb->end); - WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size); + WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size); } -static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) +static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask) { struct skl_ddb_entry ddb; @@ -564,15 +496,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) slice_mask = BIT(DBUF_S3); - skl_ddb_entry_for_slices(i915, slice_mask, &ddb); + skl_ddb_entry_for_slices(display, slice_mask, &ddb); return ddb.start; } -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, +u32 skl_ddb_dbuf_slice_mask(struct intel_display *display, const struct skl_ddb_entry *entry) { - int slice_size = intel_dbuf_slice_size(i915); + int slice_size = intel_dbuf_slice_size(display); enum dbuf_slice start_slice, end_slice; u8 slice_mask = 0; @@ -618,15 +550,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, unsigned int *weight_end, unsigned int *weight_total) { - struct drm_i915_private *i915 = - to_i915(dbuf_state->base.state->base.dev); + struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev); enum pipe pipe; *weight_start = 0; *weight_end = 0; *weight_total = 0; - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { int weight = dbuf_state->weight[pipe]; /* @@ -652,7 +583,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, static int skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct 
intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); unsigned int weight_total, weight_start, weight_end; const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); @@ -674,8 +605,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) dbuf_slice_mask = new_dbuf_state->slices[pipe]; - skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices); - mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask); + skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices); + mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask); ddb_range_size = skl_ddb_entry_size(&ddb_slices); intel_crtc_dbuf_weights(new_dbuf_state, pipe, @@ -709,7 +640,7 @@ out: crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", crtc->base.base.id, crtc->base.name, old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], @@ -734,10 +665,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */); -static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, +static unsigned int skl_wm_latency(struct intel_display *display, int level, const struct skl_wm_params *wp) { - unsigned int latency = i915->display.wm.skl_latency[level]; + unsigned int latency = display->wm.skl_latency[level]; if (latency == 0) return 0; @@ -746,11 +677,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, * WaIncreaseLatencyIPCEnabled: kbl,cfl * Display WA #1141: kbl,cfl */ - if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && - skl_watermark_ipc_enabled(i915)) + if ((display->platform.kabylake || display->platform.coffeelake || + display->platform.cometlake) && skl_watermark_ipc_enabled(display)) latency += 4; - if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled) + if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled) latency += 15; return latency; @@ -760,8 +691,8 @@ static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; struct skl_wm_params wp; @@ -772,10 +703,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, DRM_FORMAT_MOD_LINEAR, DRM_MODE_ROTATE_0, crtc_state->pixel_rate, &wp, 0, 0); - drm_WARN_ON(&i915->drm, ret); + drm_WARN_ON(display->drm, ret); - for (level = 0; level < i915->display.wm.num_levels; level++) { - unsigned int latency = skl_wm_latency(i915, level, &wp); + for (level = 0; level < display->wm.num_levels; level++) { + unsigned int latency = skl_wm_latency(display, level, &wp); skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) @@ -797,14 +728,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) } static void -skl_ddb_get_hw_plane_state(struct drm_i915_private *i915, +skl_ddb_get_hw_plane_state(struct intel_display *display, const enum pipe pipe, const enum 
plane_id plane_id, struct skl_ddb_entry *ddb, struct skl_ddb_entry *ddb_y, u16 *min_ddb, u16 *interim_ddb) { - struct intel_display *display = &i915->display; u32 val; /* Cursor doesn't support NV12/planar, so no extra calculation needed */ @@ -837,7 +767,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, u16 *min_ddb, u16 *interim_ddb) { struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; @@ -849,7 +778,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, return; for_each_plane_id_on_crtc(crtc, plane_id) - skl_ddb_get_hw_plane_state(i915, pipe, + skl_ddb_get_hw_plane_state(display, pipe, plane_id, &ddb[plane_id], &ddb_y[plane_id], @@ -1367,16 +1296,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; - if (IS_DG2(i915)) + if (display->platform.dg2) return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(i915) >= 13) + else if (DISPLAY_VER(display) >= 13) return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(i915) == 12) + else if (DISPLAY_VER(display) == 12) return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(i915) == 11) + else if (DISPLAY_VER(display) == 11) return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); /* * For anything else just return one slice yet. @@ -1416,8 +1345,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, static u64 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum plane_id plane_id; u64 data_rate = 0; @@ -1427,7 +1356,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) data_rate += crtc_state->rel_data_rate[plane_id]; - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) data_rate += crtc_state->rel_data_rate_y[plane_id]; } @@ -1489,7 +1418,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } -static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, +static bool skl_need_wm_copy_wa(struct intel_display *display, int level, const struct skl_plane_wm *wm) { /* @@ -1543,7 +1472,6 @@ static int skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_dbuf_state *dbuf_state = @@ -1585,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, * Find the highest watermark level for which we can satisfy the block * requirement of active planes. 
*/ - for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { + for (level = display->wm.num_levels - 1; level >= 0; level--) { blocks = 0; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = @@ -1596,7 +1524,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, &crtc_state->wm.skl.plane_ddb[plane_id]; if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; break; @@ -1615,9 +1543,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, } if (level < 0) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Requested display configuration exceeds system DDB limitations"); - drm_dbg_kms(&i915->drm, "minimum required %d/%d\n", + drm_dbg_kms(display->drm, "minimum required %d/%d\n", blocks, iter.size); return -EINVAL; } @@ -1645,7 +1573,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, if (plane_id == PLANE_CURSOR) continue; - if (DISPLAY_VER(i915) < 11 && + if (DISPLAY_VER(display) < 11 && crtc_state->nv12_planes & BIT(plane_id)) { skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], crtc_state->rel_data_rate_y[plane_id]); @@ -1661,7 +1589,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, *interim_ddb = wm->sagv.wm0.min_ddb_alloc; } } - drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0); + drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0); /* * When we calculated watermark values we didn't know how high @@ -1669,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, * all levels as "enabled." Go back now and disable the ones * that aren't actually possible. */ - for (level++; level < i915->display.wm.num_levels; level++) { + for (level++; level < display->wm.num_levels; level++) { for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; @@ -1678,7 +1606,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (DISPLAY_VER(i915) < 11 && + if (DISPLAY_VER(display) < 11 && crtc_state->nv12_planes & BIT(plane_id)) skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], @@ -1686,7 +1614,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, else skl_check_wm_level(&wm->wm[level], ddb); - if (skl_need_wm_copy_wa(i915, level, wm)) { + if (skl_need_wm_copy_wa(display, level, wm)) { wm->wm[level].blocks = wm->wm[level - 1].blocks; wm->wm[level].lines = wm->wm[level - 1].lines; wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines; @@ -1708,7 +1636,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (DISPLAY_VER(i915) < 11 && + if (DISPLAY_VER(display) < 11 && crtc_state->nv12_planes & BIT(plane_id)) { skl_check_wm_level(&wm->trans_wm, ddb_y); } else { @@ -1734,7 +1662,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
*/ static uint_fixed_16_16_t -skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate, +skl_wm_method1(struct intel_display *display, u32 pixel_rate, u8 cpp, u32 latency, u32 dbuf_block_size) { u32 wm_intermediate_val; @@ -1746,7 +1674,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate, wm_intermediate_val = latency * pixel_rate * cpp; ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); - if (DISPLAY_VER(i915) >= 10) + if (DISPLAY_VER(display) >= 10) ret = add_fixed16_u32(ret, 1); return ret; @@ -1772,7 +1700,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, static uint_fixed_16_16_t intel_get_linetime_us(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); u32 pixel_rate; u32 crtc_htotal; uint_fixed_16_16_t linetime_us; @@ -1782,7 +1710,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state) pixel_rate = crtc_state->pixel_rate; - if (drm_WARN_ON(&i915->drm, pixel_rate == 0)) + if (drm_WARN_ON(display->drm, pixel_rate == 0)) return u32_to_fixed16(0); crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; @@ -1798,15 +1726,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane, unsigned int pan_x) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 interm_pbpl; /* only planar format has two planes */ if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(format, modifier)) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Non planar format have single plane\n"); return -EINVAL; } @@ -1824,7 +1750,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->cpp = format->cpp[color_plane]; wp->plane_pixel_rate = plane_pixel_rate; - if (DISPLAY_VER(i915) >= 11 && + if (DISPLAY_VER(display) >= 11 && modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) wp->dbuf_block_size = 256; else @@ -1849,7 +1775,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_min_scanlines = 4; } - if (skl_needs_memory_bw_wa(i915)) + if (skl_needs_memory_bw_wa(display)) wp->y_min_scanlines *= 2; wp->plane_bytes_per_line = wp->width * wp->cpp; @@ -1860,7 +1786,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, if (DISPLAY_VER(display) >= 30) interm_pbpl += (pan_x != 0); - else if (DISPLAY_VER(i915) >= 10) + else if (DISPLAY_VER(display) >= 10) interm_pbpl++; wp->plane_blocks_per_line = div_fixed16(interm_pbpl, @@ -1869,7 +1795,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, wp->dbuf_block_size); - if (!wp->x_tiled || DISPLAY_VER(i915) >= 10) + if (!wp->x_tiled || DISPLAY_VER(display) >= 10) interm_pbpl++; wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); @@ -1906,18 +1832,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, plane_state->uapi.src.x1); } -static bool skl_wm_has_lines(struct drm_i915_private *i915, int level) +static bool skl_wm_has_lines(struct intel_display *display, int level) { - if (DISPLAY_VER(i915) >= 10) + if (DISPLAY_VER(display) >= 10) return true; /* The number of lines are ignored for the level 0 watermark. 
*/ return level > 0; } -static int skl_wm_max_lines(struct drm_i915_private *i915) +static int skl_wm_max_lines(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(display) >= 13) return 255; else return 31; @@ -1938,7 +1864,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; u32 blocks, lines, min_ddb_alloc = 0; @@ -1950,7 +1876,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, return; } - method1 = skl_wm_method1(i915, wp->plane_pixel_rate, + method1 = skl_wm_method1(display, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, @@ -1965,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; } else if (latency >= wp->linetime_us) { - if (DISPLAY_VER(i915) == 9) + if (DISPLAY_VER(display) == 9) selected_result = min_fixed16(method1, method2); else selected_result = method2; @@ -1975,7 +1901,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } blocks = fixed16_to_u32_round_up(selected_result); - if (DISPLAY_VER(i915) < 30) + if (DISPLAY_VER(display) < 30) blocks++; /* @@ -1994,13 +1920,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, * channels' impact on the level 0 memory latency and the relevant * wm calculations. 
*/ - if (skl_wm_has_lines(i915, level)) + if (skl_wm_has_lines(display, level)) blocks = max(blocks, fixed16_to_u32_round_up(wp->plane_blocks_per_line)); lines = div_round_up_fixed16(selected_result, wp->plane_blocks_per_line); - if (DISPLAY_VER(i915) == 9) { + if (DISPLAY_VER(display) == 9) { /* Display WA #1125: skl,bxt,kbl */ if (level == 0 && wp->rc_surface) blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); @@ -2025,7 +1951,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } } - if (DISPLAY_VER(i915) >= 11) { + if (DISPLAY_VER(display) >= 11) { if (wp->y_tiled) { int extra_lines; @@ -2042,10 +1968,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } } - if (!skl_wm_has_lines(i915, level)) + if (!skl_wm_has_lines(display, level)) lines = 0; - if (lines > skl_wm_max_lines(i915)) { + if (lines > skl_wm_max_lines(display)) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; @@ -2064,8 +1990,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, result->enable = true; result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level); - if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us) - result->can_sagv = latency >= i915->display.sagv.block_time_us; + if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us) + result->can_sagv = latency >= display->sagv.block_time_us; } static void @@ -2074,13 +2000,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct skl_wm_level *result_prev = &levels[0]; int level; - for (level = 0; level < i915->display.wm.num_levels; level++) { + for (level = 0; level < display->wm.num_levels; level++) { struct skl_wm_level *result = &levels[level]; - unsigned int latency = skl_wm_latency(i915, level, wm_params); + unsigned int latency = skl_wm_latency(display, level, wm_params); skl_compute_plane_wm(crtc_state, plane, level, latency, wm_params, result_prev, result); @@ -2094,21 +2020,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wm_params, struct skl_plane_wm *plane_wm) { - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; struct skl_wm_level *levels = plane_wm->wm; unsigned int latency = 0; - if (i915->display.sagv.block_time_us) - latency = i915->display.sagv.block_time_us + - skl_wm_latency(i915, 0, wm_params); + if (display->sagv.block_time_us) + latency = display->sagv.block_time_us + + skl_wm_latency(display, 0, wm_params); skl_compute_plane_wm(crtc_state, plane, 0, latency, wm_params, &levels[0], sagv_wm); } -static void skl_compute_transition_wm(struct drm_i915_private *i915, +static void skl_compute_transition_wm(struct intel_display *display, struct skl_wm_level *trans_wm, const struct skl_wm_level *wm0, const struct skl_wm_params *wp) @@ -2117,23 +2043,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915, u16 wm0_blocks, trans_offset, blocks; /* Transition WM don't make any sense if ipc is disabled */ - if (!skl_watermark_ipc_enabled(i915)) + if (!skl_watermark_ipc_enabled(display)) return; /* * WaDisableTWM:skl,kbl,cfl,bxt * Transition WM are not recommended by HW team for GEN9 */ - if 
(DISPLAY_VER(i915) == 9) + if (DISPLAY_VER(display) == 9) return; - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) trans_min = 4; else trans_min = 14; /* Display WA #1140: glk,cnl */ - if (DISPLAY_VER(i915) == 10) + if (DISPLAY_VER(display) == 10) trans_amount = 0; else trans_amount = 10; /* This is configurable amount */ @@ -2175,8 +2101,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct intel_plane *plane, int color_plane) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; @@ -2188,13 +2113,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); - skl_compute_transition_wm(i915, &wm->trans_wm, + skl_compute_transition_wm(display, &wm->trans_wm, &wm->wm[0], &wm_params); - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); - skl_compute_transition_wm(i915, &wm->sagv.trans_wm, + skl_compute_transition_wm(display, &wm->sagv.trans_wm, &wm->sagv.wm0, &wm_params); } @@ -2254,8 +2179,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *i915 = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; int ret; @@ -2269,9 +2194,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; - drm_WARN_ON(&i915->drm, + drm_WARN_ON(display->drm, !intel_wm_plane_visible(crtc_state, plane_state)); - drm_WARN_ON(&i915->drm, !fb->format->is_yuv || + drm_WARN_ON(display->drm, !fb->format->is_yuv || fb->format->num_planes == 1); ret = skl_build_plane_wm_single(crtc_state, plane_state, @@ -2411,15 +2336,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, int wm0_lines) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); int level; - for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { + for (level = display->wm.num_levels - 1; level >= 0; level--) { int latency; /* FIXME should we care about the latency w/a's? 
*/ - latency = skl_wm_latency(i915, level, NULL); + latency = skl_wm_latency(display, level, NULL); if (latency == 0) continue; @@ -2436,8 +2360,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); int wm0_lines, level; if (!crtc_state->hw.active) @@ -2453,9 +2377,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* * based on whether we're limited by the vblank duration. */ - crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1; + crtc_state->wm_level_disabled = level < display->wm.num_levels - 1; - for (level++; level < i915->display.wm.num_levels; level++) { + for (level++; level < display->wm.num_levels; level++) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { @@ -2471,10 +2395,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) } } - if (DISPLAY_VER(i915) >= 12 && - i915->display.sagv.block_time_us && + if (DISPLAY_VER(display) >= 12 && + display->sagv.block_time_us && skl_is_vblank_too_short(crtc_state, wm0_lines, - i915->display.sagv.block_time_us)) { + display->sagv.block_time_us)) { enum plane_id plane_id; for_each_plane_id_on_crtc(crtc, plane_id) { @@ -2492,7 +2416,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) static int skl_build_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; @@ -2508,7 +2432,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state, if (plane->pipe != crtc->pipe) continue; - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(display) >= 11) ret = icl_build_plane_wm(crtc_state, plane_state); else ret = skl_build_plane_wm(crtc_state, plane_state); @@ -2531,11 +2455,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1, l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable; } -static bool skl_plane_wm_equals(struct drm_i915_private *i915, +static bool skl_plane_wm_equals(struct intel_display *display, const struct skl_plane_wm *wm1, const struct skl_plane_wm *wm2) { - struct intel_display *display = &i915->display; int level; for (level = 0; level < display->wm.num_levels; level++) { @@ -2590,14 +2513,14 @@ static int skl_ddb_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane *plane; - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { struct intel_plane_state *plane_state; enum plane_id plane_id = plane->id; @@ -2608,7 +2531,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state, continue; if (new_crtc_state->do_async_flip) { - drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n", + 
drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } @@ -2627,7 +2550,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state, static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) { - struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); + struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev); u8 enabled_slices; enum pipe pipe; @@ -2637,7 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) */ enabled_slices = BIT(DBUF_S1); - for_each_pipe(i915, pipe) + for_each_pipe(display, pipe) enabled_slices |= dbuf_state->slices[pipe]; return enabled_slices; @@ -2647,7 +2570,6 @@ static int skl_compute_ddb(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_dbuf_state *old_dbuf_state; struct intel_dbuf_state *new_dbuf_state = NULL; struct intel_crtc_state *new_crtc_state; @@ -2686,7 +2608,7 @@ skl_compute_ddb(struct intel_atomic_state *state) } } - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { enum pipe pipe = crtc->pipe; new_dbuf_state->slices[pipe] = @@ -2709,11 +2631,11 @@ skl_compute_ddb(struct intel_atomic_state *state) if (ret) return ret; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, - DISPLAY_INFO(i915)->dbuf.slice_mask, + DISPLAY_INFO(display)->dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus)); } @@ -2731,7 +2653,7 @@ skl_compute_ddb(struct intel_atomic_state *state) return ret; } - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { ret = skl_crtc_allocate_ddb(state, crtc); if (ret) return ret; @@ -2758,7 +2680,7 @@ static char enast(bool enable) static void skl_print_wm_changes(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state; const struct intel_crtc_state *new_crtc_state; struct intel_plane *plane; @@ -2775,7 +2697,7 @@ skl_print_wm_changes(struct intel_atomic_state *state) old_pipe_wm = &old_crtc_state->wm.skl.optimal; new_pipe_wm = &new_crtc_state->wm.skl.optimal; - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; @@ -2785,24 +2707,24 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size(old), skl_ddb_entry_size(new)); } - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_plane_wm *old_wm, *new_wm; old_wm = &old_pipe_wm->planes[plane_id]; new_wm = &new_pipe_wm->planes[plane_id]; - if (skl_plane_wm_equals(i915, old_wm, new_wm)) + if (skl_plane_wm_equals(display, old_wm, new_wm)) continue; - drm_dbg_kms(&i915->drm, + 
drm_dbg_kms(display->drm, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", plane->base.base.id, plane->base.name, @@ -2821,7 +2743,7 @@ skl_print_wm_changes(struct intel_atomic_state *state) enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm.enable)); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", plane->base.base.id, plane->base.name, @@ -2848,7 +2770,7 @@ skl_print_wm_changes(struct intel_atomic_state *state) enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, @@ -2867,7 +2789,7 @@ skl_print_wm_changes(struct intel_atomic_state *state) new_wm->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks); - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, @@ -2945,14 +2867,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane, static int skl_wm_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane *plane; - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { struct intel_plane_state *plane_state; enum plane_id plane_id = plane->id; @@ -2971,7 +2893,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, continue; if (new_crtc_state->do_async_flip) { - drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n", + drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n", plane->base.base.id, plane->base.name); return -EINVAL; } @@ -3002,7 +2924,6 @@ void intel_program_dpkgc_latency(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_crtc *crtc; struct intel_crtc_state *new_crtc_state; u32 latency = LNL_PKG_C_LATENCY_MASK; @@ -3028,7 +2949,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state) added_wake_time = DSB_EXE_TIME + display->sagv.block_time_us; - latency = skl_watermark_max_latency(i915, 1); + latency = skl_watermark_max_latency(display, 1); /* Wa_22020432604 */ if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) { @@ -3055,6 +2976,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state) static int skl_compute_wm(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); struct intel_crtc *crtc; struct intel_crtc_state __maybe_unused *new_crtc_state; int ret, i; @@ -3069,16 +2991,35 @@ skl_compute_wm(struct intel_atomic_state *state) if (ret) return ret; - ret = 
intel_compute_sagv_mask(state); - if (ret) - return ret; - /* * skl_compute_ddb() will have adjusted the final watermarks * based on how much ddb is available. Now we can actually * check if the final watermarks changed. */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; + + /* + * We store use_sagv_wm in the crtc state rather than relying on + * that bw state since we have no convenient way to get at the + * latter from the plane commit hooks (especially in the legacy + * cursor case). + * + * drm_atomic_check_only() gets upset if we pull more crtcs + * into the state, so we have to calculate this based on the + * individual intel_crtc_can_enable_sagv() rather than + * the overall intel_can_enable_sagv(). Otherwise the + * crtcs not included in the commit would not switch to the + * SAGV watermarks when we are about to enable SAGV, and that + * would lead to underruns. This does mean extra power draw + * when only a subset of the crtcs are blocking SAGV as the + * other crtcs can't be allowed to use the more optimal + * normal (ie. non-SAGV) watermarks. + */ + pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) && + DISPLAY_VER(display) >= 12 && + intel_crtc_can_enable_sagv(new_crtc_state); + ret = skl_wm_add_affected_planes(state, crtc); if (ret) return ret; @@ -3149,11 +3090,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, } } -static void skl_wm_get_hw_state(struct drm_i915_private *i915) +static void skl_wm_get_hw_state(struct intel_display *display) { - struct intel_display *display = &i915->display; struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->display.dbuf.obj.state); + to_intel_dbuf_state(display->dbuf.obj.state); struct intel_crtc *crtc; if (HAS_MBUS_JOINING(display)) @@ -3193,7 +3133,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) if (!crtc_state->hw.active) continue; - skl_ddb_get_hw_plane_state(i915, crtc->pipe, + skl_ddb_get_hw_plane_state(display, crtc->pipe, plane_id, ddb, ddb_y, min_ddb, interim_ddb); @@ -3209,13 +3149,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) */ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, dbuf_state->joined_mbus); - mbus_offset = mbus_ddb_offset(i915, slices); + mbus_offset = mbus_ddb_offset(display, slices); crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; /* The slices actually used by the planes on the pipe */ dbuf_state->slices[pipe] = - skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb); + skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb); drm_dbg_kms(display->drm, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", @@ -3228,49 +3168,52 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915) dbuf_state->enabled_slices = display->dbuf.enabled_slices; } -bool skl_watermark_ipc_enabled(struct drm_i915_private *i915) +bool skl_watermark_ipc_enabled(struct intel_display *display) { - return i915->display.wm.ipc_enabled; + return display->wm.ipc_enabled; } -void skl_watermark_ipc_update(struct drm_i915_private *i915) +void skl_watermark_ipc_update(struct intel_display *display) { - if (!HAS_IPC(i915)) + if (!HAS_IPC(display)) return; - intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE, - skl_watermark_ipc_enabled(i915) ? 
DISP_IPC_ENABLE : 0); + intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE, + skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0); } -static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915) +static bool skl_watermark_ipc_can_enable(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + /* Display WA #0477 WaDisableIPC: skl */ - if (IS_SKYLAKE(i915)) + if (display->platform.skylake) return false; /* Display WA #1141: SKL:all KBL:all CFL */ - if (IS_KABYLAKE(i915) || - IS_COFFEELAKE(i915) || - IS_COMETLAKE(i915)) + if (display->platform.kabylake || + display->platform.coffeelake || + display->platform.cometlake) return i915->dram_info.symmetric_memory; return true; } -void skl_watermark_ipc_init(struct drm_i915_private *i915) +void skl_watermark_ipc_init(struct intel_display *display) { - if (!HAS_IPC(i915)) + if (!HAS_IPC(display)) return; - i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); + display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display); - skl_watermark_ipc_update(i915); + skl_watermark_ipc_update(display); } static void -adjust_wm_latency(struct drm_i915_private *i915, +adjust_wm_latency(struct intel_display *display, u16 wm[], int num_levels, int read_latency) { + struct drm_i915_private *i915 = to_i915(display->drm); bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; int i, level; @@ -3311,31 +3254,32 @@ adjust_wm_latency(struct drm_i915_private *i915, wm[0] += 1; } -static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +static void mtl_read_wm_latency(struct intel_display *display, u16 wm[]) { - int num_levels = i915->display.wm.num_levels; + int num_levels = display->wm.num_levels; u32 val; - val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); + val = intel_de_read(display, MTL_LATENCY_LP0_LP1); wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - val = intel_de_read(i915, MTL_LATENCY_LP2_LP3); + val = intel_de_read(display, MTL_LATENCY_LP2_LP3); wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - val = intel_de_read(i915, MTL_LATENCY_LP4_LP5); + val = intel_de_read(display, MTL_LATENCY_LP4_LP5); wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - adjust_wm_latency(i915, wm, num_levels, 6); + adjust_wm_latency(display, wm, num_levels, 6); } -static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) { - int num_levels = i915->display.wm.num_levels; - int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; - int mult = IS_DG2(i915) ? 2 : 1; + struct drm_i915_private *i915 = to_i915(display->drm); + int num_levels = display->wm.num_levels; + int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2; + int mult = display->platform.dg2 ? 
2 : 1; u32 val; int ret; @@ -3343,7 +3287,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) val = 0; /* data0 to be programmed to 0 for first set */ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { - drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); return; } @@ -3356,7 +3300,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) val = 1; /* data0 to be programmed to 1 for second set */ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { - drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); return; } @@ -3365,24 +3309,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; - adjust_wm_latency(i915, wm, num_levels, read_latency); + adjust_wm_latency(display, wm, num_levels, read_latency); } -static void skl_setup_wm_latency(struct drm_i915_private *i915) +static void skl_setup_wm_latency(struct intel_display *display) { - struct intel_display *display = &i915->display; - if (HAS_HW_SAGV_WM(display)) display->wm.num_levels = 6; else display->wm.num_levels = 8; if (DISPLAY_VER(display) >= 14) - mtl_read_wm_latency(i915, display->wm.skl_latency); + mtl_read_wm_latency(display, display->wm.skl_latency); else - skl_read_wm_latency(i915, display->wm.skl_latency); + skl_read_wm_latency(display, display->wm.skl_latency); - intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency); + intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency); } static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) @@ -3410,19 +3352,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = { struct intel_dbuf_state * intel_atomic_get_dbuf_state(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *dbuf_state; - dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); + dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj); if (IS_ERR(dbuf_state)) return ERR_CAST(dbuf_state); return to_intel_dbuf_state(dbuf_state); } -int intel_dbuf_init(struct drm_i915_private *i915) +int intel_dbuf_init(struct intel_display *display) { - struct intel_display *display = &i915->display; struct intel_dbuf_state *dbuf_state; dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); @@ -3457,34 +3398,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc, const struct intel_dbuf_state *dbuf_state) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); u32 val = 0; - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) val |= MBUS_DBOX_I_CREDIT(2); - if (DISPLAY_VER(i915) >= 12) { + if (DISPLAY_VER(display) >= 12) { val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; } - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) val |= dbuf_state->joined_mbus ? 
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8); - else if (IS_ALDERLAKE_P(i915)) + else if (display->platform.alderlake_p) /* Wa_22010947358:adl-p */ val |= dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4); else val |= MBUS_DBOX_A_CREDIT(2); - if (DISPLAY_VER(i915) >= 14) { + if (DISPLAY_VER(display) >= 14) { val |= MBUS_DBOX_B_CREDIT(0xA); - } else if (IS_ALDERLAKE_P(i915)) { + } else if (display->platform.alderlake_p) { val |= MBUS_DBOX_BW_CREDIT(2); val |= MBUS_DBOX_B_CREDIT(8); - } else if (DISPLAY_VER(i915) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { val |= MBUS_DBOX_BW_CREDIT(2); val |= MBUS_DBOX_B_CREDIT(12); } else { @@ -3492,7 +3433,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc, val |= MBUS_DBOX_B_CREDIT(8); } - if (DISPLAY_VERx100(i915) == 1400) { + if (DISPLAY_VERx100(display) == 1400) { if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes)) val |= MBUS_DBOX_BW_8CREDITS_MTL; else @@ -3502,22 +3443,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc, return val; } -static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915, +static void pipe_mbus_dbox_ctl_update(struct intel_display *display, const struct intel_dbuf_state *dbuf_state) { struct intel_crtc *crtc; - for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes) - intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), + for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes) + intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_mbus_dbox_ctl(crtc, dbuf_state)); } static void intel_mbus_dbox_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) return; new_dbuf_state = intel_atomic_get_new_dbuf_state(state); @@ -3527,7 +3468,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state) new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) return; - pipe_mbus_dbox_ctl_update(i915, new_dbuf_state); + pipe_mbus_dbox_ctl_update(display, new_dbuf_state); } int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, @@ -3544,10 +3485,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, return intel_atomic_lock_global_state(&dbuf_state->base); } -void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915, +void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display, int ratio, bool joined_mbus) { - struct intel_display *display = &i915->display; enum dbuf_slice slice; if (!HAS_MBUS_JOINING(display)) @@ -3571,7 +3511,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915, static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); const struct intel_dbuf_state *new_dbuf_state = @@ -3586,7 +3526,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio; } - intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio, + intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio, new_dbuf_state->joined_mbus); } @@ -3594,13 +3534,12 @@ static 
enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, const struct intel_dbuf_state *dbuf_state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe = ffs(dbuf_state->active_pipes) - 1; const struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; - drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus); - drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes)); + drm_WARN_ON(display->drm, !dbuf_state->joined_mbus); + drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes)); crtc = intel_crtc_for_pipe(display, pipe); new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -3611,7 +3550,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state, return INVALID_PIPE; } -static void mbus_ctl_join_update(struct drm_i915_private *i915, +static void mbus_ctl_join_update(struct intel_display *display, const struct intel_dbuf_state *dbuf_state, enum pipe pipe) { @@ -3627,7 +3566,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915, else mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE; - intel_de_rmw(i915, MBUS_CTL, + intel_de_rmw(display, MBUS_CTL, MBUS_HASHING_MODE_MASK | MBUS_JOIN | MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); } @@ -3635,18 +3574,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915, static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state, enum pipe pipe) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_dbuf_state *old_dbuf_state = intel_atomic_get_old_dbuf_state(state); const struct intel_dbuf_state *new_dbuf_state = intel_atomic_get_new_dbuf_state(state); - drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n", + drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n", str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus), pipe != INVALID_PIPE ? 
pipe_name(pipe) : '*'); - mbus_ctl_join_update(i915, new_dbuf_state, pipe); + mbus_ctl_join_update(display, new_dbuf_state, pipe); } void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state) @@ -3751,9 +3690,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state) gen9_dbuf_slices_update(display, new_slices); } -static void skl_mbus_sanitize(struct drm_i915_private *i915) +static void skl_mbus_sanitize(struct intel_display *display) { - struct intel_display *display = &i915->display; struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(display->dbuf.obj.state); @@ -3768,28 +3706,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915) dbuf_state->active_pipes); dbuf_state->joined_mbus = false; - intel_dbuf_mdclk_cdclk_ratio_update(i915, + intel_dbuf_mdclk_cdclk_ratio_update(display, dbuf_state->mdclk_cdclk_ratio, dbuf_state->joined_mbus); - pipe_mbus_dbox_ctl_update(i915, dbuf_state); - mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE); + pipe_mbus_dbox_ctl_update(display, dbuf_state); + mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE); } -static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +static bool skl_dbuf_is_misconfigured(struct intel_display *display) { const struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->display.dbuf.obj.state); + to_intel_dbuf_state(display->dbuf.obj.state); struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; struct intel_crtc *crtc; - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); entries[crtc->pipe] = crtc_state->wm.skl.ddb; } - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); u8 slices; @@ -3807,7 +3745,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) return false; } -static void skl_dbuf_sanitize(struct drm_i915_private *i915) +static void skl_dbuf_sanitize(struct intel_display *display) { struct intel_crtc *crtc; @@ -3822,12 +3760,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915) * all the planes so that skl_commit_modeset_enables() can * simply ignore them. 
*/ - if (!skl_dbuf_is_misconfigured(i915)) + if (!skl_dbuf_is_misconfigured(display)) return; - drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); - for_each_intel_crtc(&i915->drm, crtc) { + for_each_intel_crtc(display->drm, crtc) { struct intel_plane *plane = to_intel_plane(crtc->base.primary); const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); @@ -3837,16 +3775,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915) if (plane_state->uapi.visible) intel_plane_disable_noatomic(crtc, plane); - drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + drm_WARN_ON(display->drm, crtc_state->active_planes != 0); memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); } } -static void skl_wm_sanitize(struct drm_i915_private *i915) +static void skl_wm_sanitize(struct intel_display *display) { - skl_mbus_sanitize(i915); - skl_dbuf_sanitize(i915); + skl_mbus_sanitize(display); + skl_dbuf_sanitize(display); } void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc) @@ -3897,7 +3835,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct skl_hw_state { @@ -3912,7 +3849,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state, u8 hw_enabled_slices; int level; - if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) + if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active) return; hw = kzalloc(sizeof(*hw), GFP_KERNEL); @@ -3925,26 +3862,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state, hw_enabled_slices = intel_enabled_dbuf_slices_mask(display); - if (DISPLAY_VER(i915) >= 11 && - hw_enabled_slices != i915->display.dbuf.enabled_slices) - drm_err(&i915->drm, + if (DISPLAY_VER(display) >= 11 && + hw_enabled_slices != display->dbuf.enabled_slices) + drm_err(display->drm, "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", - i915->display.dbuf.enabled_slices, + display->dbuf.enabled_slices, hw_enabled_slices); - for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + for_each_intel_plane_on_crtc(display->drm, crtc, plane) { const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; const struct skl_wm_level *hw_wm_level, *sw_wm_level; /* Watermarks */ - for (level = 0; level < i915->display.wm.num_levels; level++) { + for (level = 0; level < display->wm.num_levels; level++) { hw_wm_level = &hw->wm.planes[plane->id].wm[level]; sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) continue; - drm_err(&i915->drm, + drm_err(display->drm, "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, level, sw_wm_level->enable, @@ -3959,7 +3896,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state, sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&i915->drm, + drm_err(display->drm, "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, sw_wm_level->enable, @@ -3975,7 +3912,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state, if (HAS_HW_SAGV_WM(display) && 
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&i915->drm, + drm_err(display->drm, "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, sw_wm_level->enable, @@ -3991,7 +3928,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state, if (HAS_HW_SAGV_WM(display) && !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&i915->drm, + drm_err(display->drm, "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", plane->base.base.id, plane->base.name, sw_wm_level->enable, @@ -4007,7 +3944,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state, sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - drm_err(&i915->drm, + drm_err(display->drm, "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", plane->base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry->end, @@ -4024,29 +3961,29 @@ static const struct intel_wm_funcs skl_wm_funcs = { .sanitize = skl_wm_sanitize, }; -void skl_wm_init(struct drm_i915_private *i915) +void skl_wm_init(struct intel_display *display) { - intel_sagv_init(i915); + intel_sagv_init(display); - skl_setup_wm_latency(i915); + skl_setup_wm_latency(display); - i915->display.funcs.wm = &skl_wm_funcs; + display->funcs.wm = &skl_wm_funcs; } static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) { - struct drm_i915_private *i915 = m->private; + struct intel_display *display = m->private; seq_printf(m, "Isochronous Priority Control: %s\n", - str_yes_no(skl_watermark_ipc_enabled(i915))); + str_yes_no(skl_watermark_ipc_enabled(display))); return 0; } static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) { - struct drm_i915_private *i915 = inode->i_private; + struct intel_display *display = inode->i_private; - return single_open(file, skl_watermark_ipc_status_show, i915); + return single_open(file, skl_watermark_ipc_status_show, display); } static ssize_t skl_watermark_ipc_status_write(struct file *file, @@ -4054,8 +3991,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; - struct drm_i915_private *i915 = m->private; - intel_wakeref_t wakeref; + struct intel_display *display = m->private; bool enable; int ret; @@ -4063,12 +3999,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file, if (ret < 0) return ret; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - if (!skl_watermark_ipc_enabled(i915) && enable) - drm_info(&i915->drm, + with_intel_display_rpm(display) { + if (!skl_watermark_ipc_enabled(display) && enable) + drm_info(display->drm, "Enabling IPC: WM will be proper only after next commit\n"); - i915->display.wm.ipc_enabled = enable; - skl_watermark_ipc_update(i915); + display->wm.ipc_enabled = enable; + skl_watermark_ipc_update(display); } return len; @@ -4085,7 +4021,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = { static int intel_sagv_status_show(struct seq_file *m, void *unused) { - struct drm_i915_private *i915 = m->private; + struct intel_display *display = m->private; static const char * const sagv_status[] = { [I915_SAGV_UNKNOWN] = "unknown", [I915_SAGV_DISABLED] = "disabled", @@ -4093,37 +4029,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused) [I915_SAGV_NOT_CONTROLLED] = "not controlled", }; - seq_printf(m, "SAGV available: %s\n", 
str_yes_no(intel_has_sagv(i915))); + seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display))); seq_printf(m, "SAGV modparam: %s\n", - str_enabled_disabled(i915->display.params.enable_sagv)); - seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); - seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); + str_enabled_disabled(display->params.enable_sagv)); + seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]); + seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us); return 0; } DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); -void skl_watermark_debugfs_register(struct drm_i915_private *i915) +void skl_watermark_debugfs_register(struct intel_display *display) { - struct intel_display *display = &i915->display; struct drm_minor *minor = display->drm->primary; if (HAS_IPC(display)) - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, - &skl_watermark_ipc_status_fops); + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, + display, &skl_watermark_ipc_status_fops); if (HAS_SAGV(display)) - debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, - &intel_sagv_status_fops); + debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, + display, &intel_sagv_status_fops); } -unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level) +unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level) { int level; - for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) { - unsigned int latency = skl_wm_latency(i915, level, NULL); + for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) { + unsigned int latency = skl_wm_latency(display, level, NULL); if (latency) return latency; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index d9cff6c54310..95b0b599d5c3 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -12,7 +12,6 @@ #include "intel_global_state.h" #include "intel_wm_types.h" -struct drm_i915_private; struct intel_atomic_state; struct intel_bw_state; struct intel_crtc; @@ -27,11 +26,12 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); void intel_sagv_post_plane_update(struct intel_atomic_state *state); -bool intel_can_enable_sagv(struct drm_i915_private *i915, +bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state); +bool intel_can_enable_sagv(struct intel_display *display, const struct intel_bw_state *bw_state); -bool intel_has_sagv(struct drm_i915_private *i915); +bool intel_has_sagv(struct intel_display *display); -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, +u32 skl_ddb_dbuf_slice_mask(struct intel_display *display, const struct skl_ddb_entry *entry); bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, @@ -45,14 +45,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc); void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane); -void skl_watermark_ipc_init(struct drm_i915_private *i915); -void skl_watermark_ipc_update(struct drm_i915_private *i915); -bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); -void skl_watermark_debugfs_register(struct drm_i915_private *i915); +void skl_watermark_ipc_init(struct intel_display *display); +void 
skl_watermark_ipc_update(struct intel_display *display); +bool skl_watermark_ipc_enabled(struct intel_display *display); +void skl_watermark_debugfs_register(struct intel_display *display); -unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, +unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level); -void skl_wm_init(struct drm_i915_private *i915); +void skl_wm_init(struct intel_display *display); const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, enum plane_id plane_id, @@ -86,13 +86,13 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state); #define intel_atomic_get_new_dbuf_state(state) \ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) -int intel_dbuf_init(struct drm_i915_private *i915); +int intel_dbuf_init(struct intel_display *display); int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, int ratio); void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); void intel_dbuf_post_plane_update(struct intel_atomic_state *state); -void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915, +void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display, int ratio, bool joined_mbus); void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state); void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index af717df83197..346737f15fa9 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, return 0; } -static void band_gap_reset(struct drm_i915_private *dev_priv) +static void band_gap_reset(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + vlv_flisdsi_get(dev_priv); vlv_flisdsi_write(dev_priv, 0x08, 0x0001); @@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, else pipe_config->pipe_bpp = 18; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { /* Enable Frame time stamp based scanline reporting */ pipe_config->mode_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; @@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) vlv_flisdsi_put(dev_priv); /* bandgap reset is needed after everytime we do power gate */ - band_gap_reset(dev_priv); + band_gap_reset(display); for_each_dsi_port(port, intel_dsi->ports) { @@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) static void intel_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) glk_dsi_device_ready(encoder); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) bxt_dsi_device_ready(encoder); else vlv_dsi_device_ready(encoder); @@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) glk_dsi_disable_mipi_io(encoder); } -static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port) +static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port) { - return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ? + return display->platform.geminilake || display->platform.broxton ? BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port); } static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(display->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ - i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = display->platform.broxton ? BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A); intel_de_write(display, MIPI_DEVICE_READY(display, port), @@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI * Port A only. MIPI Port C has no similar bit for checking. */ - if ((IS_BROXTON(dev_priv) || port == PORT_A) && + if ((display->platform.broxton || port == PORT_A) && intel_de_wait_for_clear(display, port_ctrl, AFE_LATCHOUT, 30)) drm_err(display->drm, "DSI LP not going Low\n"); @@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { u32 temp = intel_dsi->pixel_overlap; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(display, MIPI_CTRL(display, port), BXT_PIXEL_OVERLAP_CNT_MASK, @@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); u32 temp; temp = intel_de_read(display, port_ctrl); @@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT; - if (IS_BROXTON(dev_priv)) + if (display->platform.broxton) temp |= LANE_CONFIGURATION_DUAL_LINK_A; else temp |= crtc->pipe ? 
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); /* de-assert ip_tg_enable signal */ intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0); @@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum port port; bool glk_cold_boot = false; @@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, * The BIOS may leave the PLL in a wonky state where it doesn't * lock. It needs to be fully powered down to fix it. */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_pll_disable(encoder); bxt_dsi_pll_enable(encoder, pipe_config); } else { @@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, vlv_dsi_pll_enable(encoder, pipe_config); } - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { /* Add MIPI IO reset programming for modeset */ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); @@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0); } - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { /* Disable DPOunit clock gating, can stall pipe */ - intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(display), 0, DPOUNIT_CLOCK_GATE_DISABLE); } - if (!IS_GEMINILAKE(dev_priv)) + if (!display->platform.geminilake) intel_dsi_prepare(encoder, pipe_config); /* Give the panel time to power-on and then deassert its reset */ @@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, msleep(intel_dsi->panel_on_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); - if (IS_GEMINILAKE(dev_priv)) { + if (display->platform.geminilake) { glk_cold_boot = glk_dsi_enable_io(encoder); /* Prepare port in cold boot(s3/s4) scenario */ @@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_dsi_device_ready(encoder); /* Prepare port in normal boot scenario */ - if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot) + if (display->platform.geminilake && !glk_cold_boot) intel_dsi_prepare(encoder, pipe_config); /* Send initialization commands in LP mode */ @@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&i915->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); intel_dsi_vbt_exec_sequence(intel_dsi, 
MIPI_SEQ_BACKLIGHT_OFF); intel_backlight_disable(old_conn_state); @@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state, static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) glk_dsi_clear_device_ready(encoder); else vlv_dsi_clear_device_ready(encoder); @@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(display->drm, "\n"); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { intel_crtc_vblank_off(old_crtc_state); skl_scaler_disable(old_crtc_state); @@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, /* Transition to LP-00 */ intel_dsi_clear_device_ready(encoder); - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { /* Power down DSI regulator to save power */ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, @@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_pll_disable(encoder); } else { vlv_dsi_pll_disable(encoder); - intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(display), DPOUNIT_CLOCK_GATE_DISABLE, 0); } @@ -939,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; @@ -957,13 +953,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * configuration, otherwise accessing DSI registers will hang the * machine. See BSpec North Display Engine registers/MIPI[BXT]. */ - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - !bxt_dsi_pll_is_enabled(dev_priv)) + if ((display->platform.geminilake || display->platform.broxton) && + !bxt_dsi_pll_is_enabled(display)) goto out_put_power; /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE; /* @@ -971,10 +967,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * bit in port C control register does not get set. As a * workaround, check pipe B conf instead. 
*/ - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.valleyview || display->platform.cherryview) && port == PORT_C) enabled = intel_de_read(display, - TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; + TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { @@ -989,7 +985,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) continue; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { u32 tmp = intel_de_read(display, MIPI_CTRL(display, port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; @@ -1177,15 +1173,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_get_pipe_config(encoder, pipe_config); pclk = bxt_dsi_get_pclk(encoder, pipe_config); } else { @@ -1218,7 +1214,6 @@ static void set_dsi_timings(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1253,7 +1248,7 @@ static void set_dsi_timings(struct intel_encoder *encoder, hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); for_each_dsi_port(port, intel_dsi->ports) { - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { /* * Program hdisplay and vdisplay on MIPI transcoder. * This is different from calculated hactive and @@ -1307,7 +1302,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -1327,7 +1321,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { /* * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? 
@@ -1342,7 +1336,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, tmp &= ~READ_REQUEST_PRIORITY_MASK; intel_de_write(display, MIPI_CTRL(display, port), tmp | READ_REQUEST_PRIORITY_HIGH); - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + } else if (display->platform.geminilake || display->platform.broxton) { enum pipe pipe = crtc->pipe; intel_de_rmw(display, MIPI_CTRL(display, port), @@ -1377,7 +1371,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { tmp |= BXT_DPHY_DEFEATURE_EN; if (!is_cmd_mode(intel_dsi)) tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; @@ -1424,7 +1418,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, intel_de_write(display, MIPI_INIT_COUNT(display, port), txclkesc(intel_dsi->escape_clk_div, 100)); - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + if ((display->platform.geminilake || display->platform.broxton) && !intel_dsi->dual_link) { /* * BXT spec says write MIPI_INIT_COUNT for @@ -1461,7 +1455,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, intel_de_write(display, MIPI_LP_BYTECLK(display, port), intel_dsi->lp_byte_clk); - if (IS_GEMINILAKE(dev_priv)) { + if (display->platform.geminilake) { intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port), intel_dsi->lp_byte_clk); /* Shadow of DPHY reg */ @@ -1513,18 +1507,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, static void intel_dsi_unprepare(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) return; for_each_dsi_port(port, intel_dsi->ports) { /* Panel commands can be sent when clock is in LP11 */ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) bxt_dsi_reset_clocks(encoder, port); else vlv_dsi_reset_clocks(encoder, port); @@ -1596,8 +1589,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector) static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct intel_connector *connector = intel_dsi->attached_connector; + struct intel_display *display = to_intel_display(connector); struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns, extra_byte_count, tlpx_ui; u32 ui_num, ui_den; @@ -1645,7 +1638,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) * For GEMINILAKE dphy_param_reg will be programmed in terms of * HS byte clock count for other platform in HS ddr clock count */ - mul = IS_GEMINILAKE(dev_priv) ? 8 : 2; + mul = display->platform.geminilake ? 
8 : 2; ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); @@ -1653,7 +1646,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); if (prepare_cnt > PREPARE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n", + drm_dbg_kms(display->drm, "prepare count too high %u\n", prepare_cnt); prepare_cnt = PREPARE_CNT_MAX; } @@ -1674,7 +1667,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) exit_zero_cnt += 1; if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n", + drm_dbg_kms(display->drm, "exit zero count too high %u\n", exit_zero_cnt); exit_zero_cnt = EXIT_ZERO_CNT_MAX; } @@ -1685,7 +1678,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) * ui_den, ui_num * mul); if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n", + drm_dbg_kms(display->drm, "clock zero count too high %u\n", clk_zero_cnt); clk_zero_cnt = CLK_ZERO_CNT_MAX; } @@ -1695,7 +1688,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); if (trail_cnt > TRAIL_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n", + drm_dbg_kms(display->drm, "trail count too high %u\n", trail_cnt); trail_cnt = TRAIL_CNT_MAX; } @@ -1761,7 +1754,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; @@ -1770,7 +1763,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. */ - if (IS_VALLEYVIEW(dev_priv)) + if (display->platform.valleyview) return 320000; /* @@ -1778,7 +1771,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) * picture gets unstable, despite that values are * correct for DSI PLL and DE PLL. 
*/ - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) return 158400; return 0; @@ -1903,9 +1896,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = { { } }; -void vlv_dsi_init(struct drm_i915_private *dev_priv) +void vlv_dsi_init(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *connector; @@ -1914,16 +1906,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) enum port port; enum pipe pipe; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); /* There is no detection method for MIPI so rely on VBT */ if (!intel_bios_is_dsi_present(display, &port)) return; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE; + if (display->platform.geminilake || display->platform.broxton) + display->dsi.mmio_base = BXT_MIPI_BASE; else - dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE; + display->dsi.mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) @@ -1938,12 +1930,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) encoder = &intel_dsi->base; intel_dsi->attached_connector = connector; - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->compute_config = intel_dsi_compute_config; encoder->pre_enable = intel_dsi_pre_enable; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) encoder->enable = bxt_dsi_enable; encoder->disable = intel_dsi_disable; encoder->post_disable = intel_dsi_post_disable; @@ -1963,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI * port C. BXT isn't limited like this. */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) encoder->pipe_mask = ~0; else if (port == PORT_A) encoder->pipe_mask = BIT(PIPE_A); @@ -1979,10 +1971,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. 
*/ @@ -1998,18 +1990,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - drm_dbg_kms(&dev_priv->drm, "no device found\n"); + drm_dbg_kms(display->drm, "no device found\n"); goto err; } /* Use clock read-back from current hw-state for fastboot */ current_mode = intel_encoder_current_mode(encoder); if (current_mode) { - drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n", + drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n", intel_dsi->pclk, current_mode->clock); if (intel_fuzzy_clock_check(intel_dsi->pclk, current_mode->clock)) { - drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n"); + drm_dbg_kms(display->drm, "Using GOP pclk\n"); intel_dsi->pclk = current_mode->clock; } @@ -2021,7 +2013,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_dsi_vbt_gpio_init(intel_dsi, intel_dsi_get_hw_state(encoder, &pipe)); - drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs, + drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs); @@ -2030,12 +2022,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_connector_attach_encoder(connector, encoder); - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(connector); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!intel_panel_preferred_fixed_mode(connector)) { - drm_dbg_kms(&dev_priv->drm, "no fixed mode\n"); + drm_dbg_kms(display->drm, "no fixed mode\n"); goto err_cleanup_connector; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h index 277bacfbc551..ff349b5876c2 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -7,14 +7,14 @@ #define __VLV_DSI_H__ enum port; -struct drm_i915_private; struct intel_crtc_state; +struct intel_display; struct intel_dsi; #ifdef I915 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state); -void vlv_dsi_init(struct drm_i915_private *dev_priv); +void vlv_dsi_init(struct intel_display *display); #else static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { @@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) { return 0; } -static inline void vlv_dsi_init(struct drm_i915_private *dev_priv) +static inline void vlv_dsi_init(struct intel_display *display) { } #endif diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 2ed47e7d1051..7ce924a5ef90 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt, return dsi_clk_khz; } -static int dsi_calc_mnp(struct drm_i915_private *dev_priv, +static int dsi_calc_mnp(struct intel_display *display, struct intel_crtc_state *config, int target_dsi_clk) { @@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { - drm_err(&dev_priv->drm, "DSI CLK Out of Range\n"); + drm_err(display->drm, "DSI CLK Out of Range\n"); return -ECHRNG; } - if 
(IS_CHERRYVIEW(dev_priv)) { + if (display->platform.cherryview) { ref_clk = 100000; n = 4; m_min = 70; @@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, static int vlv_dsi_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock; u32 pll_ctl, pll_div; u32 m = 0, p = 0, n; - int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000; + int refclk = display->platform.cherryview ? 100000 : 25000; int i; pll_ctl = config->dsi_pll.ctrl; @@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, p--; if (!p) { - drm_err(&dev_priv->drm, "wrong P1 divisor\n"); + drm_err(display->drm, "wrong P1 divisor\n"); return 0; } @@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, } if (i == ARRAY_SIZE(lfsr_converts)) { - drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); + drm_err(display->drm, "wrong m_seed programmed\n"); return 0; } @@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, int vlv_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int pclk, dsi_clk, ret; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); - ret = dsi_calc_mnp(dev_priv, config, dsi_clk); + ret = dsi_calc_mnp(display, config, dsi_clk); if (ret) { - drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n"); + drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n"); return ret; } @@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, config->dsi_pll.ctrl |= DSI_PLL_VCO_EN; - drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", + drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n", config->dsi_pll.div, config->dsi_pll.ctrl); pclk = vlv_dsi_pclk(encoder, config); @@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); @@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, DSI_PLL_LOCK, 20)) { vlv_cck_put(dev_priv); - drm_err(&dev_priv->drm, "DSI PLL lock failed\n"); + drm_err(display->drm, "DSI PLL lock failed\n"); return; } vlv_cck_put(dev_priv); - drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); + drm_dbg_kms(display->drm, "DSI PLL locked\n"); } void vlv_dsi_pll_disable(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); @@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder) vlv_cck_put(dev_priv); } -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) +bool bxt_dsi_pll_is_enabled(struct intel_display *display) { bool enabled; u32 val; u32 mask; mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED; - val = 
intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); + val = intel_de_read(display, BXT_DSI_PLL_ENABLE); enabled = (val & mask) == mask; if (!enabled) @@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) * times, and since accessing DSI registers with invalid dividers * causes a system hang. */ - val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); - if (IS_GEMINILAKE(dev_priv)) { + val = intel_de_read(display, BXT_DSI_PLL_CTL); + if (display->platform.geminilake) { if (!(val & BXT_DSIA_16X_MASK)) { - drm_dbg(&dev_priv->drm, - "Invalid PLL divider (%08x)\n", val); + drm_dbg_kms(display->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } else { if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) { - drm_dbg(&dev_priv->drm, - "Invalid PLL divider (%08x)\n", val); + drm_dbg_kms(display->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } @@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) void bxt_dsi_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); - intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); + intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); /* * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. */ - if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE, + if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timeout waiting for PLL lock deassertion\n"); } u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 pll_ctl, pll_div; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); @@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder, u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 pclk; - config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); + config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL); pclk = bxt_dsi_pclk(encoder, config); - drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); + drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk); return pclk; } @@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT); } -static void glk_dsi_program_esc_clock(struct drm_device *dev, - const struct intel_crtc_state *config) +static void glk_dsi_program_esc_clock(struct intel_display *display, + const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dsi_rate = 0; u32 pll_ratio = 0; u32 ddr_clk = 0; @@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, txesc2_div = min_t(u32, div2_value, 10); - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, + intel_de_write(display, MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, + intel_de_write(display, 
MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ -static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, +static void bxt_dsi_program_clocks(struct intel_display *display, enum port port, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; u32 dsi_rate = 0; u32 pll_ratio = 0; @@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, u32 mipi_8by3_divider; /* Clear old configurations */ - tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); + tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); @@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower); tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper); - intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); + intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp); } int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; @@ -494,7 +495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, */ dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ); - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN; dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX; } else { @@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, } if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Can't get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else - drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); + drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n"); /* * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x @@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, /* As per recommendation from hardware team, * Prog PVD ratio =1 if dsi ratio <= 50 */ - if (IS_BROXTON(dev_priv) && dsi_ratio <= 50) + if (display->platform.broxton && dsi_ratio <= 50) config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; pclk = bxt_dsi_pclk(encoder, config); @@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); /* Configure PLL vales */ - intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); - intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL); + intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); + intel_de_posting_read(display, BXT_DSI_PLL_CTL); /* Program TX, RX, Dphy clocks */ - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { for_each_dsi_port(port, intel_dsi->ports) - bxt_dsi_program_clocks(encoder->base.dev, port, config); + bxt_dsi_program_clocks(display, port, config); } else { - 
glk_dsi_program_esc_clock(encoder->base.dev, config); + glk_dsi_program_esc_clock(display, config); } /* Enable DSI PLL */ - intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); + intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); /* Timeout and fail if PLL not locked */ - if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, + if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timed out waiting for DSI PLL to lock\n"); return; } - drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); + drm_dbg_kms(display->drm, "DSI PLL locked\n"); } void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; /* Clear old configurations */ - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h index f975660fa609..f26e31a7dd69 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h @@ -9,7 +9,6 @@ #include <linux/types.h> enum port; -struct drm_i915_private; struct intel_crtc_state; struct intel_display; struct intel_encoder; @@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); #ifdef I915 -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +bool bxt_dsi_pll_is_enabled(struct intel_display *display); void assert_dsi_pll_enabled(struct intel_display *display); void assert_dsi_pll_disabled(struct intel_display *display); #else -static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) +static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display) { return false; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 388f90784d8a..f566191d843b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) i915_gem_object_evictable(obj)) assert_object_held(obj); #endif - return mr && (mr->type == INTEL_MEMORY_LOCAL || - mr->type == INTEL_MEMORY_STOLEN_LOCAL); + return mr && intel_memory_type_is_local(mr->type); } /** diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index f6c59f20832f..46a5aa4ab9c8 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local) +{ + *is_present = pte & GEN8_PAGE_PRESENT; + *is_local = pte & GEN12_GGTT_PTE_LM; + + return pte & GEN12_GGTT_PTE_ADDR_MASK; +} + static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt) { struct intel_gt *gt = ggtt->vm.gt; @@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) writeq(pte, addr); } +static gen8_pte_t gen8_get_pte(void __iomem *addr) +{ + return readq(addr); +} + static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct 
i915_address_space *vm, ggtt->invalidate(ggtt); } +static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local); +} + static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) @@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, + bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; + + return vm->pte_decode(ioread32(pte), is_present, is_local); +} + /* * Binds an object into the global gtt with the specified cache level. * The object will be accessible to the GPU via commands whose operands @@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm, vm->clear_range(vm, vma_res->start, vma_res->vma_size); } +dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + return ggtt->vm.read_entry(vm, offset, is_present, is_local); +} + /* * Reserve the top of the GuC address space for firmware images. Addresses * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC, @@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.scratch_range = gen8_ggtt_clear_range; ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + ggtt->vm.read_entry = gen8_ggtt_read_entry; /* * Serialize GTT updates with aperture access on BXT if VT-d is on, @@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) else ggtt->vm.pte_encode = gen8_ggtt_pte_encode; + ggtt->vm.pte_decode = gen8_ggtt_pte_decode; + return ggtt_probe_common(ggtt, size); } @@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr, return pte; } +static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local) +{ + *is_present = pte & GEN6_PTE_VALID; + *is_local = false; + + return ((pte & 0xff0) << 28) | (pte & ~0xfff); +} + static int gen6_gmch_probe(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; @@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.scratch_range = gen6_ggtt_clear_range; ggtt->vm.insert_page = gen6_ggtt_insert_page; ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.read_entry = gen6_ggtt_read_entry; ggtt->vm.cleanup = gen6_gmch_remove; ggtt->invalidate = gen6_ggtt_invalidate; @@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) else ggtt->vm.pte_encode = snb_pte_encode; + ggtt->vm.pte_decode = gen6_pte_decode; + ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c index 59eed0a0ce90..c5f5f0bdfb2c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c @@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm, intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); } +static dma_addr_t gmch_ggtt_read_entry(struct 
i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local) +{ + return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT, + is_present, is_local); +} + static void gmch_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, unsigned int pat_index, @@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_entries = gmch_ggtt_insert_entries; ggtt->vm.clear_range = gmch_ggtt_clear_range; ggtt->vm.scratch_range = gmch_ggtt_clear_range; + ggtt->vm.read_entry = gmch_ggtt_read_entry; ggtt->vm.cleanup = gmch_ggtt_remove; ggtt->invalidate = gmch_ggtt_invalidate; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 0a36ea751b63..9d3a3ad567a0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -312,6 +312,7 @@ struct i915_address_space { u64 (*pte_encode)(dma_addr_t addr, unsigned int pat_index, u32 flags); /* Create a valid PTE */ + dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local); #define PTE_READ_ONLY BIT(0) #define PTE_LM BIT(1) @@ -340,6 +341,8 @@ struct i915_address_space { struct i915_vma_resource *vma_res, unsigned int pat_index, u32 flags); + dma_addr_t (*read_entry)(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local); void (*cleanup)(struct i915_address_space *vm); void (*foreach)(struct i915_address_space *vm, @@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm, void intel_ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma_resource *vma_res); +dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm, + u64 offset, bool *is_present, bool *is_local); + int i915_ggtt_probe_hw(struct drm_i915_private *i915); int i915_ggtt_init_hw(struct drm_i915_private *i915); int i915_ggtt_enable_hw(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 64e9317f58fb..8731f275fdd9 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore) static bool gen5_rps_enable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_display *display = &i915->display; struct intel_uncore *uncore = rps_to_uncore(rps); u8 fstart, vstart; u32 rgvmodectl; @@ -608,7 +609,7 @@ static bool gen5_rps_enable(struct intel_rps *rps) rps->ips.last_time2 = ktime_get_raw_ns(); spin_lock(&i915->irq_lock); - ilk_enable_display_irq(i915, DE_PCU_EVENT); + ilk_enable_display_irq(display, DE_PCU_EVENT); spin_unlock(&i915->irq_lock); spin_unlock_irq(&mchdev_lock); @@ -621,13 +622,14 @@ static bool gen5_rps_enable(struct intel_rps *rps) static void gen5_rps_disable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_display *display = &i915->display; struct intel_uncore *uncore = rps_to_uncore(rps); u16 rgvswctl; spin_lock_irq(&mchdev_lock); spin_lock(&i915->irq_lock); - ilk_disable_display_irq(i915, DE_PCU_EVENT); + ilk_disable_display_irq(display, DE_PCU_EVENT); spin_unlock(&i915->irq_lock); rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index eedd1865bb98..62d14f82256f 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) unsigned int flags; u64 
start, end, size; struct drm_mm_node *node; + intel_wakeref_t wakeref; int ret; if (high_gm) { @@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) } mutex_lock(>->ggtt->vm.mutex); - mmio_hw_access_pre(gt); + wakeref = mmio_hw_access_pre(gt); ret = i915_gem_gtt_insert(>->ggtt->vm, NULL, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); - mmio_hw_access_post(gt); + mmio_hw_access_post(gt, wakeref); mutex_unlock(>->ggtt->vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", @@ -226,7 +227,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&gvt->gt->ggtt->vm.mutex); - intel_runtime_pm_put_unchecked(uncore->rpm); + intel_runtime_pm_put(uncore->rpm, wakeref); return -ENOSPC; } diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index baccbf1761b7..673534f061ef 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) .diff = 0, }; struct diff_mmio *node, *next; + intel_wakeref_t wakeref; INIT_LIST_HEAD(¶m.diff_mmio_list); mutex_lock(&gvt->lock); spin_lock_bh(&gvt->scheduler.mmio_context_lock); - mmio_hw_access_pre(gvt->gt); + wakeref = mmio_hw_access_pre(gvt->gt); /* Recognize all the diff mmios to list. */ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, ¶m); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); spin_unlock_bh(&gvt->scheduler.mmio_context_lock); mutex_unlock(&gvt->lock); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2fa7ca19ba5d..ae9b0ded3651 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index) static void ggtt_invalidate(struct intel_gt *gt) { - mmio_hw_access_pre(gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gt); intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - mmio_hw_access_post(gt); + mmio_hw_access_post(gt, wakeref); } static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 01d890999f25..1d10c16e6465 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -570,14 +570,15 @@ enum { GVT_FAILSAFE_GUEST_ERR, }; -static inline void mmio_hw_access_pre(struct intel_gt *gt) +static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt) { - intel_runtime_pm_get(gt->uncore->rpm); + return intel_runtime_pm_get(gt->uncore->rpm); } -static inline void mmio_hw_access_post(struct intel_gt *gt) +static inline void mmio_hw_access_post(struct intel_gt *gt, + intel_wakeref_t wakeref) { - intel_runtime_pm_put_unchecked(gt->uncore->rpm); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); } /** diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4efee6797873..e6e9010462e3 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -264,6 +264,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, { struct intel_gvt *gvt = vgpu->gvt; unsigned int fence_num = offset_to_fence_num(off); + intel_wakeref_t wakeref; int ret; ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes); @@ -271,10 +272,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; write_vreg(vgpu, off, p_data, 
bytes); - mmio_hw_access_pre(gvt->gt); + wakeref = mmio_hw_access_pre(gvt->gt); intel_vgpu_write_fence(vgpu, fence_num, vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); return 0; } @@ -513,7 +514,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) switch (wrpll_ctl & WRPLL_REF_MASK) { case WRPLL_REF_PCH_SSC: - refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; + refclk = 135000; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -544,7 +545,7 @@ out: static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; - int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; + int refclk = 100000; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; struct dpll clock = {}; @@ -1975,10 +1976,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, vgpu == gvt->scheduler.engine_owner[engine->id] || offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) || offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); vgpu_vreg(vgpu, offset) = intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); @@ -3209,10 +3212,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt) int i, id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); for (i = 0; i < vgpu_fence_sz(vgpu); i++) intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } } @@ -3233,8 +3238,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt) int id; idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { - mmio_hw_access_pre(gvt->gt); + intel_wakeref_t wakeref; + + wakeref = mmio_hw_access_pre(gvt->gt); intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); - mmio_hw_access_post(gvt->gt); + mmio_hw_access_post(gvt->gt, wakeref); } } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 9f97f743aa71..6c2d68e88266 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; if (!vgpu_data->active) return; @@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for_each_engine(engine, vgpu->gvt->gt, id) { if (scheduler->engine_owner[engine->id] == vgpu) { @@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0d9e263913ff..967c0501e91e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ 
-66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data) struct drm_i915_private *i915 = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); - intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); intel_gt_info_print(&to_gt(i915)->info, &p); diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index ce3cc93ea211..f5262b8ad237 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -578,7 +578,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ intel_dram_detect(dev_priv); - intel_bw_init_hw(dev_priv); + intel_bw_init_hw(display); return 0; @@ -622,11 +622,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) * Perform any steps necessary to make the driver available via kernel * internal or userspace interfaces. */ -static void i915_driver_register(struct drm_i915_private *dev_priv) +static int i915_driver_register(struct drm_i915_private *dev_priv) { struct intel_display *display = &dev_priv->display; struct intel_gt *gt; unsigned int i; + int ret; i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); @@ -634,10 +635,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) intel_vgpu_register(dev_priv); /* Reveal our presence to userspace */ - if (drm_dev_register(&dev_priv->drm, 0)) { - drm_err(&dev_priv->drm, - "Failed to register driver for userspace access!\n"); - return; + ret = drm_dev_register(&dev_priv->drm, 0); + if (ret) { + i915_probe_error(dev_priv, + "Failed to register driver for userspace access!\n"); + drm_dev_unregister(&dev_priv->drm); + i915_pmu_unregister(dev_priv); + i915_gem_driver_unregister(dev_priv); + return ret; } i915_debugfs_register(dev_priv); @@ -660,6 +665,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) if (i915_switcheroo_register(dev_priv)) drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); + + return 0; } /** @@ -834,7 +841,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_gem; - i915_driver_register(i915); + ret = i915_driver_register(i915); + if (ret) + goto out_cleanup_gem; enable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -845,6 +854,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_cleanup_gem: + intel_pxp_fini(i915); i915_gem_suspend(i915); i915_gem_driver_remove(i915); i915_gem_driver_release(i915); @@ -981,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_dp_mst_suspend(display); intel_irq_suspend(i915); - intel_hpd_cancel_work(i915); + intel_hpd_cancel_work(display); if (HAS_DISPLAY(i915)) intel_display_driver_suspend_access(display); @@ -1064,7 +1074,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_display_driver_suspend(display); intel_irq_suspend(dev_priv); - intel_hpd_cancel_work(dev_priv); + intel_hpd_cancel_work(display); if (HAS_DISPLAY(dev_priv)) intel_display_driver_suspend_access(display); @@ -1201,7 +1211,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_pps_unlock_regs_wa(display); - intel_init_pch_refclk(dev_priv); + intel_init_pch_refclk(display); /* * Interrupts have to be enabled before any batches are run. 
If not the @@ -1227,7 +1237,7 @@ static int i915_drm_resume(struct drm_device *dev) if (HAS_DISPLAY(dev_priv)) intel_display_driver_resume_access(display); - intel_hpd_init(dev_priv); + intel_hpd_init(display); intel_display_driver_resume(display); @@ -1235,7 +1245,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_display_driver_enable_user_access(display); drm_kms_helper_poll_enable(dev); } - intel_hpd_poll_disable(dev_priv); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -1575,7 +1585,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(&dev_priv->uncore); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) - intel_hpd_poll_enable(dev_priv); + intel_hpd_poll_enable(display); drm_dbg(&dev_priv->drm, "Device suspended\n"); return 0; @@ -1633,11 +1643,11 @@ static int intel_runtime_resume(struct device *kdev) * everyone else do it here. */ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); + intel_hpd_init(display); + intel_hpd_poll_disable(display); } - skl_watermark_ipc_update(dev_priv); + skl_watermark_ipc_update(display); enable_rpm_wakeref_asserts(rpm); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 54538b6f85df..236c48d282e4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -306,6 +306,7 @@ struct drm_i915_private { INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, INTEL_DRAM_GDDR_ECC, + __INTEL_DRAM_TYPE_MAX, } type; u8 num_qgv_points; u8 num_psf_gv_points; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37ca4a35daf2..c1f938a1da44 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -277,14 +277,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir); if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_ack(display, &eir, &dpinvgtt); /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT)) @@ -306,12 +306,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_handler(display, eir, dpinvgtt); - valleyview_pipestat_irq_handler(dev_priv, pipe_stats); + valleyview_pipestat_irq_handler(display, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -367,14 +367,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_ack(display, &eir, &dpinvgtt); /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT | @@ -392,12 +392,12 @@ static irqreturn_t 
cherryview_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_handler(display, eir, dpinvgtt); - valleyview_pipestat_irq_handler(dev_priv, pipe_stats); + valleyview_pipestat_irq_handler(display, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) static irqreturn_t ilk_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; + struct intel_display *display = &i915->display; void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) if (de_iir) { raw_reg_write(regs, DEIIR, de_iir); if (DISPLAY_VER(i915) >= 7) - ivb_display_irq_handler(i915, de_iir); + ivb_display_irq_handler(display, de_iir); else - ilk_display_irq_handler(i915, de_iir); + ilk_display_irq_handler(display, de_iir); ret = IRQ_HANDLED; } @@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore); u32 master_ctl; @@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & ~GEN8_GT_IRQS) { disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - gen8_de_irq_handler(dev_priv, master_ctl); + gen8_de_irq_handler(display, master_ctl); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } @@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; + struct intel_display *display = &i915->display; void __iomem * const regs = intel_uncore_regs(&i915->uncore); struct intel_gt *gt = to_gt(i915); u32 master_ctl; @@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & GEN11_DISPLAY_IRQ) - gen11_display_irq_handler(i915); + gen11_display_irq_handler(display); - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(display, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs) static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; + struct intel_display *display = &i915->display; struct intel_gt *gt = to_gt(i915); void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 master_tile_ctl, master_ctl; @@ -641,13 +645,13 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) gen11_gt_irq_handler(gt, master_ctl); if (master_ctl & GEN11_DISPLAY_IRQ) - gen11_display_irq_handler(i915); + gen11_display_irq_handler(display); - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl); dg1_master_intr_enable(regs); - 
gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(display, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -691,24 +695,27 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) static void valleyview_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); gen5_gt_irq_reset(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_reset(dev_priv); + vlv_display_irq_reset(display); spin_unlock_irq(&dev_priv->irq_lock); } static void gen8_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; gen8_master_intr_disable(intel_uncore_regs(uncore)); gen8_gt_irq_reset(to_gt(dev_priv)); - gen8_display_irq_reset(dev_priv); + gen8_display_irq_reset(display); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); if (HAS_PCH_SPLIT(dev_priv)) @@ -718,13 +725,14 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore)); gen11_gt_irq_reset(gt); - gen11_display_irq_reset(dev_priv); + gen11_display_irq_reset(display); gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); @@ -732,6 +740,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; struct intel_gt *gt; unsigned int i; @@ -741,7 +750,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) for_each_gt(gt, dev_priv, i) gen11_gt_irq_reset(gt); - gen11_display_irq_reset(dev_priv); + gen11_display_irq_reset(display); gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); @@ -751,6 +760,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0); @@ -761,23 +771,27 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_reset(dev_priv); + vlv_display_irq_reset(display); spin_unlock_irq(&dev_priv->irq_lock); } static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen5_gt_irq_postinstall(to_gt(dev_priv)); - ilk_de_irq_postinstall(dev_priv); + ilk_de_irq_postinstall(display); } static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen5_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_postinstall(dev_priv); + vlv_display_irq_postinstall(display); spin_unlock_irq(&dev_priv->irq_lock); intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); @@ -786,20 +800,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 
{ + struct intel_display *display = &dev_priv->display; + gen8_gt_irq_postinstall(to_gt(dev_priv)); - gen8_de_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(display); gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore)); } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; gen11_gt_irq_postinstall(gt); - gen11_de_irq_postinstall(dev_priv); + gen11_de_irq_postinstall(display); gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); @@ -809,6 +826,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; struct intel_gt *gt; @@ -819,7 +837,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); - dg1_de_irq_postinstall(dev_priv); + dg1_de_irq_postinstall(display); dg1_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); @@ -827,10 +845,12 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen8_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_postinstall(dev_priv); + vlv_display_irq_postinstall(display); spin_unlock_irq(&dev_priv->irq_lock); intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); @@ -900,9 +920,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, static void i915_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; - i9xx_display_irq_reset(dev_priv); + i9xx_display_irq_reset(display); gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); @@ -911,6 +932,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) static void i915_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -932,7 +954,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) enable_mask |= I915_ASLE_INTERRUPT; } - if (I915_HAS_HOTPLUG(dev_priv)) { + if (HAS_HOTPLUG(dev_priv)) { dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } @@ -942,16 +964,17 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - i915_enable_asle_pipestat(dev_priv); + i915_enable_asle_pipestat(display); } static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -972,13 +995,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; - if (I915_HAS_HOTPLUG(dev_priv) && + if (HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); @@ -992,9 +1015,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); - i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); + i915_pipestat_irq_handler(display, iir, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -1006,9 +1029,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) static void i965_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; - i9xx_display_irq_reset(dev_priv); + i9xx_display_irq_reset(display); gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); @@ -1036,6 +1060,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915) static void i965_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -1064,17 +1089,18 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
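
The legacy handlers above keep a strict two-phase shape: acknowledge all interrupt sources while IIR is still live, clear IIR, and only then run the heavier processing. A condensed, hypothetical version of that shape (not the literal driver code, which loops and tracks error state as well):

        static irqreturn_t example_irq_handler(int irq, void *arg)
        {
                struct drm_i915_private *dev_priv = arg;
                struct intel_display *display = &dev_priv->display;
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (!iir)
                        return IRQ_NONE;

                /* Phase 1: ack, so new events can latch while we work. */
                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(display);
                i9xx_pipestat_irq_ack(display, iir, pipe_stats);
                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                /* Phase 2: handle the events recorded above. */
                if (hotplug_status)
                        i9xx_hpd_irq_handler(display, hotplug_status);
                i915_pipestat_irq_handler(display, iir, pipe_stats);

                return IRQ_HANDLED;
        }
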
*/ spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - i915_enable_asle_pipestat(dev_priv); + i915_enable_asle_pipestat(display); } static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -1096,11 +1122,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); @@ -1119,9 +1145,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); - i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); + i965_pipestat_irq_handler(display, iir, pipe_stats); } while (0); pmu_irq_stats(dev_priv, IRQ_HANDLED); @@ -1280,6 +1306,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; int irq = to_pci_dev(dev_priv->drm.dev)->irq; if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled)) @@ -1289,7 +1316,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) free_irq(irq, dev_priv); - intel_hpd_cancel_work(dev_priv); + intel_hpd_cancel_work(display); dev_priv->irqs_enabled = false; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c5064eebe063..49beab8e324d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1077,6 +1077,7 @@ #define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C) #define BXT_GMBUS_GATING_DIS (1 << 14) +#define DG2_DPFC_GATING_DIS REG_BIT(31) #define GEN9_CLKGATE_DIS_5 _MMIO(0x46540) #define DPCE_GATING_DIS REG_BIT(17) @@ -4242,6 +4243,11 @@ enum skl_power_gate { #define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A) #define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7) +#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114 +#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114 +#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B) +#define MTL_DPFC_GATING_DIS REG_BIT(6) + #define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) #define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8) #define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4) diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index d40ee1b42110..59bd603e6deb 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private 
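
The i915_reg.h additions above follow the file's usual conventions: _MMIO_PIPE() picks the A or B register instance from the two hard-coded offsets, and REG_BIT() builds the bit mask. A hypothetical use of the new MTL define, sketched under the assumption that a read-modify-write via intel_de_rmw() is how such gating bits are normally set; this is not necessarily how the eventual workaround patch applies it:

        static void example_disable_dpfc_gating(struct intel_display *display,
                                                enum pipe pipe)
        {
                intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(pipe),
                             MTL_DPFC_GATING_DIS, MTL_DPFC_GATING_DIS);
        }
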
*i915, return NULL; } +bool intel_memory_type_is_local(enum intel_memory_type mem_type) +{ + switch (mem_type) { + case INTEL_MEMORY_LOCAL: + case INTEL_MEMORY_STOLEN_LOCAL: + return true; + default: + return false; + } +} + /** * intel_memory_region_reserve - Reserve a memory range * @mem: The region for which we want to reserve a range. @@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem, return err; } -static const char *region_type_str(u16 type) +const char *intel_memory_type_str(enum intel_memory_type type) { switch (type) { case INTEL_MEMORY_SYSTEM: @@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->instance = instance; snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u", - region_type_str(type), instance); + intel_memory_type_str(type), instance); mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 5973b6fe13cf..b3b75be9ced5 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -85,6 +85,8 @@ struct intel_memory_region { void *region_private; }; +bool intel_memory_type_is_local(enum intel_memory_type mem_type); + struct intel_memory_region * intel_memory_region_lookup(struct drm_i915_private *i915, u16 class, u16 instance); @@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915); struct intel_memory_region * intel_memory_region_by_type(struct drm_i915_private *i915, enum intel_memory_type mem_type); +const char *intel_memory_type_str(enum intel_memory_type type); __printf(2, 3) void intel_memory_region_set_name(struct intel_memory_region *mem, diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 48836ef52d40..a2894a56e18f 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -7,8 +7,6 @@ #ifndef INTEL_WAKEREF_H #define INTEL_WAKEREF_H -#include <drm/drm_print.h> - #include <linux/atomic.h> #include <linux/bitfield.h> #include <linux/bits.h> @@ -16,11 +14,13 @@ #include <linux/mutex.h> #include <linux/refcount.h> #include <linux/ref_tracker.h> -#include <linux/slab.h> -#include <linux/stackdepot.h> #include <linux/timer.h> #include <linux/workqueue.h> +struct drm_printer; +struct intel_runtime_pm; +struct intel_wakeref; + typedef struct ref_tracker *intel_wakeref_t; #define INTEL_REFTRACK_DEAD_COUNT 16 @@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t; #define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif -struct intel_runtime_pm; -struct intel_wakeref; - struct intel_wakeref_ops { int (*get)(struct intel_wakeref *wf); int (*put)(struct intel_wakeref *wf); diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index f60eedb0e92c..eee5c4f45a43 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -33,8 +33,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type) DRAM_TYPE_STR(DDR4), DRAM_TYPE_STR(LPDDR3), DRAM_TYPE_STR(LPDDR4), + DRAM_TYPE_STR(DDR5), + DRAM_TYPE_STR(LPDDR5), + DRAM_TYPE_STR(GDDR), + DRAM_TYPE_STR(GDDR_ECC), }; + BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX); + if (type >= ARRAY_SIZE(str)) type = INTEL_DRAM_UNKNOWN; @@ -444,8 +450,6 @@ skl_get_dram_info(struct drm_i915_private *i915) int ret; dram_info->type = skl_get_dram_type(i915); - drm_dbg_kms(&i915->drm, "DRAM type: 
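
The intel_dram hunk above pairs the new enum entries with a BUILD_BUG_ON so the string table and the enum can never drift apart silently: adding an enum value without the matching string stops the build. The same idiom in a minimal, self-contained form; all example_ names here are hypothetical:

        #include <linux/build_bug.h>
        #include <linux/kernel.h>

        enum example_dram_type {
                EXAMPLE_DRAM_UNKNOWN,
                EXAMPLE_DRAM_DDR4,
                EXAMPLE_DRAM_DDR5,
                __EXAMPLE_DRAM_TYPE_MAX,
        };

        static const char *example_dram_type_str(enum example_dram_type type)
        {
                static const char * const str[] = {
                        [EXAMPLE_DRAM_UNKNOWN] = "unknown",
                        [EXAMPLE_DRAM_DDR4] = "DDR4",
                        [EXAMPLE_DRAM_DDR5] = "DDR5",
                };

                /* Fails to compile if an enum value lacks a string. */
                BUILD_BUG_ON(ARRAY_SIZE(str) != __EXAMPLE_DRAM_TYPE_MAX);

                if (type >= ARRAY_SIZE(str))
                        type = EXAMPLE_DRAM_UNKNOWN;

                return str[type];
        }
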
%s\n", - intel_dram_type_str(dram_info->type)); ret = skl_dram_get_channels_info(i915); if (ret) @@ -560,10 +564,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915) dram_info->type != type); drm_dbg_kms(&i915->drm, - "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n", + "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n", i - BXT_D_CR_DRP0_DUNIT_START, - dimm.size, dimm.width, dimm.ranks, - intel_dram_type_str(type)); + dimm.size, dimm.width, dimm.ranks); if (valid_ranks == 0) valid_ranks = dimm.ranks; @@ -730,6 +733,10 @@ void intel_dram_detect(struct drm_i915_private *i915) ret = bxt_get_dram_info(i915); else ret = skl_get_dram_info(i915); + + drm_dbg_kms(&i915->drm, "DRAM type: %s\n", + intel_dram_type_str(dram_info->type)); + if (ret) return; diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile index 3d9d4d40fb80..7cca66f00a38 100644 --- a/drivers/gpu/drm/imagination/Makefile +++ b/drivers/gpu/drm/imagination/Makefile @@ -12,8 +12,10 @@ powervr-y := \ pvr_fw.o \ pvr_fw_meta.o \ pvr_fw_mips.o \ + pvr_fw_riscv.o \ pvr_fw_startstop.o \ pvr_fw_trace.o \ + pvr_fw_util.o \ pvr_gem.o \ pvr_hwrt.o \ pvr_job.o \ diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c index 6b77c9b4bde8..c7ce7daaa87a 100644 --- a/drivers/gpu/drm/imagination/pvr_debugfs.c +++ b/drivers/gpu/drm/imagination/pvr_debugfs.c @@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor) struct drm_device *drm_dev = minor->dev; struct pvr_device *pvr_dev = to_pvr_device(drm_dev); struct dentry *root = minor->debugfs_root; - size_t i; - for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) { + for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) { const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i]; struct dentry *dir; diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 1704c0268589..8b9ba4983c4c 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/types.h> @@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) return 0; } +static int pvr_device_reset_init(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct reset_control *reset; + + reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL); + if (IS_ERR(reset)) + return dev_err_probe(drm_dev->dev, PTR_ERR(reset), + "failed to get gpu reset line\n"); + + pvr_dev->reset = reset; + + return 0; +} + /** * pvr_device_process_active_queues() - Process all queue related events. 
* @pvr_dev: PowerVR device to check @@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev) mutex_unlock(&pvr_dev->queues.lock); } +static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev) +{ + u32 events; + + WARN_ON_ONCE(!pvr_dev->has_safety_events); + + events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS); + + return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0; +} + +static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev) +{ + WARN_ON_ONCE(!pvr_dev->has_safety_events); + + pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR, + ROGUE_CR_EVENT_CLEAR_SAFETY_EN); +} + +static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + u32 events; + + WARN_ON_ONCE(!pvr_dev->has_safety_events); + + events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE); + + /* Handle only these events on the host and leave the rest to the FW. */ + events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN | + ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN; + + pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events); + + if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) { + u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS); + + pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw); + + drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw); + } + + if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) { + /* + * The watchdog timer is disabled by the driver so this event + * should never be fired. + */ + drm_info(drm_dev, "Safety event: Watchdog timeout\n"); + } +} + static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data) { struct pvr_device *pvr_dev = data; + struct drm_device *drm_dev = from_pvr_device(pvr_dev); irqreturn_t ret = IRQ_NONE; /* We are in the threaded handler, we can keep dequeuing events until we @@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data) pvr_device_process_active_queues(pvr_dev); } - pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev); + pm_runtime_mark_last_busy(drm_dev->dev); ret = IRQ_HANDLED; } - /* Unmask FW irqs before returning, so new interrupts can be received. */ - pvr_fw_irq_enable(pvr_dev); + if (pvr_dev->has_safety_events) { + int err; + + /* + * Ensure the GPU is powered on since some safety events (such + * as ECC faults) can happen outside of job submissions, which + * are otherwise the only time a power reference is held. + */ + err = pvr_power_get(pvr_dev); + if (err) { + drm_err_ratelimited(drm_dev, + "%s: could not take power reference (%d)\n", + __func__, err); + return ret; + } + + while (pvr_device_safety_irq_pending(pvr_dev)) { + pvr_device_safety_irq_clear(pvr_dev); + pvr_device_handle_safety_events(pvr_dev); + + ret = IRQ_HANDLED; + } + + pvr_power_put(pvr_dev); + } + return ret; } static irqreturn_t pvr_device_irq_handler(int irq, void *data) { struct pvr_device *pvr_dev = data; + bool safety_irq_pending = false; + + if (pvr_dev->has_safety_events) + safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev); - if (!pvr_fw_irq_pending(pvr_dev)) + if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending) return IRQ_NONE; /* Spurious IRQ - ignore. */ - /* Mask the FW interrupts before waking up the thread. Will be unmasked - * when the thread handler is done processing events. 
- */ - pvr_fw_irq_disable(pvr_dev); return IRQ_WAKE_THREAD; } +static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev) +{ + u32 num_ecc_rams = 0; + + /* + * Safety events are an optional feature of the RogueXE platform. They + * are only enabled if at least one of ECC memory or the watchdog timer + * are present in HW. While safety events can be generated by other + * systems, that will never happen if the above mentioned hardware is + * not present. + */ + if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) { + pvr_dev->has_safety_events = false; + return; + } + + PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams); + + pvr_dev->has_safety_events = + num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer); +} + /** * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device * @pvr_dev: Target PowerVR device. @@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev) init_waitqueue_head(&pvr_dev->kccb.rtn_q); + pvr_device_safety_irq_init(pvr_dev); + pvr_dev->irq = platform_get_irq(plat_dev, 0); if (pvr_dev->irq < 0) return pvr_dev->irq; /* Clear any pending events before requesting the IRQ line. */ pvr_fw_irq_clear(pvr_dev); - pvr_fw_irq_enable(pvr_dev); + if (pvr_dev->has_safety_events) + pvr_device_safety_irq_clear(pvr_dev); + + /* + * The ONESHOT flag ensures IRQs are masked while the thread handler is + * running. + */ return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler, pvr_device_irq_thread_handler, - IRQF_SHARED, "gpu", pvr_dev); + IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev); } /** @@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev) if (err) return err; + /* Get the reset line for the GPU */ + err = pvr_device_reset_init(pvr_dev); + if (err) + return err; + /* Explicitly power the GPU so we can access control registers before the FW is booted. */ err = pm_runtime_resume_and_get(dev); if (err) diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index 6d0dfacb677b..7cb01c38d2a9 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -18,6 +18,7 @@ #include <linux/bits.h> #include <linux/compiler_attributes.h> #include <linux/compiler_types.h> +#include <linux/device.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -131,6 +132,22 @@ struct pvr_device { */ struct clk *mem_clk; + struct pvr_device_power { + struct device **domain_devs; + struct device_link **domain_links; + + u32 domain_count; + } power; + + /** + * @reset: Optional reset line. + * + * This may be used on some platforms to provide a reset line that needs to be de-asserted + * after power-up procedure. It would also need to be asserted after the power-down + * procedure. + */ + struct reset_control *reset; + /** @irq: IRQ number. */ int irq; @@ -300,6 +317,9 @@ struct pvr_device { * struct pvr_file. */ spinlock_t ctx_list_lock; + + /** @has_safety_events: Whether this device can raise safety events. */ + bool has_safety_events; }; /** @@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset, __union_size, __member_size); \ }) -#define PVR_FW_PROCESSOR_TYPE_META 0 -#define PVR_FW_PROCESSOR_TYPE_MIPS 1 -#define PVR_FW_PROCESSOR_TYPE_RISCV 2 +/* + * These utility functions should more properly be placed in pvr_fw.h, but that + * would cause a dependency cycle between that header and this one. Since + * they're primarily used in pvr_device.c, let's put them in here for now. 
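
The hunks above drop the manual pvr_fw_irq_disable()/pvr_fw_irq_enable() masking dance and let the IRQ core do it: with IRQF_ONESHOT the line stays masked from the moment the hard handler returns IRQ_WAKE_THREAD until the threaded handler completes. A condensed sketch of the resulting split, using the pvr_fw_irq_pending()/pvr_fw_irq_clear() wrappers defined just below; example_process_events() and the example_ function names are hypothetical:

        #include <linux/interrupt.h>

        static irqreturn_t example_hardirq(int irq, void *data)
        {
                struct pvr_device *pvr_dev = data;

                /* Quick check only; the core keeps the line masked for us. */
                if (!pvr_fw_irq_pending(pvr_dev))
                        return IRQ_NONE; /* spurious (shared line) */

                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t example_thread(int irq, void *data)
        {
                struct pvr_device *pvr_dev = data;

                while (pvr_fw_irq_pending(pvr_dev)) {
                        pvr_fw_irq_clear(pvr_dev);
                        example_process_events(pvr_dev); /* hypothetical */
                }

                return IRQ_HANDLED;
        }

        static int example_irq_init(struct pvr_device *pvr_dev, int irq)
        {
                return request_threaded_irq(irq, example_hardirq,
                                            example_thread,
                                            IRQF_SHARED | IRQF_ONESHOT,
                                            "gpu", pvr_dev);
        }
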
+ */ + +static __always_inline bool +pvr_fw_irq_pending(struct pvr_device *pvr_dev) +{ + return pvr_dev->fw_dev.defs->irq_pending(pvr_dev); +} + +static __always_inline void +pvr_fw_irq_clear(struct pvr_device *pvr_dev) +{ + pvr_dev->fw_dev.defs->irq_clear(pvr_dev); +} #endif /* PVR_DEVICE_H */ diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index 0639502137b4..b058ec183bb3 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -44,6 +44,7 @@ * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies: * * * AXE-1-16M (found in Texas Instruments AM62) + * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68) */ /** @@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev) platform_set_drvdata(plat_dev, drm_dev); + err = pvr_power_domains_init(pvr_dev); + if (err) + return err; + init_rwsem(&pvr_dev->reset_sem); pvr_context_device_init(pvr_dev); @@ -1450,6 +1455,8 @@ err_watchdog_fini: err_context_fini: pvr_context_device_fini(pvr_dev); + pvr_power_domains_fini(pvr_dev); + return err; } @@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev) pvr_watchdog_fini(pvr_dev); pvr_queue_device_fini(pvr_dev); pvr_context_device_fini(pvr_dev); + pvr_power_domains_fini(pvr_dev); } static const struct of_device_id dt_match[] = { + { .compatible = "img,img-rogue", .data = NULL }, + + /* + * This legacy compatible string was introduced early on before the more generic + * "img,img-rogue" was added. Keep it around here for compatibility, but never use + * "img,img-axe" in new devicetrees. + */ { .compatible = "img,img-axe", .data = NULL }, {} }; @@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC); MODULE_LICENSE("Dual MIT/GPL"); MODULE_IMPORT_NS("DMA_BUF"); MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw"); +MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw"); diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c index 5e51bc980751..5228e214491c 100644 --- a/drivers/gpu/drm/imagination/pvr_free_list.c +++ b/drivers/gpu/drm/imagination/pvr_free_list.c @@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list, dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); u64 dma_pfn = dma_addr >> ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; - u32 dma_addr_offset; BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE); - for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE; + for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE; dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) { WARN_ON_ONCE(dma_pfn >> 32); diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c index d09c4c684116..b2f8cba77346 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.c +++ b/drivers/gpu/drm/imagination/pvr_fw.c @@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id) { const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; - u32 entry; - for (entry = 0; entry < num_layout_entries; entry++) { + for (u32 entry = 0; entry < num_layout_entries; entry++) { if (layout_entries[entry].id == id) return &layout_entries[entry]; } @@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev) { const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; u32 num_layout_entries = 
pvr_dev->fw_dev.header->layout_entry_num; - u32 entry; - for (entry = 0; entry < num_layout_entries; entry++) { + for (u32 entry = 0; entry < num_layout_entries; entry++) { if (layout_entries[entry].id == META_PRIVATE_DATA || layout_entries[entry].id == MIPS_PRIVATE_DATA || layout_entries[entry].id == RISCV_PRIVATE_DATA) @@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev) const u8 *fw = firmware->data; u32 fw_offset = firmware->size - SZ_4K; u32 layout_table_size; - u32 entry; if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE)) return -EINVAL; @@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev) return -EINVAL; layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset]; - for (entry = 0; entry < header->layout_entry_num; entry++) { + for (u32 entry = 0; entry < header->layout_entry_num; entry++) { u32 start_addr = layout_entries[entry].base_addr; u32 end_addr = start_addr + layout_entries[entry].alloc_size; @@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; u32 end_addr = addr + size; - int entry = 0; /* Ensure requested range is not zero, and size is not causing addr to overflow. */ if (end_addr <= addr) return -EINVAL; - for (entry = 0; entry < num_layout_entries; entry++) { + for (int entry = 0; entry < num_layout_entries; entry++) { u32 entry_start_addr = layout_entries[entry].base_addr; u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size; @@ -441,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv) runtime_cfg->active_pm_latency_persistant = true; WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters, &runtime_cfg->default_dusts_num_init) != 0); + + /* Keep watchdog timer disabled. 
*/ + runtime_cfg->wdg_period_us = 0; } static void @@ -663,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev) return PTR_ERR(fw_code_ptr); } - if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) { + if (pvr_dev->fw_dev.defs->has_fixed_data_addr) { u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask; fw_data_ptr = @@ -939,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev) int pvr_fw_init(struct pvr_device *pvr_dev) { + static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = { + [PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta, + [PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips, + [PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv, + }; + u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT; u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn); struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; int err; - if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META) - fw_dev->defs = &pvr_fw_defs_meta; - else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS) - fw_dev->defs = &pvr_fw_defs_mips; - else + if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT) return -EINVAL; + fw_dev->defs = fw_defs[fw_dev->processor_type]; + err = fw_dev->defs->init(pvr_dev); if (err) return err; @@ -1456,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, *fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset); } +u64 +pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj) +{ + struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset; +} + /* * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW * structures diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h index b7966bd574a9..1404dd492d7c 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.h +++ b/drivers/gpu/drm/imagination/pvr_fw.h @@ -167,47 +167,30 @@ struct pvr_fw_defs { int (*wrapper_init)(struct pvr_device *pvr_dev); /** - * @has_fixed_data_addr: + * @irq_pending: Check interrupt status register for pending interrupts. * - * Called to check if firmware fixed data must be loaded at the address given by the - * firmware layout table. + * @pvr_dev: Target PowerVR device. * * This function is mandatory. + */ + bool (*irq_pending)(struct pvr_device *pvr_dev); + + /** + * @irq_clear: Clear pending interrupts. * - * Returns: - * * %true if firmware fixed data must be loaded at the address given by the firmware - * layout table. - * * %false otherwise. + * @pvr_dev: Target PowerVR device. + * + * This function is mandatory. */ - bool (*has_fixed_data_addr)(void); + void (*irq_clear)(struct pvr_device *pvr_dev); /** - * @irq: FW Interrupt information. + * @has_fixed_data_addr: Specify whether the firmware fixed data must be loaded at the + * address given by the firmware layout table. * - * Those are processor dependent, and should be initialized by the - * processor backend in pvr_fw_funcs::init(). + * This value is mandatory. */ - struct { - /** @enable_reg: FW interrupt enable register. */ - u32 enable_reg; - - /** @status_reg: FW interrupt status register. */ - u32 status_reg; - - /** - * @clear_reg: FW interrupt clear register. - * - * If @status_reg == @clear_reg, we clear by write a bit to zero, - * otherwise we clear by writing a bit to one. 
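
pvr_fw_init() above replaces the if/else ladder with a designated-initializer lookup table indexed by the new processor-type enum. The pattern generalizes: slot order is independent of enum order, the bounds check rejects garbage values, and a NULL slot (an enum value with no backend compiled in) can be caught explicitly if the table ever becomes sparse. A self-contained miniature with hypothetical names:

        enum example_type {
                EXAMPLE_TYPE_A,
                EXAMPLE_TYPE_B,
                EXAMPLE_TYPE_COUNT,
        };

        struct example_ops {
                int (*init)(void *priv);
        };

        static const struct example_ops *example_ops_table[EXAMPLE_TYPE_COUNT] = {
                [EXAMPLE_TYPE_A] = &example_ops_a,
                [EXAMPLE_TYPE_B] = &example_ops_b,
        };

        static const struct example_ops *example_lookup(unsigned int type)
        {
                if (type >= EXAMPLE_TYPE_COUNT)
                        return NULL; /* out of range */

                return example_ops_table[type];
        }
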
- */ - u32 clear_reg; - - /** @event_mask: Bitmask of events to listen for. */ - u32 event_mask; - - /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */ - u32 clear_mask; - } irq; + bool has_fixed_data_addr; }; /** @@ -400,26 +383,16 @@ struct pvr_fw_device { } fw_objs; }; -#define pvr_fw_irq_read_reg(pvr_dev, name) \ - pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg) - -#define pvr_fw_irq_write_reg(pvr_dev, name, value) \ - pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value) - -#define pvr_fw_irq_pending(pvr_dev) \ - (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask) - -#define pvr_fw_irq_clear(pvr_dev) \ - pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask) - -#define pvr_fw_irq_enable(pvr_dev) \ - pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask) - -#define pvr_fw_irq_disable(pvr_dev) \ - pvr_fw_irq_write_reg(pvr_dev, enable, 0) +enum pvr_fw_processor_type { + PVR_FW_PROCESSOR_TYPE_META = 0, + PVR_FW_PROCESSOR_TYPE_MIPS, + PVR_FW_PROCESSOR_TYPE_RISCV, + PVR_FW_PROCESSOR_TYPE_COUNT, +}; extern const struct pvr_fw_defs pvr_fw_defs_meta; extern const struct pvr_fw_defs pvr_fw_defs_mips; +extern const struct pvr_fw_defs pvr_fw_defs_riscv; int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev); int pvr_fw_init(struct pvr_device *pvr_dev); @@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out) pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out); } +u64 +pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj); + +static __always_inline size_t +pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj) +{ + return pvr_gem_object_size(fw_obj->gem); +} + +/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */ +int +pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr, + u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr); + #endif /* PVR_FW_H */ diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index 6d13864851fc..60db3668ad3c 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr) const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; u64 seg_out_addr_top; - u32 i; seg_out_addr_top = ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, ROGUE_FW_SEGMMU_META_BIFDM_ID); - for (i = 0; i < num_layout_entries; i++) { + for (u32 i = 0; i < num_layout_entries; i++) { /* * FW code is using the bootloader segment which is already * configured on boot. 
FW coremem code and data don't use the @@ -534,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) } static bool -pvr_meta_has_fixed_data_addr(void) +pvr_meta_irq_pending(struct pvr_device *pvr_dev) { - return false; + return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) & + ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN; +} + +static void +pvr_meta_irq_clear(struct pvr_device *pvr_dev) +{ + pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS, + ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK); } const struct pvr_fw_defs pvr_fw_defs_meta = { @@ -546,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = { .vm_unmap = pvr_meta_vm_unmap, .get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset, .wrapper_init = pvr_meta_wrapper_init, - .has_fixed_data_addr = pvr_meta_has_fixed_data_addr, - .irq = { - .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE, - .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS, - .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS, - .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN, - .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK, - }, + .irq_pending = pvr_meta_irq_pending, + .irq_clear = pvr_meta_irq_clear, + .has_fixed_data_addr = false, }; diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c index 0bed0257e2ab..6914fc46db50 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_mips.c +++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c @@ -8,7 +8,6 @@ #include "pvr_rogue_mips.h" #include "pvr_vm_mips.h" -#include <linux/elf.h> #include <linux/err.h> #include <linux/types.h> @@ -16,60 +15,6 @@ #define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */ #define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M -/** - * process_elf_command_stream() - Process ELF firmware image and populate - * firmware sections - * @pvr_dev: Device pointer. - * @fw: Pointer to firmware image. - * @fw_code_ptr: Pointer to FW code section. - * @fw_data_ptr: Pointer to FW data section. - * @fw_core_code_ptr: Pointer to FW coremem code section. - * @fw_core_data_ptr: Pointer to FW coremem data section. - * - * Returns : - * * 0 on success, or - * * -EINVAL on any error in ELF command stream. 
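
After this refactor, a firmware-processor backend advertises its IRQ handling through two callbacks and a plain bool instead of a table of register descriptions, as the Meta conversion above shows. What a hypothetical additional backend would have to supply; none of the example_ symbols or EXAMPLE_ registers below exist in the driver:

        static bool example_irq_pending(struct pvr_device *pvr_dev)
        {
                /* EXAMPLE_* register/bit names are placeholders. */
                return pvr_cr_read32(pvr_dev, EXAMPLE_IRQ_STATUS) &
                       EXAMPLE_IRQ_EVENT_EN;
        }

        static void example_irq_clear(struct pvr_device *pvr_dev)
        {
                pvr_cr_write32(pvr_dev, EXAMPLE_IRQ_CLEAR,
                               EXAMPLE_IRQ_CLEAR_EN);
        }

        const struct pvr_fw_defs pvr_fw_defs_example = {
                /* .init, .fw_process, .vm_map, ... as for the real cores */
                .irq_pending = example_irq_pending,
                .irq_clear = example_irq_clear,
                .has_fixed_data_addr = false,
        };
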
- */ -static int -process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr, - u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr) -{ - struct elf32_hdr *header = (struct elf32_hdr *)fw; - struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff); - struct drm_device *drm_dev = from_pvr_device(pvr_dev); - u32 entry; - int err; - - for (entry = 0; entry < header->e_phnum; entry++, program_header++) { - void *write_addr; - - /* Only consider loadable entries in the ELF segment table */ - if (program_header->p_type != PT_LOAD) - continue; - - err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr, - program_header->p_memsz, fw_code_ptr, fw_data_ptr, - fw_core_code_ptr, fw_core_data_ptr, &write_addr); - if (err) { - drm_err(drm_dev, - "Addr 0x%x (size: %d) not found in any firmware segment", - program_header->p_vaddr, program_header->p_memsz); - return err; - } - - /* Write to FW allocation only if available */ - if (write_addr) { - memcpy(write_addr, fw + program_header->p_offset, - program_header->p_filesz); - - memset((u8 *)write_addr + program_header->p_filesz, 0, - program_header->p_memsz - program_header->p_filesz); - } - } - - return 0; -} - static int pvr_mips_init(struct pvr_device *pvr_dev) { @@ -97,11 +42,10 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw, const struct pvr_fw_layout_entry *stack_entry; struct rogue_mipsfw_boot_data *boot_data; dma_addr_t dma_addr; - u32 page_nr; int err; - err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, - fw_core_data_ptr); + err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr); if (err) return err; @@ -132,7 +76,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw, boot_data->reg_base = pvr_dev->regs_resource->start; - for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) { + for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) { /* Firmware expects 4k pages, but host page size might be different. 
*/ u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT; u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK; @@ -228,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset) } static bool -pvr_mips_has_fixed_data_addr(void) +pvr_mips_irq_pending(struct pvr_device *pvr_dev) +{ + return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) & + ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN; +} + +static void +pvr_mips_irq_clear(struct pvr_device *pvr_dev) { - return true; + pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR, + ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN); } const struct pvr_fw_defs pvr_fw_defs_mips = { @@ -241,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = { .vm_unmap = pvr_vm_mips_unmap, .get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset, .wrapper_init = pvr_mips_wrapper_init, - .has_fixed_data_addr = pvr_mips_has_fixed_data_addr, - .irq = { - .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE, - .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS, - .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR, - .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN, - .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN, - }, + .irq_pending = pvr_mips_irq_pending, + .irq_clear = pvr_mips_irq_clear, + .has_fixed_data_addr = true, }; diff --git a/drivers/gpu/drm/imagination/pvr_fw_riscv.c b/drivers/gpu/drm/imagination/pvr_fw_riscv.c new file mode 100644 index 000000000000..fc13d483be9a --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_fw_riscv.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* Copyright (c) 2024 Imagination Technologies Ltd. */ + +#include "pvr_device.h" +#include "pvr_fw.h" +#include "pvr_fw_info.h" +#include "pvr_fw_mips.h" +#include "pvr_gem.h" +#include "pvr_rogue_cr_defs.h" +#include "pvr_rogue_riscv.h" +#include "pvr_vm.h" + +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/ktime.h> +#include <linux/types.h> + +#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */ +#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT) + +static int +pvr_riscv_wrapper_init(struct pvr_device *pvr_dev) +{ + const u64 common_opts = + ((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) + << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) | + ((u64)MMU_CONTEXT_MAPPING_FWPRIV + << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT); + + u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj); + u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj); + + /* This condition allows us to OR the addresses into the register directly. */ + static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT == + ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT); + + WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK); + WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK); + + pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE), + code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); + + pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA), + data_addr | common_opts | + ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); + + /* Garten IDLE bit controlled by RISC-V. 
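
pvr_riscv_wrapper_init() above leans on a static_assert to justify OR-ing device-virtual addresses straight into the remap registers: when a register field's bit position equals the address alignment shift, the aligned address already sits in field position and no shifting is needed. The trick in isolation, with hypothetical constants:

        #include <linux/build_bug.h>

        #define EXAMPLE_FIELD_SHIFT      12 /* field starts at bit 12 */
        #define EXAMPLE_ADDR_ALIGNSHIFT  12 /* address is 4 KiB aligned */

        /* Compile-time proof that the OR below is legitimate. */
        static_assert(EXAMPLE_FIELD_SHIFT == EXAMPLE_ADDR_ALIGNSHIFT);

        static u64 example_pack_remap(u64 aligned_addr, u64 flags)
        {
                /* aligned_addr needs no shift: the shifts match. */
                return aligned_addr | flags;
        }
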
*/ + pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, + ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); + + return 0; +} + +struct rogue_riscv_fw_boot_data { + u64 coremem_code_dev_vaddr; + u64 coremem_data_dev_vaddr; + u32 coremem_code_fw_addr; + u32 coremem_data_fw_addr; + u32 coremem_code_size; + u32 coremem_data_size; + u32 flags; + u32 reserved; +}; + +static int +pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw, + u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, + u32 core_code_alloc_size) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mem *fw_mem = &fw_dev->mem; + struct rogue_riscv_fw_boot_data *boot_data; + int err; + + err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr); + if (err) + goto err_out; + + boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr; + + if (fw_mem->core_code_obj) { + boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj); + pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr); + boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj); + } + + if (fw_mem->core_data_obj) { + boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj); + pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr); + boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj); + } + + return 0; + +err_out: + return err; +} + +static int +pvr_riscv_init(struct pvr_device *pvr_dev) +{ + pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0); + + return 0; +} + +static u32 +pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset) +{ + u32 fw_addr = fw_obj->fw_addr_offset + offset; + + /* RISC-V cacheability is determined by address. 
*/ + if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED) + fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA); + else + fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA); + + return fw_addr; +} + +static int +pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) +{ + struct pvr_gem_object *pvr_obj = fw_obj->gem; + + return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start, + pvr_gem_object_size(pvr_obj)); +} + +static void +pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) +{ + struct pvr_gem_object *pvr_obj = fw_obj->gem; + + pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj, + fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size); +} + +static bool +pvr_riscv_irq_pending(struct pvr_device *pvr_dev) +{ + return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) & + ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN; +} + +static void +pvr_riscv_irq_clear(struct pvr_device *pvr_dev) +{ + pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR, + ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN); +} + +const struct pvr_fw_defs pvr_fw_defs_riscv = { + .init = pvr_riscv_init, + .fw_process = pvr_riscv_fw_process, + .vm_map = pvr_riscv_vm_map, + .vm_unmap = pvr_riscv_vm_unmap, + .get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset, + .wrapper_init = pvr_riscv_wrapper_init, + .irq_pending = pvr_riscv_irq_pending, + .irq_clear = pvr_riscv_irq_clear, + .has_fixed_data_addr = false, +}; diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c index 36cec227cfe3..dcbb9903e791 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_startstop.c +++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c @@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev) pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr); + + if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) { + pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) + << ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) & + ~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK; + + pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr); + } } static int @@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev) (void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */ } + if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) + pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0); + /* Set Rogue in soft-reset. */ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask); if (has_reset2) @@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev) /* ... and afterwards. */ udelay(3); + if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) { + /* Boot the FW. 
*/ + pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1); + udelay(3); + } + return 0; err_reset: diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 5dbb636d7d4f..a1098b521485 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv) { struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr; struct pvr_fw_trace *fw_trace = priv; - u32 thread_nr; tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; tracebuf_ctrl->tracebuf_flags = 0; @@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv) else tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE; - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { struct rogue_fwif_tracebuf_space *tracebuf_space = &tracebuf_ctrl->tracebuf[thread_nr]; struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; @@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev) { struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; struct drm_device *drm_dev = from_pvr_device(pvr_dev); - u32 thread_nr; int err; - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; trace_buffer->buf = @@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev) BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) != ARRAY_SIZE(fw_trace->buffers)); - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { struct rogue_fwif_tracebuf_space *tracebuf_space = &fw_trace->tracebuf_ctrl->tracebuf[thread_nr]; struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; @@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev) return 0; err_free_buf: - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; if (trace_buffer->buf) @@ -112,9 +110,8 @@ err_free_buf: void pvr_fw_trace_fini(struct pvr_device *pvr_dev) { struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; - u32 thread_nr; - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj); @@ -122,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev) pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj); } -#if defined(CONFIG_DEBUG_FS) - /** * update_logtype() - Send KCCB command to trigger FW to update logtype * @pvr_dev: Target PowerVR device @@ -184,9 +179,7 @@ struct pvr_fw_trace_seq_data { static u32 find_sfid(u32 id) { - u32 i; - - for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) { + for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) { if (stid_fmts[i].id == id) return i; } @@ -285,12 +278,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data) static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos) { struct pvr_fw_trace_seq_data 
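
Pulling the RISC-V pieces of pvr_fw_start() together, the boot ordering reads: hold the core, reset the GPU, program the remap windows, then release the core. A condensed sketch of that ordering, not the literal function (which interleaves these steps with the existing Meta/MIPS paths); example_soft_reset() and example_program_remap() are hypothetical stand-ins:

        #include <linux/delay.h>

        static void example_riscv_boot(struct pvr_device *pvr_dev)
        {
                pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0); /* hold core */
                example_soft_reset(pvr_dev);      /* soft-reset sequence */
                example_program_remap(pvr_dev);   /* i.e. wrapper_init */
                pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1); /* boot FW */
                udelay(3);
        }
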
*trace_seq_data = s->private; - u32 i; /* Reset trace index, then advance to *pos. */ fw_trace_get_first(trace_seq_data); - for (i = 0; i < *pos; i++) { + for (u32 i = 0; i < *pos; i++) { if (!fw_trace_get_next(trace_seq_data)) return NULL; } @@ -447,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = { void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask) { - if (old_mask != new_mask) + if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask) update_logtype(pvr_dev, new_mask); } @@ -455,12 +447,14 @@ void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir) { struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; - u32 thread_nr; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return; static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10, "The filename buffer is only large enough for a single-digit thread count"); - for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) { + for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) { char filename[8]; snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr); @@ -469,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir) &pvr_fw_trace_fops); } } -#endif diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h index 0074d2b18da0..1d0ef937427a 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.h +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h @@ -65,7 +65,6 @@ struct pvr_fw_trace { int pvr_fw_trace_init(struct pvr_device *pvr_dev); void pvr_fw_trace_fini(struct pvr_device *pvr_dev); -#if defined(CONFIG_DEBUG_FS) /* Forward declaration from <linux/dcache.h>. */ struct dentry; @@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask); void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir); -#endif /* defined(CONFIG_DEBUG_FS) */ #endif /* PVR_FW_TRACE_H */ diff --git a/drivers/gpu/drm/imagination/pvr_fw_util.c b/drivers/gpu/drm/imagination/pvr_fw_util.c new file mode 100644 index 000000000000..377fe72d86b8 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_fw_util.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2024 Imagination Technologies Ltd. */ + +#include "pvr_device.h" +#include "pvr_fw.h" + +#include <drm/drm_device.h> +#include <drm/drm_print.h> + +#include <linux/elf.h> +#include <linux/string.h> +#include <linux/types.h> + +/** + * pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate + * firmware sections + * @pvr_dev: Device pointer. + * @fw: Pointer to firmware image. + * @fw_code_ptr: Pointer to FW code section. + * @fw_data_ptr: Pointer to FW data section. + * @fw_core_code_ptr: Pointer to FW coremem code section. + * @fw_core_data_ptr: Pointer to FW coremem data section. + * + * Returns : + * * 0 on success, or + * * -EINVAL on any error in ELF command stream. 
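
The fw_trace changes above swap #ifdef CONFIG_DEBUG_FS compile-time fences for runtime IS_ENABLED() checks: the guarded code is always compiled (so it cannot bit-rot unseen behind a disabled config), yet the compiler discards the dead branch entirely when debugfs is off. The idiom in miniature; example_debugfs_init is a hypothetical name, reusing the file's pvr_fw_trace_fops only for illustration:

        #include <linux/debugfs.h>

        static void example_debugfs_init(struct pvr_device *pvr_dev,
                                         struct dentry *dir)
        {
                /* Still type-checked, but compiled out when debugfs is off. */
                if (!IS_ENABLED(CONFIG_DEBUG_FS))
                        return;

                debugfs_create_file("trace_0", 0400, dir, pvr_dev,
                                    &pvr_fw_trace_fops);
        }
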
+ */ +int +pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, + u8 *fw_code_ptr, u8 *fw_data_ptr, + u8 *fw_core_code_ptr, u8 *fw_core_data_ptr) +{ + struct elf32_hdr *header = (struct elf32_hdr *)fw; + struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff); + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + int err; + + for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) { + void *write_addr; + + /* Only consider loadable entries in the ELF segment table */ + if (program_header->p_type != PT_LOAD) + continue; + + err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr, + program_header->p_memsz, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr, &write_addr); + if (err) { + drm_err(drm_dev, + "Addr 0x%x (size: %d) not found in any firmware segment", + program_header->p_vaddr, program_header->p_memsz); + return err; + } + + /* Write to FW allocation only if available */ + if (write_addr) { + memcpy(write_addr, fw + program_header->p_offset, + program_header->p_filesz); + + memset((u8 *)write_addr + program_header->p_filesz, 0, + program_header->p_memsz - program_header->p_filesz); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c index 6a8c81fe8c1e..a66cf082af24 100644 --- a/drivers/gpu/drm/imagination/pvr_gem.c +++ b/drivers/gpu/drm/imagination/pvr_gem.c @@ -19,6 +19,7 @@ #include <linux/log2.h> #include <linux/mutex.h> #include <linux/pagemap.h> +#include <linux/property.h> #include <linux/refcount.h> #include <linux/scatterlist.h> @@ -76,8 +77,6 @@ pvr_gem_object_flags_validate(u64 flags) DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS), }; - int i; - /* * Check for bits set in undefined regions. Reserved regions refer to * options that can only be set by the kernel. These are explicitly @@ -91,7 +90,7 @@ pvr_gem_object_flags_validate(u64 flags) * Check for all combinations of flags marked as invalid in the array * above. 
*/ - for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) { + for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) { u64 combo = invalid_combinations[i]; if ((flags & combo) == combo) @@ -203,7 +202,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj) dma_resv_lock(obj->resv, NULL); - err = drm_gem_shmem_vmap(shmem_obj, &map); + err = drm_gem_shmem_vmap_locked(shmem_obj, &map); if (err) goto err_unlock; @@ -257,7 +256,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj) dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL); } - drm_gem_shmem_vunmap(shmem_obj, &map); + drm_gem_shmem_vunmap_locked(shmem_obj, &map); dma_resv_unlock(obj->resv); } @@ -336,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t struct pvr_gem_object * pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags) { + struct drm_device *drm_dev = from_pvr_device(pvr_dev); struct drm_gem_shmem_object *shmem_obj; struct pvr_gem_object *pvr_obj; struct sg_table *sgt; @@ -345,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags) if (size == 0 || !pvr_gem_object_flags_validate(flags)) return ERR_PTR(-EINVAL); - shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size); + if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT) + flags |= PVR_BO_CPU_CACHED; + + shmem_obj = drm_gem_shmem_create(drm_dev, size); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); @@ -360,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags) goto err_shmem_object_free; } - dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt, - DMA_BIDIRECTIONAL); + dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL); /* * Do this last because pvr_gem_object_zero() requires a fully diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h index e0e5ea509a2e..c99f30cc6208 100644 --- a/drivers/gpu/drm/imagination/pvr_gem.h +++ b/drivers/gpu/drm/imagination/pvr_gem.h @@ -44,8 +44,10 @@ struct pvr_file; * Bits not defined anywhere are "undefined". * * CPU mapping options - * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this - * flag to override this behaviour and map the object cached. + * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set + * this flag to override this behaviour and map the object cached. If the dma_coherent + * property is present in devicetree, all allocations will be mapped as if this flag was set. + * This does not require any additional consideration at allocation time. 
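
The pvr_gem change above makes the CPU-cacheability decision automatic: on a dma-coherent platform a write-combined CPU mapping buys nothing, and mixing mapping attributes for the same memory can be unsafe on some architectures, so every object behaves as if PVR_BO_CPU_CACHED had been requested. The decision point, isolated into a sketch (example_adjust_bo_flags is a hypothetical name):

        #include <linux/property.h>

        static u64 example_adjust_bo_flags(struct device *dev, u64 flags)
        {
                /* Coherent DMA: cached CPU mappings are safe and faster. */
                if (device_get_dma_attr(dev) == DEV_DMA_COHERENT)
                        flags |= PVR_BO_CPU_CACHED;

                return flags;
        }
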
* * Firmware options * :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c index 54f88d6c01e5..dc0c25fa1847 100644 --- a/drivers/gpu/drm/imagination/pvr_hwrt.c +++ b/drivers/gpu/drm/imagination/pvr_hwrt.c @@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file, { struct pvr_device *pvr_dev = pvr_file->pvr_dev; int err; - int i; hwrt->pvr_dev = pvr_dev; hwrt->max_rts = args->layers; /* Get pointers to the free lists */ - for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]); if (!hwrt->free_lists[i]) { err = -EINVAL; @@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file, return 0; err_put_free_lists: - for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { pvr_free_list_put(hwrt->free_lists[i]); hwrt->free_lists[i] = NULL; } @@ -78,9 +77,7 @@ err_put_free_lists: static void hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt) { - int i; - - for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { pvr_free_list_put(hwrt->free_lists[i]); hwrt->free_lists[i] = NULL; } @@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file, struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args; struct pvr_device *pvr_dev = pvr_file->pvr_dev; struct rogue_fwif_rta_ctl *rta_ctl; - int free_list_i; int err; pvr_fw_object_get_fw_addr(hwrt->common_fw_obj, &hwrt_data->data.hwrt_data_common_fw_addr); - for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) { + for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) { pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj, &hwrt_data->data.freelists_fw_addr[free_list_i]); } diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c index 4fe70610ed94..450d476d183f 100644 --- a/drivers/gpu/drm/imagination/pvr_mmu.c +++ b/drivers/gpu/drm/imagination/pvr_mmu.c @@ -17,6 +17,7 @@ #include <linux/dma-mapping.h> #include <linux/kmemleak.h> #include <linux/minmax.h> +#include <linux/property.h> #include <linux/sizes.h> #define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_)) @@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page, struct device *dev = from_pvr_device(pvr_dev)->dev; struct page *raw_page; + pgprot_t prot; int err; dma_addr_t dma_addr; @@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page, if (!raw_page) return -ENOMEM; - host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + prot = PAGE_KERNEL; + if (device_get_dma_attr(dev) != DEV_DMA_COHERENT) + prot = pgprot_writecombine(prot); + + host_ptr = vmap(&raw_page, 1, VM_MAP, prot); if (!host_ptr) { err = -ENOMEM; goto err_free_page; diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index ba7816fd28ec..41f5d89e78b8 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -10,11 +10,15 @@ #include <drm/drm_drv.h> #include <drm/drm_managed.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/mutex.h> +#include <linux/of.h> #include 
<linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> @@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev) clk_disable_unprepare(pvr_dev->sys_clk); clk_disable_unprepare(pvr_dev->core_clk); + err = reset_control_assert(pvr_dev->reset); + err_drm_dev_exit: drm_dev_exit(idx); @@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev) if (err) goto err_sys_clk_disable; + /* + * According to the hardware manual, a delay of at least 32 clock + * cycles is required between de-asserting the clkgen reset and + * de-asserting the GPU reset. Assuming a worst-case scenario with + * a very high GPU clock frequency, a delay of 1 microsecond is + * sufficient to ensure this requirement is met across all + * feasible GPU clock speeds. + */ + udelay(1); + + err = reset_control_deassert(pvr_dev->reset); + if (err) + goto err_mem_clk_disable; + if (pvr_dev->fw_dev.booted) { err = pvr_power_fw_enable(pvr_dev); if (err) - goto err_mem_clk_disable; + goto err_reset_assert; } drm_dev_exit(idx); return 0; +err_reset_assert: + reset_control_assert(pvr_dev->reset); + err_mem_clk_disable: clk_disable_unprepare(pvr_dev->mem_clk); @@ -431,3 +454,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev) { cancel_delayed_work_sync(&pvr_dev->watchdog.work); } + +int pvr_power_domains_init(struct pvr_device *pvr_dev) +{ + struct device *dev = from_pvr_device(pvr_dev)->dev; + + struct device_link **domain_links __free(kfree) = NULL; + struct device **domain_devs __free(kfree) = NULL; + int domain_count; + int link_count; + + char dev_name[2] = "a"; + int err; + int i; + + domain_count = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (domain_count < 0) + return domain_count; + + if (domain_count <= 1) + return 0; + + link_count = domain_count + (domain_count - 1); + + domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL); + if (!domain_devs) + return -ENOMEM; + + domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL); + if (!domain_links) + return -ENOMEM; + + for (i = 0; i < domain_count; i++) { + struct device *domain_dev; + + dev_name[0] = 'a' + i; + domain_dev = dev_pm_domain_attach_by_name(dev, dev_name); + if (IS_ERR_OR_NULL(domain_dev)) { + err = domain_dev ? 
PTR_ERR(domain_dev) : -ENODEV; + goto err_detach; + } + + domain_devs[i] = domain_dev; + } + + for (i = 0; i < domain_count; i++) { + struct device_link *link; + + link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); + if (!link) { + err = -ENODEV; + goto err_unlink; + } + + domain_links[i] = link; + } + + for (i = domain_count; i < link_count; i++) { + struct device_link *link; + + link = device_link_add(domain_devs[i - domain_count + 1], + domain_devs[i - domain_count], + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); + if (!link) { + err = -ENODEV; + goto err_unlink; + } + + domain_links[i] = link; + } + + pvr_dev->power = (struct pvr_device_power){ + .domain_devs = no_free_ptr(domain_devs), + .domain_links = no_free_ptr(domain_links), + .domain_count = domain_count, + }; + + return 0; + +err_unlink: + while (--i >= 0) + device_link_del(domain_links[i]); + + i = domain_count; + +err_detach: + while (--i >= 0) + dev_pm_domain_detach(domain_devs[i], true); + + return err; +} + +void pvr_power_domains_fini(struct pvr_device *pvr_dev) +{ + const int domain_count = pvr_dev->power.domain_count; + + int i = domain_count + (domain_count - 1); + + while (--i >= 0) + device_link_del(pvr_dev->power.domain_links[i]); + + i = domain_count; + + while (--i >= 0) + dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true); + + kfree(pvr_dev->power.domain_links); + kfree(pvr_dev->power.domain_devs); + + pvr_dev->power = (struct pvr_device_power){ 0 }; +} diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h index 9a9312dcb2da..ada85674a7ca 100644 --- a/drivers/gpu/drm/imagination/pvr_power.h +++ b/drivers/gpu/drm/imagination/pvr_power.h @@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev) return pm_runtime_put(drm_dev->dev); } +int pvr_power_domains_init(struct pvr_device *pvr_dev); +void pvr_power_domains_fini(struct pvr_device *pvr_dev); + #endif /* PVR_POWER_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h index 2a90d02796d3..790c97f80a2a 100644 --- a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h +++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h @@ -827,6 +827,120 @@ #define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU #define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U +/* Register ROGUE_CR_EVENT_CLEAR */ +#define ROGUE_CR_EVENT_CLEAR 0x0138U +#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL +#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL +#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U +#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U +#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U +#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U +#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U +#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U +#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U +#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU +#define 
ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U +#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U +#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU +#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU +#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U +#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU +#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU +#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU +#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U +#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U +#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU +#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU +#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U +#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U +#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU +#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U +#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU +#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U +#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U +#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU +#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U +#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U +#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU +#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U +#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U +#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU +#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U +#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U +#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU +#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U +#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U +#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU +#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U +#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U +#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU +#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U +#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U +#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU +#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U +#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U 
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU +#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U +#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U +#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU +#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U +#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U +#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU +#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U +#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U +#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U +#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U +#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU +#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U +#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U +#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U +#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U +#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U +#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U +#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U +#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U +#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U +#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U +#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U +#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U +#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U + /* Register ROGUE_CR_TIMER */ #define ROGUE_CR_TIMER 0x0160U #define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL @@ -6031,25 +6145,6 @@ #define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U #define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U -/* Register ROGUE_CR_ECC_RAM_ERR_INJ */ -#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U -#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL -#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U -#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU -#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U -#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U -#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U -#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U -#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U -#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU -#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U -#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U -#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU -#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U -#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U -#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU -#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U - /* Register ROGUE_CR_ECC_RAM_INIT_KICK */ #define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U #define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL @@ -6163,6 +6258,26 @@ #define 
ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU #define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U +/* Register ROGUE_CR_FAULT_FW_STATUS */ +#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U +#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL +#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U +#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU +#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U +#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U +#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U + +/* Register ROGUE_CR_FAULT_FW_CLEAR */ +#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U +#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U + /* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */ #define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U #define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL diff --git a/drivers/gpu/drm/imagination/pvr_rogue_riscv.h b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h new file mode 100644 index 000000000000..9a070e24fa6a --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* Copyright (c) 2024 Imagination Technologies Ltd. */ + +#ifndef PVR_ROGUE_RISCV_H +#define PVR_ROGUE_RISCV_H + +#include "pvr_rogue_cr_defs.h" + +#include <linux/bitops.h> +#include <linux/sizes.h> +#include <linux/types.h> + +#define ROGUE_RISCVFW_REGION_SIZE SZ_256M +#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE) + +enum rogue_riscvfw_region { + ROGUE_RISCV_REGION__RESERVED_0 = 0, + ROGUE_RISCV_REGION__RESERVED_1, + ROGUE_RISCV_REGION_SOCIF, + ROGUE_RISCV_REGION__RESERVED_3, + ROGUE_RISCV_REGION__RESERVED_4, + ROGUE_RISCV_REGION_BOOTLDR_DATA, + ROGUE_RISCV_REGION_SHARED_CACHED_DATA, + ROGUE_RISCV_REGION__RESERVED_7, + ROGUE_RISCV_REGION_COREMEM, + ROGUE_RISCV_REGION__RESERVED_9, + ROGUE_RISCV_REGION__RESERVED_A, + ROGUE_RISCV_REGION__RESERVED_B, + ROGUE_RISCV_REGION_BOOTLDR_CODE, + ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA, + ROGUE_RISCV_REGION__RESERVED_E, + ROGUE_RISCV_REGION__RESERVED_F, + + ROGUE_RISCV_REGION__COUNT, +}; + +#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT) +#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \ + (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U) + +#endif /* PVR_ROGUE_RISCV_H */ diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c index 975336a4facf..679aa618b7a9 100644 --- a/drivers/gpu/drm/imagination/pvr_stream.c +++ b/drivers/gpu/drm/imagination/pvr_stream.c @@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st u8 *dest, u32 dest_size, u32 *stream_offset_out) { int err = 0; - u32 i; - for (i = 0; i < nr_entries; i++) { + for (u32 i = 0; i < nr_entries; i++) { if (stream_def[i].offset >= dest_size) { err = -EINVAL; break; @@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev, u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX]; u32 ext_header; int err = 
0; - u32 i; /* Copy "must have" mask from device. We clear this as we process the stream. */ memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type], @@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev, musthave_masks[type] &= ~data; - for (i = 0; i < header->ext_streams_num; i++) { + for (u32 i = 0; i < header->ext_streams_num; i++) { const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i]; if (!(ext_header & ext_def->header_mask)) @@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev, * Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks * for this command was not present. */ - for (i = 0; i < cmd_defs->ext_nr_headers; i++) { + for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) { if (musthave_masks[i]) return -EINVAL; } @@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs if (err) return err; } else { - u32 i; - /* * If we don't have an extension stream then there must not be any "must have" * quirks for this command. */ - for (i = 0; i < cmd_defs->ext_nr_headers; i++) { + for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) { if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i]) return -EINVAL; } diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index 94af854547d6..5847a1c92bea 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev) { struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data; - int page_nr; vunmap(mips_data->pt); - for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) { + for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) { dma_unmap_page(from_pvr_device(pvr_dev)->dev, mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 9e66eb77b1eb..6d8325c76697 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, } static int imx_pd_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags); } static const struct drm_bridge_funcs imx_pd_bridge_funcs = { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 20b93fff0239..f851e9ffdb28 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, } static int ingenic_drm_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { - struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder); - return drm_bridge_attach(bridge->encoder, ib->next_bridge, + return drm_bridge_attach(encoder, ib->next_bridge, &ib->bridge, flags); } diff --git a/drivers/gpu/drm/lima/lima_gem.c 
b/drivers/gpu/drm/lima/lima_gem.c index 9bb997dbb4b9..5deec673c11e 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } bo->base.pages = pages; - bo->base.pages_use_count = 1; + refcount_set(&bo->base.pages_use_count, 1); mapping_set_unevictable(mapping); } @@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_vmap(&bo->base, map); + return drm_gem_shmem_vmap_locked(&bo->base, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 825135a26aa4..7934098e651b 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - ret = drm_gem_vmap_unlocked(&bo->base.base, &map); + ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { kvfree(et); goto out; @@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_vunmap_unlocked(&bo->base.base, &map); + drm_gem_vunmap(&bo->base.base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 395449a72f0a..a3423459dd7a 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge) } static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mcde_dsi *d = bridge_to_mcde_dsi(bridge); @@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, } /* Attach the DSI bridge to the output (panel etc) bridge */ - return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags); + return drm_bridge_attach(encoder, d->bridge_out, bridge, flags); } static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = { @@ -1137,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, d->bridge_out = bridge; /* Create a bridge for this DSI channel */ - d->bridge.funcs = &mcde_dsi_bridge_funcs; d->bridge.of_node = dev->of_node; drm_bridge_add(&d->bridge); @@ -1173,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev) u32 dsi_id; int ret; - d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); - if (!d) - return -ENOMEM; + d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs); + if (IS_ERR(d)) + return PTR_ERR(d); d->dev = dev; platform_set_drvdata(pdev, d); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index fed3307d3374..b2408abb9d49 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp) } static int mtk_dp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); @@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge, goto err_aux_register; if (mtk_dp->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge, + ret = drm_bridge_attach(encoder, 
mtk_dp->next_bridge, &mtk_dp->bridge, flags); if (ret) { drm_warn(mtk_dp->drm_dev, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 0fd13e6dd3f1..0f3b1ef8e497 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -765,6 +765,7 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge, } static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); @@ -783,7 +784,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, "Failed to get bridge\n"); } - return drm_bridge_attach(bridge->encoder, dpi->next_bridge, + return drm_bridge_attach(encoder, dpi->next_bridge, &dpi->bridge, flags); } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index d1f407fb7eb1..4fe1f38a3c4b 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) } static int mtk_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dsi *dsi = bridge_to_dsi(bridge); /* Attach the panel or bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, + return drm_bridge_attach(encoder, dsi->next_bridge, &dsi->bridge, flags); } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 06e4fac152b7..c9d0c335c519 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1269,6 +1269,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg } static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); @@ -1281,7 +1282,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, } if (hdmi->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + ret = drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); if (ret) return ret; diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index e79f7c3ce32e..c9678dc68fa1 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode) } static int meson_encoder_cvbs_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_cvbs *meson_encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge); - return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge, + return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge, &meson_encoder_cvbs->bridge, flags); } diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c index fe204437bd65..3db518e5f95d 100644 --- a/drivers/gpu/drm/meson/meson_encoder_dsi.c +++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c @@ -33,11 +33,12 @@ struct meson_encoder_dsi { container_of(x, struct meson_encoder_dsi, bridge) static int meson_encoder_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge); - return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge, + 
return drm_bridge_attach(encoder, encoder_dsi->next_bridge, &encoder_dsi->bridge, flags); } diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 7752d8ac85f0..040c04b5dff9 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -49,11 +49,12 @@ struct meson_encoder_hdmi { container_of(x, struct meson_encoder_hdmi, bridge) static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); - return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + return drm_bridge_attach(encoder, encoder_hdmi->next_bridge, &encoder_hdmi->bridge, flags); } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index d8633a596f8d..69a26bb5fabd 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1100,20 +1100,6 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, return ret == 1; } -static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, - u8 *link_status) -{ - int ret = 0, len; - - len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); - if (len != DP_LINK_STATUS_SIZE) { - DRM_ERROR("DP link status read failed, err: %d\n", len); - ret = -EINVAL; - } - - return ret; -} - static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, int *training_step) { @@ -1140,7 +1126,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, for (tries = 0; tries < maximum_retries; tries++) { drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (ret) return ret; @@ -1252,7 +1238,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (ret) return ret; @@ -1805,7 +1791,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) u8 link_status[DP_LINK_STATUS_SIZE]; int num_lanes = ctrl->link->link_params.num_lanes; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } @@ -1863,7 +1849,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ @@ -1888,7 +1874,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index cca57e56c906..293f4745f1e2 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -296,14 +296,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev, struct 
msm_dp_bridge *msm_dp_bridge; struct drm_bridge *bridge; - msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL); - if (!msm_dp_bridge) - return -ENOMEM; + msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge, + msm_dp_display->is_edp ? &msm_edp_bridge_ops : + &msm_dp_bridge_ops); + if (IS_ERR(msm_dp_bridge)) + return PTR_ERR(msm_dp_bridge); msm_dp_bridge->msm_dp_display = msm_dp_display; bridge = &msm_dp_bridge->bridge; - bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops; bridge->type = msm_dp_display->connector_type; bridge->ycbcr_420_allowed = yuv_supported; diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 1a1fbb2d7d4f..92a9077959b3 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -714,21 +714,21 @@ end: static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { - int len; + int ret; link->prev_sink_count = link->msm_dp_link.sink_count; - len = drm_dp_read_sink_count(link->aux); - if (len < 0) { + ret = drm_dp_read_sink_count(link->aux); + if (ret < 0) { DRM_ERROR("DP parse sink count failed\n"); - return len; + return ret; } - link->msm_dp_link.sink_count = len; + link->msm_dp_link.sink_count = ret; - len = drm_dp_dpcd_read_link_status(link->aux, - link->link_status); - if (len < DP_LINK_STATUS_SIZE) { + ret = drm_dp_dpcd_read_link_status(link->aux, + link->link_status); + if (ret < 0) { DRM_ERROR("DP link status read failed\n"); - return len; + return ret; } return msm_dp_link_parse_request(link); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 4fabb01345aa..ca400924d4ee 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, } static int dsi_mgr_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge, + return drm_bridge_attach(encoder, msm_dsi->next_bridge, bridge, flags); } @@ -461,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi, struct drm_connector *connector; int ret; - dsi_bridge = devm_kzalloc(msm_dsi->dev->dev, - sizeof(*dsi_bridge), GFP_KERNEL); - if (!dsi_bridge) - return -ENOMEM; + dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base, + &dsi_mgr_bridge_funcs); + if (IS_ERR(dsi_bridge)) + return PTR_ERR(dsi_bridge); dsi_bridge->id = msm_dsi->id; bridge = &dsi_bridge->base; - bridge->funcs = &dsi_mgr_bridge_funcs; ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge); if (ret) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 1456354c8af4..7f71956806a2 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -498,16 +498,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi) struct hdmi_bridge *hdmi_bridge; int ret; - hdmi_bridge = devm_kzalloc(hdmi->dev->dev, - sizeof(*hdmi_bridge), GFP_KERNEL); - if (!hdmi_bridge) - return -ENOMEM; + hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base, + &msm_hdmi_bridge_funcs); + if (IS_ERR(hdmi_bridge)) + return PTR_ERR(hdmi_bridge); hdmi_bridge->hdmi = hdmi; INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work); 
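Two conversions recur across the bridge drivers in this series (imx, ingenic, mcde, mediatek, meson, msm): allocation moves to devm_drm_bridge_alloc(), which sets the funcs pointer and ties the bridge lifetime to the device, and the .attach hook now receives the encoder as an explicit parameter instead of reading bridge->encoder. A condensed sketch of the resulting driver shape, using a hypothetical "foo" bridge:

/*
 * Hypothetical "foo" bridge condensing the two patterns applied
 * throughout this series; only the foo_* names are invented.
 */
#include <drm/drm_bridge.h>
#include <linux/container_of.h>
#include <linux/err.h>

struct foo_bridge {
	struct drm_bridge bridge;	/* funcs set by the allocator */
	struct drm_bridge *next_bridge;
};

static int foo_bridge_attach(struct drm_bridge *bridge,
			     struct drm_encoder *encoder,
			     enum drm_bridge_attach_flags flags)
{
	struct foo_bridge *foo = container_of(bridge, struct foo_bridge,
					      bridge);

	/*
	 * Chain to the next bridge using the encoder passed in;
	 * bridge->encoder is no longer read here.
	 */
	return drm_bridge_attach(encoder, foo->next_bridge, bridge, flags);
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.attach = foo_bridge_attach,
};

static int foo_bridge_init(struct device *dev)
{
	struct foo_bridge *foo;

	/* Replaces devm_kzalloc() plus a manual bridge->funcs assignment. */
	foo = devm_drm_bridge_alloc(dev, struct foo_bridge, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	drm_bridge_add(&foo->bridge);
	return 0;
}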
bridge = &hdmi_bridge->base; - bridge->funcs = &msm_hdmi_bridge_funcs; bridge->ddc = hdmi->i2c; bridge->type = DRM_MODE_CONNECTOR_HDMIA; bridge->vendor = "Qualcomm"; @@ -515,6 +514,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi) bridge->ops = DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HDMI | + DRM_BRIDGE_OP_HDMI_AUDIO | DRM_BRIDGE_OP_EDID; bridge->hdmi_audio_max_i2s_playback_channels = 8; bridge->hdmi_audio_dev = &hdmi->pdev->dev; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c3588dc9e537..f316e6776f67 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -671,7 +671,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value); break; case MSM_INFO_GET_FLAGS: - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = -EINVAL; break; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ebc9ba66efb8..2995e80fec3b 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) msm_gem_assert_locked(obj); - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return ERR_PTR(-ENODEV); pages = msm_gem_get_pages_locked(obj, madv); @@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj) put_iova_spaces(obj, true); - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { GEM_WARN_ON(msm_obj->vaddr); /* Don't drop the pages for imported dmabuf, as they are not diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 85f0257e83da..ba5c4ff76292 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj) /* imported/exported objects are not purgeable: */ static inline bool is_unpurgeable(struct msm_gem_object *msm_obj) { - return msm_obj->base.import_attach || msm_obj->pin_count; + return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count; } static inline bool is_purgeable(struct msm_gem_object *msm_obj) diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index ee267490c935..2e37913d5a6a 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj) struct page **pages; int ret = 0; - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return 0; pages = msm_gem_pin_pages_locked(obj); @@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj) void msm_gem_prime_unpin(struct drm_gem_object *obj) { - if (obj->import_attach) + if (drm_gem_is_imported(obj)) return; msm_gem_unpin_pages_locked(obj); diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 8ee00f59ca82..fcb2a7517377 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct lcdif_drm_private *lcdif; - struct resource *res; int ret; lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL); @@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm) lcdif->drm = drm; drm->dev_private = lcdif; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lcdif->base = devm_ioremap_resource(drm->dev, res); + lcdif->base = devm_platform_ioremap_resource(pdev, 0); if 
(IS_ERR(lcdif->base)) return PTR_ERR(lcdif->base); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 59020862cf65..c183b1112bc4 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -8,6 +8,7 @@ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved. */ +#include <linux/aperture.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/io.h> @@ -215,7 +216,6 @@ static int mxsfb_load(struct drm_device *drm, { struct platform_device *pdev = to_platform_device(drm->dev); struct mxsfb_drm_private *mxsfb; - struct resource *res; int ret; mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL); @@ -226,8 +226,7 @@ static int mxsfb_load(struct drm_device *drm, drm->dev_private = mxsfb; mxsfb->devdata = devdata; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mxsfb->base = devm_ioremap_resource(drm->dev, res); + mxsfb->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxsfb->base)) return PTR_ERR(mxsfb->base); @@ -361,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev) if (ret) goto err_free; + /* + * Remove early framebuffers (ie. simplefb). The framebuffer can be + * located anywhere in RAM + */ + ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't kick out existing framebuffers\n"); + ret = drm_dev_register(drm, 0); if (ret) goto err_unload; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 504cb3f2054b..9bed728cb00e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -775,10 +775,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, union hdmi_infoframe infoframe = { 0 }; const u8 rekey = 56; /* binary driver, and tegra, constant */ u32 max_ac_packet; - struct { - struct nvif_outp_infoframe_v0 infoframe; - u8 data[17]; - } args = { 0 }; + DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17); + const u8 data_len = __member_size(args->data); int ret, size; max_ac_packet = mode->htotal - mode->hdisplay; @@ -815,29 +813,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, return; /* AVI InfoFrame. */ - args.infoframe.version = 0; - args.infoframe.head = nv_crtc->index; + args->version = 0; + args->head = nv_crtc->index; if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) { drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode, HDMI_QUANTIZATION_RANGE_FULL); - size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data)); + size = hdmi_infoframe_pack(&infoframe, args->data, data_len); } else { size = 0; } - nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size); + nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size); /* Vendor InfoFrame. 
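Packed into the same DEFINE_RAW_FLEX() buffer as the AVI infoframe above, after clearing the previous payload, then pushed via nvif_outp_infoframe().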
*/ - memset(&args.data, 0, sizeof(args.data)); + memset(args->data, 0, data_len); if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, &nv_connector->base, mode)) - size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data)); + size = hdmi_infoframe_pack(&infoframe, args->data, data_len); else size = 0; - nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size); + nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size); nv_encoder->hdmi.enabled = true; } diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 746e126c3ecf..1c12854a8550 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -31,6 +31,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc); struct nvkm_gsp_event; typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc); +/** + * DOC: GSP message handling policy + * + * When sending a GSP RPC command, there can be multiple cases of handling + * the GSP RPC messages, which are the reply of GSP RPC commands, according + * to the requirement of the callers and the nature of the GSP RPC commands. + * + * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the + * caller after the GSP RPC command is issued. + * + * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP + * RPC message after the GSP RPC command is issued. + * + * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the specific reply and + * discard the reply before returning to the caller. + * + */ +enum nvkm_gsp_rpc_reply_policy { + NVKM_GSP_RPC_REPLY_NOWAIT = 0, + NVKM_GSP_RPC_REPLY_RECV, + NVKM_GSP_RPC_REPLY_POLL, +}; + struct nvkm_gsp { const struct nvkm_gsp_func *func; struct nvkm_subdev subdev; @@ -188,7 +211,8 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc); - void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc); + void *(*rpc_push)(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc); void (*rpc_done)(struct nvkm_gsp *gsp, void *repv); void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc); @@ -255,9 +279,10 @@ nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) } static inline void * -nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc) { - return gsp->rm->rpc_push(gsp, argv, wait, repc); + return gsp->rm->rpc_push(gsp, argv, policy, repc); } static inline void * @@ -268,13 +293,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc) if (IS_ERR_OR_NULL(argv)) return argv; - return nvkm_gsp_rpc_push(gsp, argv, true, argc); + return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc); } static inline int -nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait) +nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy) { - void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0); + void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0); if (IS_ERR(repv)) return PTR_ERR(repv); diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index cd659b9fd1d9..1286a664f688 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -270,10 +270,7 @@ 
nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, { NV03_CHANNEL_DMA , 0 }, {} }; - struct { - struct nvif_chan_v0 chan; - char name[TASK_COMM_LEN+16]; - } args; + DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16); struct nvif_device *device = &cli->device; struct nouveau_channel *chan; const u64 plength = 0x10000; @@ -298,28 +295,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, return ret; /* create channel object */ - args.chan.version = 0; - args.chan.namelen = sizeof(args.name); - args.chan.runlist = __ffs64(runm); - args.chan.runq = 0; - args.chan.priv = priv; - args.chan.devm = BIT(0); + args->version = 0; + args->namelen = __member_size(args->name); + args->runlist = __ffs64(runm); + args->runq = 0; + args->priv = priv; + args->devm = BIT(0); if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) { - args.chan.vmm = 0; - args.chan.ctxdma = nvif_handle(&chan->push.ctxdma); - args.chan.offset = chan->push.addr; - args.chan.length = 0; + args->vmm = 0; + args->ctxdma = nvif_handle(&chan->push.ctxdma); + args->offset = chan->push.addr; + args->length = 0; } else { - args.chan.vmm = nvif_handle(&chan->vmm->vmm.object); + args->vmm = nvif_handle(&chan->vmm->vmm.object); if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO) - args.chan.ctxdma = nvif_handle(&chan->push.ctxdma); + args->ctxdma = nvif_handle(&chan->push.ctxdma); else - args.chan.ctxdma = 0; - args.chan.offset = ioffset + chan->push.addr; - args.chan.length = ilength; + args->ctxdma = 0; + args->offset = ioffset + chan->push.addr; + args->length = ilength; } - args.chan.huserd = 0; - args.chan.ouserd = 0; + args->huserd = 0; + args->ouserd = 0; /* allocate userd */ if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) { @@ -329,27 +326,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, if (ret) return ret; - args.chan.huserd = nvif_handle(&chan->mem_userd.object); - args.chan.ouserd = 0; + args->huserd = nvif_handle(&chan->mem_userd.object); + args->ouserd = 0; chan->userd = &chan->mem_userd.object; } else { chan->userd = &chan->user; } - snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current)); + snprintf(args->name, __member_size(args->name), "%s[%d]", + current->comm, task_pid_nr(current)); ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass, - &args, sizeof(args), &chan->user); + args, __struct_size(args), &chan->user); if (ret) { nouveau_channel_del(pchan); return ret; } - chan->runlist = args.chan.runlist; - chan->chid = args.chan.chid; - chan->inst = args.chan.inst; - chan->token = args.chan.token; + chan->runlist = args->runlist; + chan->chid = args->chid; + chan->inst = args->inst; + chan->token = args->token; return 0; } @@ -367,17 +365,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) return ret; if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { - struct { - struct nvif_event_v0 base; - struct nvif_chan_event_v0 host; - } args; + DEFINE_RAW_FLEX(struct nvif_event_v0, args, data, + sizeof(struct nvif_chan_event_v0)); + struct nvif_chan_event_v0 *host = + (struct nvif_chan_event_v0 *)args->data; - args.host.version = 0; - args.host.type = NVIF_CHAN_EVENT_V0_KILLED; + host->version = 0; + host->type = NVIF_CHAN_EVENT_V0_KILLED; ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid, nouveau_channel_killed, false, - &args.base, sizeof(args), &chan->kill); + args, __struct_size(args), &chan->kill); if (ret == 0) ret = nvif_event_allow(&chan->kill); if (ret) { @@ -520,46 
+518,44 @@ nouveau_channels_fini(struct nouveau_drm *drm) int nouveau_channels_init(struct nouveau_drm *drm) { - struct { - struct nv_device_info_v1 m; - struct { - struct nv_device_info_v1_data channels; - struct nv_device_info_v1_data runlists; - } v; - } args = { - .m.version = 1, - .m.count = sizeof(args.v) / sizeof(args.v.channels), - .v.channels.mthd = NV_DEVICE_HOST_CHANNELS, - .v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS, - }; + DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2); + struct nv_device_info_v1_data *channels = &args->data[0]; + struct nv_device_info_v1_data *runlists = &args->data[1]; struct nvif_object *device = &drm->client.device.object; int ret, i; - ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args)); + args->version = 1; + args->count = __member_size(args->data) / sizeof(*args->data); + channels->mthd = NV_DEVICE_HOST_CHANNELS; + runlists->mthd = NV_DEVICE_HOST_RUNLISTS; + + ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args, + __struct_size(args)); if (ret || - args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data || - args.v.channels.mthd == NV_DEVICE_INFO_INVALID) + runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data || + channels->mthd == NV_DEVICE_INFO_INVALID) return -ENODEV; - drm->chan_nr = drm->chan_total = args.v.channels.data; - drm->runl_nr = fls64(args.v.runlists.data); + drm->chan_nr = drm->chan_total = channels->data; + drm->runl_nr = fls64(runlists->data); drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL); if (!drm->runl) return -ENOMEM; if (drm->chan_nr == 0) { for (i = 0; i < drm->runl_nr; i++) { - if (!(args.v.runlists.data & BIT(i))) + if (!(runlists->data & BIT(i))) continue; - args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS; - args.v.channels.data = i; + channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS; + channels->data = i; - ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args)); - if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID) + ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args, + __struct_size(args)); + if (ret || channels->mthd == NV_DEVICE_INFO_INVALID) return -ENODEV; - drm->runl[i].chan_nr = args.v.channels.data; + drm->runl[i].chan_nr = channels->data; drm->runl[i].chan_id_base = drm->chan_total; drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index e154d08857c5..c69139701056 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1079,6 +1079,10 @@ nouveau_pmops_freeze(struct device *dev) { struct nouveau_drm *drm = dev_get_drvdata(dev); + if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF || + drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) + return 0; + return nouveau_do_suspend(drm, false); } @@ -1087,6 +1091,10 @@ nouveau_pmops_thaw(struct device *dev) { struct nouveau_drm *drm = dev_get_drvdata(dev); + if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF || + drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) + return 0; + return nouveau_do_resume(drm, false); } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index edddfc036c6d..6ded8c2b6d3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -184,10 +184,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha struct nouveau_cli *cli = chan->cli; 
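The stack-buffer conversions in nouveau_chan.c above and in the nouveau_fence.c and nouveau_svm.c hunks that follow all use one recipe: an open-coded struct wrapping a fixed trailing buffer becomes a DEFINE_RAW_FLEX() declaration, and __member_size()/__struct_size() replace the old sizeof() expressions. A toy version of the pattern, with hypothetical struct toy_msg and toy_send() names:

/*
 * Toy illustration of the DEFINE_RAW_FLEX() conversions in this
 * series; struct toy_msg and toy_send() are invented for the sketch.
 */
#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

struct toy_msg {
	u32 version;
	u32 len;
	u8 data[];		/* flexible array member */
};

static void toy_send(const void *buf, size_t len) { /* no-op stub */ }

static void toy_example(void)
{
	/*
	 * One stack object sized for a toy_msg followed by 16 bytes of
	 * data[], replacing the old "struct { hdr; u8 data[16]; } args"
	 * idiom; msg points at it.
	 */
	DEFINE_RAW_FLEX(struct toy_msg, msg, data, 16);

	msg->version = 0;
	msg->len = __member_size(msg->data);		/* 16 */
	memset(msg->data, 0, __member_size(msg->data));

	/* Header plus trailing storage, i.e. the old sizeof(args). */
	toy_send(msg, __struct_size(msg));
}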
struct nouveau_drm *drm = cli->drm; struct nouveau_fence_priv *priv = (void*)drm->fence; - struct { - struct nvif_event_v0 base; - struct nvif_chan_event_v0 host; - } args; + DEFINE_RAW_FLEX(struct nvif_event_v0, args, data, + sizeof(struct nvif_chan_event_v0)); + struct nvif_chan_event_v0 *host = + (struct nvif_chan_event_v0 *)args->data; int ret; INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work); @@ -207,12 +207,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha if (!priv->uevent) return; - args.host.version = 0; - args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR; + host->version = 0; + host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR; ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid, nouveau_fence_wait_uevent_handler, false, - &args.base, sizeof(args), &fctx->event); + args, __struct_size(args), &fctx->event); WARN_ON(ret); } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index e12e2596ed84..6fa387da0637 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work) struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]); struct nvif_object *device = &svm->drm->client.device.object; struct nouveau_svmm *svmm; - struct { - struct nouveau_pfnmap_args i; - u64 phys[1]; - } args; + DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1); unsigned long hmm_flags; u64 inst, start, limit; int fi, fn; @@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work) mutex_unlock(&svm->mutex); /* Process list of faults. */ - args.i.i.version = 0; - args.i.i.type = NVIF_IOCTL_V0_MTHD; - args.i.m.version = 0; - args.i.m.method = NVIF_VMM_V0_PFNMAP; - args.i.p.version = 0; + args->i.version = 0; + args->i.type = NVIF_IOCTL_V0_MTHD; + args->m.version = 0; + args->m.method = NVIF_VMM_V0_PFNMAP; + args->p.version = 0; for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { struct svm_notifier notifier; @@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work) * fault window, determining required pages and access * permissions based on pending faults. */ - args.i.p.addr = start; - args.i.p.page = PAGE_SHIFT; - args.i.p.size = PAGE_SIZE; + args->p.addr = start; + args->p.page = PAGE_SHIFT; + args->p.size = PAGE_SIZE; /* * Determine required permissions based on GPU fault * access flags. 
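Looking ahead to the GSP hunks at the end of this section: the nvkm_gsp_rpc_reply_policy enum documented in the gsp.h change above replaces the old boolean wait argument of rpc_push()/rpc_wr(), and r535_gsp_rpc_handle_reply() below dispatches on it. A caller-side sketch of the three policies, with hypothetical toy_rpc_* wrappers (each assumes argv was freshly built with nvkm_gsp_rpc_get()):

/*
 * Hypothetical wrappers; nvkm_gsp_rpc_push() and the policy semantics
 * are taken from the gsp.h and r535.c hunks in this series.
 */
#include <subdev/gsp.h>	/* nvkm include path */

/* Queue the RPC and return at once; the reply is not consumed here. */
static int toy_rpc_post(struct nvkm_gsp *gsp, void *argv)
{
	void *repv = nvkm_gsp_rpc_push(gsp, argv,
				       NVKM_GSP_RPC_REPLY_NOWAIT, 0);

	return IS_ERR(repv) ? PTR_ERR(repv) : 0;
}

/* Wait for the full reply; caller releases it with nvkm_gsp_rpc_done(). */
static void *toy_rpc_call(struct nvkm_gsp *gsp, void *argv, u32 repc)
{
	return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, repc);
}

/* Wait until the reply arrives, but discard its contents. */
static int toy_rpc_sync(struct nvkm_gsp *gsp, void *argv)
{
	void *repv = nvkm_gsp_rpc_push(gsp, argv,
				       NVKM_GSP_RPC_REPLY_POLL, 0);

	return IS_ERR(repv) ? PTR_ERR(repv) : 0;
}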
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work) notifier.svmm = svmm; if (atomic) - ret = nouveau_atomic_range_fault(svmm, svm->drm, - &args.i, sizeof(args), + ret = nouveau_atomic_range_fault(svmm, svm->drm, args, + __struct_size(args), ¬ifier); else - ret = nouveau_range_fault(svmm, svm->drm, &args.i, - sizeof(args), hmm_flags, - ¬ifier); + ret = nouveau_range_fault(svmm, svm->drm, args, + __struct_size(args), + hmm_flags, ¬ifier); mmput(mm); - limit = args.i.p.addr + args.i.p.size; + limit = args->p.addr + args->p.size; for (fn = fi; ++fn < buffer->fault_nr; ) { /* It's okay to skip over duplicate addresses from the * same SVMM as faults are ordered by access type such @@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work) if (buffer->fault[fn]->svmm != svmm || buffer->fault[fn]->addr >= limit || (buffer->fault[fi]->access == FAULT_ACCESS_READ && - !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) || + !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) || (buffer->fault[fi]->access != FAULT_ACCESS_READ && buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && - !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) || + !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) || (buffer->fault[fi]->access != FAULT_ACCESS_READ && buffer->fault[fi]->access != FAULT_ACCESS_WRITE && buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH && - !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A))) + !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A))) break; } diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c index 9ee18cb99264..5a1a83c62a2a 100644 --- a/drivers/gpu/drm/nouveau/nvif/conn.c +++ b/drivers/gpu/drm/nouveau/nvif/conn.c @@ -30,17 +30,17 @@ int nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types, struct nvif_event *event) { - struct { - struct nvif_event_v0 base; - struct nvif_conn_event_v0 conn; - } args; + DEFINE_RAW_FLEX(struct nvif_event_v0, args, data, + sizeof(struct nvif_conn_event_v0)); + struct nvif_conn_event_v0 *args_conn = + (struct nvif_conn_event_v0 *)args->data; int ret; - args.conn.version = 0; - args.conn.types = types; + args_conn->version = 0; + args_conn->types = types; ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn), - func, true, &args.base, sizeof(args), false, event); + func, true, args, __struct_size(args), false, event); NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c index 6daeb7f0b09b..32f6c5eb92af 100644 --- a/drivers/gpu/drm/nouveau/nvif/outp.c +++ b/drivers/gpu/drm/nouveau/nvif/outp.c @@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable) int nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size) { - struct { - struct nvif_outp_hda_eld_v0 mthd; - u8 data[128]; - } args; + DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128); int ret; - if (WARN_ON(size > ARRAY_SIZE(args.data))) + if (WARN_ON(size > __member_size(mthd->data))) return -EINVAL; - args.mthd.version = 0; - args.mthd.head = head; + mthd->version = 0; + mthd->head = head; - memcpy(args.data, data, size); - ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size); + memcpy(mthd->data, data, size); + ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size); NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size); return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c index 3a30bea30e36..90186f98065c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -56,7 +56,7 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index db2602e88006..969f6b921fdb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -585,13 +585,37 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) } static void * -r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) +r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, + enum nvkm_gsp_rpc_reply_policy policy, + u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *reply; + void *repv = NULL; + + switch (policy) { + case NVKM_GSP_RPC_REPLY_NOWAIT: + break; + case NVKM_GSP_RPC_REPLY_RECV: + reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); + if (!IS_ERR_OR_NULL(reply)) + repv = reply->data; + else + repv = reply; + break; + case NVKM_GSP_RPC_REPLY_POLL: + repv = r535_gsp_msg_recv(gsp, fn, 0); + break; + } + + return repv; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) { struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - struct nvfw_gsp_rpc *msg; u32 fn = rpc->function; - void *repv = NULL; int ret; if (gsp->subdev.debug >= NV_DBG_TRACE) { @@ -605,15 +629,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait, if (ret) return ERR_PTR(ret); - if (wait) { - msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); - if (!IS_ERR_OR_NULL(msg)) - repv = msg->data; - else - repv = msg; - } - - return repv; + return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len); } static void @@ -797,7 +813,7 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) rpc->params.hRoot = client->object.handle; rpc->params.hObjectParent = 0; rpc->params.hObjectOld = object->handle; - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } static void @@ -815,7 +831,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) struct nvkm_gsp *gsp = object->client->gsp; void *ret = NULL; - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc)); + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc)); if (IS_ERR_OR_NULL(rpc)) return rpc; @@ -876,7 +892,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 rep struct nvkm_gsp *gsp = object->client->gsp; int ret = 0; - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc); if (IS_ERR_OR_NULL(rpc)) { *params = NULL; return PTR_ERR(rpc); @@ -948,8 +964,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) } static void * -r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) { struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); @@ -967,7 +983,7 @@ r535_gsp_rpc_push(struct nvkm_gsp 
*gsp, void *payload, bool wait, rpc->length = sizeof(*rpc) + max_payload_size; msg->checksum = rpc->length; - repv = r535_gsp_rpc_send(gsp, payload, false, 0); + repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0); if (IS_ERR(repv)) goto done; @@ -988,7 +1004,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, memcpy(next, payload, size); - repv = r535_gsp_rpc_send(gsp, next, false, 0); + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); if (IS_ERR(repv)) goto done; @@ -997,20 +1013,10 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, } /* Wait for reply. */ - rpc = r535_gsp_msg_recv(gsp, fn, payload_size + - sizeof(*rpc)); - if (!IS_ERR_OR_NULL(rpc)) { - if (wait) { - repv = rpc->data; - } else { - nvkm_gsp_rpc_done(gsp, rpc); - repv = NULL; - } - } else { - repv = wait ? rpc : NULL; - } + repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + + sizeof(*rpc)); } else { - repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len); + repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); } done: @@ -1327,7 +1333,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; } - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } enum registry_type { @@ -1684,7 +1690,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) build_registry(gsp, rpc); - return nvkm_gsp_rpc_wr(gsp, rpc, false); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); fail: clean_registry(gsp); @@ -1893,7 +1899,7 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) info->pciConfigMirrorSize = 0x001000; r535_gsp_acpi_info(gsp, &info->acpiMethodData); - return nvkm_gsp_rpc_wr(gsp, info, false); + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c index 5f3c9c02a4c0..35ba1798ee6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c @@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; } - ret = nvkm_gsp_rpc_wr(gsp, rpc, true); + ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL); if (ret) return ret; diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index b17e77f700dd..6eff97a09160 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi) */ static int dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dpi_data *dpi = drm_bridge_to_dpi(bridge); @@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge, dpi_init_pll(dpi); - return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge, + return drm_bridge_attach(encoder, dpi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 9b9cc593790c..91ee63bfe0bc 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = { */ static int dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dsi_data *dsi 
= drm_bridge_to_dsi(bridge); @@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge, + return drm_bridge_attach(encoder, dsi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index e1ac447221ee..a3b22952fdc3 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core) */ static int hdmi4_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); @@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge, + return drm_bridge_attach(encoder, hdmi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index fa9904e4c218..0c98444d39a9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) */ static int hdmi5_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); @@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge, + return drm_bridge_attach(encoder, hdmi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index f9ae358e8e52..e78826e4b560 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) */ static int sdi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); @@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge, + return drm_bridge_attach(encoder, sdi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index aaeef603682c..50349518eda1 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc) */ static int venc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct venc_device *venc = drm_bridge_to_venc(bridge); @@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, venc->output.next_bridge, + return drm_bridge_attach(encoder, venc->output.next_bridge, bridge, flags); }
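Every omapdrm attach hook above receives the same mechanical change: the drm_bridge attach callback is now handed the encoder explicitly instead of reading bridge->encoder. A minimal sketch of the new callback shape, with hypothetical demo_* names standing in for a real driver:

#include <linux/container_of.h>
#include <linux/errno.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

struct demo_output {			/* hypothetical driver state */
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
};

static int demo_bridge_attach(struct drm_bridge *bridge,
			      struct drm_encoder *encoder,
			      enum drm_bridge_attach_flags flags)
{
	struct demo_output *out = container_of(bridge, struct demo_output,
					       bridge);

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	/* Chain to the next bridge using the encoder passed in. */
	return drm_bridge_attach(encoder, out->next_bridge, bridge, flags);
}

Passing the encoder as a parameter means attach no longer has to assume that bridge->encoder is already populated when the hook runs.

diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index e059b06e0239..7e9c60a626fb 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -154,6 +154,17 @@ config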
DRM_PANEL_LVDS handling of power supplies or control signals. It implements automatic backlight handling if the panel is attached to a backlight controller. +config DRM_PANEL_HIMAX_HX8279 + tristate "Himax HX8279-based panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y if you want to enable support for panels based on the + Himax HX8279 controller, such as the Startek KD070FHFID078 + 7.0" 1200x1920 IPS LCD panel that uses a MIPI-DSI interface + and others. + config DRM_PANEL_HIMAX_HX83102 tristate "Himax HX83102-based panels" depends on OF @@ -996,6 +1007,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI Video Mode panel +config DRM_PANEL_VISIONOX_G2647FB105 + tristate "Visionox G2647FB105" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the Visionox + G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel. + config DRM_PANEL_VISIONOX_R66451 tristate "Visionox R66451" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 1bb8ae46b59b..883974f0cba1 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o +obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o @@ -101,6 +102,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o +obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index 4692c36fe217..87fb0fd29658 100644 --- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi) struct y030xx067a *priv; int err; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel, + &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(priv)) + return PTR_ERR(priv); priv->spi = spi; spi_set_drvdata(spi, priv); @@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi) return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO\n"); - drm_panel_init(&priv->panel, dev, &y030xx067a_funcs, - DRM_MODE_CONNECTOR_DPI); - err = drm_panel_of_backlight(&priv->panel); if (err) return err;
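The panel-abt-y030xx067a.c hunk above is the template for the long run of panel conversions that follows: devm_kzalloc() plus a later drm_panel_init() collapse into a single devm_drm_panel_alloc(), which allocates the driver structure around its embedded drm_panel and binds the funcs and connector type up front. A sketch of a converted probe, with hypothetical demo_* names:

#include <linux/err.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct demo_panel {
	struct drm_panel panel;		/* must be embedded for the helper */
	struct mipi_dsi_device *dsi;
};

static const struct drm_panel_funcs demo_panel_funcs;

static int demo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct demo_panel *ctx;

	/* One call allocates demo_panel and initialises ctx->panel. */
	ctx = devm_drm_panel_alloc(&dsi->dev, struct demo_panel, panel,
				   &demo_panel_funcs, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);
	return drm_panel_of_backlight(&ctx->panel);
}

Several hunks below (panel-boe-bf060y8m-aj0.c in particular, plus the new panel-himax-hx8279.c driver) also move their init and teardown sequences onto struct mipi_dsi_multi_context, where each *_multi() helper becomes a no-op once an error has accumulated and the result is read once at the end, as in this sketch (the 120 ms delay is illustrative):

static int demo_panel_on(struct demo_panel *ctx)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };

	/* Each helper skips itself if dsi_ctx.accum_err is already set. */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* first error, or 0 */
}

diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index 503ecea72c5e..ea5119018df4 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ 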
b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev) return PTR_ERR(map); } - vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL); - if (!vpanel) - return -ENOMEM; + vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel, + &versatile_panel_drm_funcs, + DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(vpanel)) + return PTR_ERR(vpanel); ret = regmap_read(map, SYS_CLCD, &val); if (ret) { @@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev) dev_info(dev, "panel mounted on IB2 daughterboard\n"); } - drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs, - DRM_MODE_CONNECTOR_DPI); - drm_panel_add(&vpanel->panel); return 0; diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c index b05a663c134c..db006576d704 100644 --- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c +++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c @@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi) struct tm5p5_nt35596 *ctx; int ret; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel, + &tm5p5_nt35596_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); ctx->supplies[0].supply = "vdd"; ctx->supplies[1].supply = "vddio"; @@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi) MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; - drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi); if (IS_ERR(ctx->panel.backlight)) { ret = PTR_ERR(ctx->panel.backlight); diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c index 77604d6a4e72..6e52bf6830e1 100644 --- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c +++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c @@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi) spi->mode |= SPI_MODE_3 | SPI_3WIRE; - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel, + &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(priv)) + return PTR_ERR(priv); priv->spi = spi; spi_set_drvdata(spi, priv); @@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi) if (IS_ERR(priv->reset_gpio)) return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO"); - drm_panel_init(&priv->panel, dev, &a030jtn01_funcs, - DRM_MODE_CONNECTOR_DPI); - err = drm_panel_of_backlight(&priv->panel); if (err) return err; diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c index 7e66db4a88bb..84c21c62a43e 100644 --- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c +++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c @@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe) static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe) { struct mipi_dsi_device *dsi = boe->dsi; - struct device *dev = &dsi->dev; - int ret; - - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 
DCS_ALLOW_HBM_RANGE); - mipi_dsi_dcs_write_seq(dsi, 0xf8, - 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d); - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(30); - - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc0, - 0x08, 0x48, 0x65, 0x33, 0x33, 0x33, - 0x2a, 0x31, 0x39, 0x20, 0x09); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, - 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); - mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92, - 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83, - 0x5c, 0x5c, 0x5c); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e); - - msleep(30); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(50); - - return 0; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, + 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 30); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, + 0x08, 0x48, 0x65, 0x33, 0x33, 0x33, + 0x2a, 0x31, 0x39, 0x20, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92, + 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83, + 0x5c, 0x5c, 0x5c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e); + + mipi_dsi_msleep(&dsi_ctx, 30); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); + + return dsi_ctx.accum_err; } -static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe) +static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe) { struct mipi_dsi_device *dsi = boe->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; /* OFF commands sent in HS mode */ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(20); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - usleep_range(1000, 2000); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000); dsi->mode_flags |= MIPI_DSI_MODE_LPM; - - return 0; } static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel) { struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); - struct device *dev = &boe->dsi->dev; int ret; /* @@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel) ret = boe_bf060y8m_aj0_on(boe); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(boe->reset_gpio, 1); - return ret; + goto err_on; } return 0; +err_on: + 
regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer); err_vci: regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); err_vddio: @@ -178,15 +158,11 @@ err_elvss: static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel) { struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); - struct device *dev = &boe->dsi->dev; - int ret; - ret = boe_bf060y8m_aj0_off(boe); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + boe_bf060y8m_aj0_off(boe); gpiod_set_value_cansleep(boe->reset_gpio, 1); - ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs); + regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs); return 0; } @@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); u16 brightness = backlight_get_brightness(bl); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness); - return 0; + return dsi_ctx.accum_err; } static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl) @@ -350,9 +324,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi) struct boe_bf060y8m_aj0 *boe; int ret; - boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL); - if (!boe) - return -ENOMEM; + boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel, + &boe_bf060y8m_aj0_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(boe)) + return PTR_ERR(boe); ret = boe_bf060y8m_aj0_init_vregs(boe, dev); if (ret) @@ -374,9 +350,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi) MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; - drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - boe->panel.prepare_prev_first = true; boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi); diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c index 0b87f1e6ecae..f33d4f855929 100644 --- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c +++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c @@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi) const struct panel_desc *desc; int ret; - ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel, + &boe_th101mb31ig002_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; @@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi) return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n"); - drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs, - DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c index 50e4a5341bc6..20b6e11a7d84 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c @@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi) struct boe_tv101wum_ll2 *ctx; int ret; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel, + 
&boe_tv101wum_ll2_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); ret = devm_regulator_bulk_get_const(&dsi->dev, ARRAY_SIZE(boe_tv101wum_ll2_supplies), @@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_HSE; - drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs, - DRM_MODE_CONNECTOR_DSI); ctx->panel.prepare_prev_first = true; ret = drm_panel_of_backlight(&ctx->panel); diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c index 6b3f4d664d2a..ae6e9ffc46cb 100644 --- a/drivers/gpu/drm/panel/panel-dsi-cm.c +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi) dev_dbg(dev, "probe\n"); - ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; + ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel, + &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ddata)) + return PTR_ERR(ddata); mipi_dsi_set_drvdata(dsi, ddata); ddata->dsi = dsi; @@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi) dsicm_hw_reset(ddata); - drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - if (ddata->use_dsi_backlight) { struct backlight_properties props = { 0 }; props.max_brightness = 255; diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c index 0bfed0ec0bbc..fb9f9f42be4f 100644 --- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c +++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c @@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi) struct ebbg_ft8719 *ctx; int i, ret; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel, + &ebbg_ft8719_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) ctx->supplies[i].supply = regulator_names[i]; @@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_CLOCK_NON_CONTINUOUS; - drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&ctx->panel); if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n"); diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 52028c8f8988..e8fe0014143f 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, struct device_node *ddc; int err; - panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL); - if (!panel) - return -ENOMEM; + panel = devm_drm_panel_alloc(dev, struct panel_edp, base, + &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP); + if (IS_ERR(panel)) + return PTR_ERR(panel); panel->prepared_time = 0; panel->desc = desc; @@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, dev_set_drvdata(dev, panel); - drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP); - err = drm_panel_of_backlight(&panel->base); if (err) goto err_finished_ddc_init; diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c new file mode 100644 index 
000000000000..fb302d1f91b9 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c @@ -0,0 +1,1296 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Himax HX8279 DriverIC panels driver + * + * Copyright (c) 2025 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +/* Page selection */ +#define HX8279_REG_PAGE 0xb0 + #define HX8279_PAGE_SEL GENMASK(3, 0) + +/* Page 0 - Driver/Module Configuration */ +#define HX8279_P0_VGHS 0xbf +#define HX8279_P0_VGLS 0xc0 +#define HX8279_P0_VGPHS 0xc2 +#define HX8279_P0_VGNHS 0xc4 + #define HX8279_P0_VG_SEL GENMASK(4, 0) + #define HX8279_VGH_MIN_MV 8700 + #define HX8279_VGH_STEP_MV 300 + #define HX8279_VGL_MIN_MV 6700 + #define HX8279_VGL_STEP_MV 300 + #define HX8279_VGPNH_MIN_MV 4000 + #define HX8279_VGPNX_STEP_MV 50 + #define HX8279_VGH_VOLT_SEL(x) ((x - HX8279_VGH_MIN_MV) / HX8279_VGH_STEP_MV) + #define HX8279_VGL_VOLT_SEL(x) ((x - HX8279_VGL_MIN_MV) / HX8279_VGL_STEP_MV) + #define HX8279_VGPN_VOLT_SEL(x) ((x - HX8279_VGPNH_MIN_MV) / HX8279_VGPNX_STEP_MV) + +/* Page 1 - Gate driver On Array (GOA) Mux */ +#define HX8279_P1_REG_GOA_L 0xc0 +#define HX8279_P1_REG_GOUTL(x) (HX8279_P1_REG_GOA_L + (x)) +#define HX8279_P1_REG_GOA_R 0xd4 +#define HX8279_P1_REG_GOUTR(x) (HX8279_P1_REG_GOA_R + (x)) + #define HX8279_GOUT_STB GENMASK(7, 6) + #define HX8279_GOUT_SEL GENMASK(5, 0) + +/* Page 2 - Analog Gamma Configuration */ +#define HX8279_P2_REG_ANALOG_GAMMA 0xc0 + #define HX8279_P2_REG_GAMMA_T_PVP(x) (HX8279_P2_REG_ANALOG_GAMMA + (x)) /* 0..16 */ + #define HX8279_P2_REG_GAMMA_T_PVN(x) (HX8279_P2_REG_GAMMA_T_PVP(17) + (x)) /* 0..16 */ + +/* Page 3 - Gate driver On Array (GOA) Configuration */ +#define HX8279_P3_REG_UNKNOWN_BA 0xba +#define HX8279_P3_REG_GOA_CKV_FALL_PREC 0xbc +#define HX8279_P3_REG_GOA_TIMING_ODD 0xc2 + #define HX8279_P3_REG_GOA_TO(x) (HX8279_P3_REG_GOA_TIMING_ODD + x) /* GOA_T0..5 */ +#define HX8279_P3_REG_GOA_STVL 0xc8 + #define HX8279_P3_GOA_STV_LEAD GENMASK(4, 0) +#define HX8279_P3_REG_GOA_CKVL 0xc9 + #define HX8279_P3_GOA_CKV_LEAD GENMASK(4, 0) +#define HX8279_P3_REG_GOA_CKVD 0xca + #define HX8279_P3_GOA_CKV_NONOVERLAP BIT(7) + #define HX8279_P3_GOA_CKV_RESERVED BIT(6) + #define HX8279_P3_GOA_CKV_DUMMY GENMASK(5, 0) +#define HX8279_P3_REG_GOA_CKV_RISE_PREC 0xcb +#define HX8279_P3_REG_GOA_CLR1_W_ADJ 0xd2 +#define HX8279_P3_REG_GOA_CLR234_W_ADJ 0xd3 +#define HX8279_P3_REG_GOA_CLR1_CFG 0xd4 +#define HX8279_P3_REG_GOA_CLR_CFG(x) (HX8279_P3_REG_GOA_CLR1_CFG + (x)) /* CLR1..4 */ + #define HX8279_P3_GOA_CLR_CFG_POLARITY BIT(7) + #define HX8279_P3_GOA_CLR_CFG_STARTPOS GENMASK(6, 0) +#define HX8279_P3_REG_GOA_TIMING_EVEN 0xdd + #define HX8279_P3_REG_GOA_TE(x) (HX8279_P3_REG_GOA_TIMING_EVEN + x) +#define HX8279_P3_REG_UNKNOWN_E4 0xe4 +#define HX8279_P3_REG_UNKNOWN_E5 0xe5 + +/* Page 5 - MIPI */ +#define HX8279_P5_REG_TIMING 0xb3 + #define HX8279_P5_TIMING_THS_SETTLE GENMASK(7, 5) + #define HX8279_P5_TIMING_LHS_SETTLE BIT(4) + #define HX8279_P5_TIMING_TLPX GENMASK(3, 0) +#define HX8279_P5_REG_UNKNOWN_B8 0xb8 +#define HX8279_P5_REG_UNKNOWN_BC 0xbc +#define HX8279_P5_REG_UNKNOWN_D6 0xd6 + +/* Page 6 - Engineer */ +#define HX8279_P6_REG_ENGINEER_PWD 0xb8 
+#define HX8279_P6_REG_INHOUSE_FUNC 0xc0 + #define HX8279_P6_ENG_UNLOCK_WORD 0xa5 +#define HX8279_P6_REG_GAMMA_CHOPPER 0xbc + #define HX8279_P6_GAMMA_POCGM_CTL GENMASK(6, 4) + #define HX8279_P6_GAMMA_POGCMD_CTL GENMASK(2, 0) +#define HX8279_P6_REG_VOLT_ADJ 0xc7 + /* For VCCIFS and VCCS - 0: 1450, 1: 1500, 2: 1550, 3: 1600 uV */ + #define HX8279_P6_VOLT_ADJ_VCCIFS GENMASK(3, 2) + #define HX8279_P6_VOLT_ADJ_VCCS GENMASK(1, 0) +#define HX8279_P6_REG_DLY_TIME_ADJ 0xd5 + +/* Page 7...12 - Digital Gamma Adjustment */ +#define HX8279_PG_DIGITAL_GAMMA 0xb1 +#define HX8279_DGAMMA_DGMA1_HI GENMASK(7, 6) +#define HX8279_DGAMMA_DGMA2_HI GENMASK(5, 4) +#define HX8279_DGAMMA_DGMA3_HI GENMASK(3, 2) +#define HX8279_DGAMMA_DGMA4_HI GENMASK(1, 0) +#define HX8279_PG_DGAMMA_NUM_LO_BYTES 24 +#define HX8279_PG_DGAMMA_NUM_HI_BYTES 6 + +struct hx8279 { + struct drm_panel panel; + struct mipi_dsi_device *dsi[2]; + struct regulator_bulk_data vregs[2]; + struct gpio_desc *enable_gpio; + struct gpio_desc *reset_gpio; + const struct hx8279_panel_desc *desc; + u8 last_page; + bool skip_voltage_config; + bool skip_goa_config; + bool skip_goa_timing; + bool skip_goa_even_timing; + bool skip_mipi_timing; +}; + +struct hx8279_panel_mode { + const struct drm_display_mode mode; + u8 bpc; + bool is_video_mode; +}; + +/** + * struct hx8279_goa_mux - Gate driver On Array Muxer + * @gout_l: Mux GOA signal to GOUT Left pin + * @gout_r: Mux GOA signal to GOUT Right pin + */ +struct hx8279_goa_mux { + u8 gout_l[20]; + u8 gout_r[20]; +}; + +/** + * struct hx8279_analog_gamma - Analog Gamma Adjustment + * @pos: Positive gamma op's input voltage, adjusted by VGP(H/L) + * @neg: Negative gamma op's input voltage, adjusted by VGN(H/L) + * + * Analog Gamma correction is performed with 17+17 reference voltages, + * changed with resistor streams, and defined with 17 register values + * for positive and 17 for negative. + * + * Each register holds resistance values, in 8.5ohms per unit, for the + * following gamma levels: + * 0, 8, 16, 28, 40, 56, 80, 128, 176, 200, 216, 228, 240, 248, 252, 255. + */ +struct hx8279_analog_gamma { + u8 pos[17]; + u8 neg[17]; +}; + +/** + * struct hx8279_digital_gamma - Digital Gamma Adjustment + * @r: Adjustment for red component + * @g: Adjustment for green component + * @b: Adjustment for blue component + * + * The layout of this structure follows the register layout to simplify + * both the handling and the declaration of those values in the driver. + * Gamma correction is internally done with a 24 segment piecewise + * linear interpolation; those segments are defined with 24 ten bits + * values of which: + * - The LOW eight bits for the first 24 registers start at the first + * register (at 0xb1) of the Digital Gamma Adjustment page; + * - The HIGH two bits for each of the 24 registers are contained + * in the last six registers; + * - The last six registers contain four groups of two-bits HI values + * for each of the first 24 registers, but in an inverted fashion, + * this means that the first two bits relate to the last register + * of a set of four. 
+ * + * The 24 segments refer to the following gamma levels: + * 0, 1, 3, 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, + * 192, 208, 224, 232, 240, 244, 248, 252, 254, 255 + */ +struct hx8279_digital_gamma { + u8 r[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES]; + u8 g[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES]; + u8 b[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES]; +}; + +struct hx8279_panel_desc { + const struct mipi_dsi_device_info dsi_info; + const struct hx8279_panel_mode *mode_data; + u8 num_lanes; + u8 num_modes; + + /* Page 0 */ + unsigned int vgh_mv; + unsigned int vgl_mv; + unsigned int vgph_mv; + unsigned int vgnh_mv; + + /* Page 1 */ + const struct hx8279_goa_mux *gmux; + + /* Page 2 */ + const struct hx8279_analog_gamma *agamma; + + /* Page 3 */ + u8 goa_unk_ba; + u8 goa_odd_timing[6]; + u8 goa_even_timing[6]; + u8 goa_stv_lead_time_ck; + u8 goa_ckv_lead_time_ck; + u8 goa_ckv_dummy_vblank_num; + u8 goa_ckv_rise_precharge; + u8 goa_ckv_fall_precharge; + bool goa_ckv_non_overlap_ctl; + u8 goa_clr1_width_adj; + u8 goa_clr234_width_adj; + s8 goa_clr_polarity[4]; + int goa_clr_start_pos[4]; + u8 goa_unk_e4; + u8 goa_unk_e5; + + /* Page 5 */ + u8 bta_tlpx; + bool lhs_settle_time_by_osc25; + u8 ths_settle_time; + u8 timing_unk_b8; + u8 timing_unk_bc; + u8 timing_unk_d6; + + /* Page 6 */ + u8 gamma_ctl; + u8 volt_adj; + u8 src_delay_time_adj_ck; + + /* Page 7..12 */ + const struct hx8279_digital_gamma *dgamma; +}; + +static inline struct hx8279 *to_hx8279(struct drm_panel *panel) +{ + return container_of(panel, struct hx8279, panel); +} + +static void hx8279_set_page(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx, u8 page) +{ + const u8 cmd_set_page[] = { HX8279_REG_PAGE, page }; + + if (hx->last_page == page) + return; + + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_page, ARRAY_SIZE(cmd_set_page)); + if (!dsi_ctx->accum_err) + hx->last_page = page; +} + +static void hx8279_set_module_config(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_panel_desc *desc = hx->desc; + u8 cmd_set_voltage[2]; + + if (hx->skip_voltage_config) + return; + + /* Page 0 - Driver/Module Configuration */ + hx8279_set_page(hx, dsi_ctx, 0); + + if (desc->vgh_mv) { + cmd_set_voltage[0] = HX8279_P0_VGHS; + cmd_set_voltage[1] = HX8279_VGH_VOLT_SEL(desc->vgh_mv); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage, + ARRAY_SIZE(cmd_set_voltage)); + } + + if (desc->vgl_mv) { + cmd_set_voltage[0] = HX8279_P0_VGLS; + cmd_set_voltage[1] = HX8279_VGL_VOLT_SEL(desc->vgl_mv); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage, + ARRAY_SIZE(cmd_set_voltage)); + } + + if (desc->vgph_mv) { + cmd_set_voltage[0] = HX8279_P0_VGPHS; + cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgph_mv); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage, + ARRAY_SIZE(cmd_set_voltage)); + } + + if (desc->vgnh_mv) { + cmd_set_voltage[0] = HX8279_P0_VGNHS; + cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgnh_mv); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage, + ARRAY_SIZE(cmd_set_voltage)); + } +} + +static void hx8279_set_gmux(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_goa_mux *gmux = hx->desc->gmux; + u8 cmd_set_gmux[2]; + int i; + + if (!gmux) + return; + + hx8279_set_page(hx, dsi_ctx, 1); + + for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) { + cmd_set_gmux[0] = HX8279_P1_REG_GOUTL(i); + cmd_set_gmux[1] = gmux->gout_l[i]; + mipi_dsi_generic_write_multi(dsi_ctx, 
cmd_set_gmux, + ARRAY_SIZE(cmd_set_gmux)); + } + + for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) { + cmd_set_gmux[0] = HX8279_P1_REG_GOUTR(i); + cmd_set_gmux[1] = gmux->gout_r[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux, + ARRAY_SIZE(cmd_set_gmux)); + } +} + +static void hx8279_set_analog_gamma(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_analog_gamma *agamma = hx->desc->agamma; + u8 cmd_set_ana_gamma[2]; + int i; + + if (!agamma) + return; + + hx8279_set_page(hx, dsi_ctx, 2); + + for (i = 0; i < ARRAY_SIZE(agamma->pos); i++) { + cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVP(i); + cmd_set_ana_gamma[1] = agamma->pos[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma, + ARRAY_SIZE(cmd_set_ana_gamma)); + } + + for (i = 0; i < ARRAY_SIZE(agamma->neg); i++) { + cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVN(i); + cmd_set_ana_gamma[1] = agamma->neg[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma, + ARRAY_SIZE(cmd_set_ana_gamma)); + } +} + +static void hx8279_set_goa_timing(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_panel_desc *desc = hx->desc; + u8 cmd_set_goa_t[2]; + int i; + + if (hx->skip_goa_timing) + return; + + hx8279_set_page(hx, dsi_ctx, 3); + + for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) { + cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TO(i); + cmd_set_goa_t[1] = desc->goa_odd_timing[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t, + ARRAY_SIZE(cmd_set_goa_t)); + } + + for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) { + cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TE(i); + cmd_set_goa_t[1] = desc->goa_odd_timing[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t, + ARRAY_SIZE(cmd_set_goa_t)); + } +} + +static void hx8279_set_goa_cfg(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_panel_desc *desc = hx->desc; + u8 cmd_set_goa[2]; + int i; + + if (hx->skip_goa_config) + return; + + hx8279_set_page(hx, dsi_ctx, 3); + + if (desc->goa_unk_ba) { + cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_BA; + cmd_set_goa[1] = desc->goa_unk_ba; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_stv_lead_time_ck) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_STVL; + cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_STV_LEAD, + desc->goa_stv_lead_time_ck); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_ckv_lead_time_ck) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVL; + cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_DUMMY, + desc->goa_ckv_lead_time_ck); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_ckv_dummy_vblank_num) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVD; + cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_LEAD, + desc->goa_ckv_dummy_vblank_num); + cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CKV_NONOVERLAP, + desc->goa_ckv_non_overlap_ctl); + /* RESERVED must be always set */ + cmd_set_goa[1] |= HX8279_P3_GOA_CKV_RESERVED; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + /* + * One of the two being more than zero means that we want to write + * both of them. Anyway, the register default is zero in this case. 
+ */ + if (desc->goa_ckv_rise_precharge || desc->goa_ckv_fall_precharge) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_RISE_PREC; + cmd_set_goa[1] = desc->goa_ckv_rise_precharge; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + + cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_FALL_PREC; + cmd_set_goa[1] = desc->goa_ckv_fall_precharge; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_clr1_width_adj) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR1_W_ADJ; + cmd_set_goa[1] = desc->goa_clr1_width_adj; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_clr234_width_adj) { + cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR234_W_ADJ; + cmd_set_goa[1] = desc->goa_clr234_width_adj; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + /* Polarity and Start Position arrays are of the same size */ + for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) { + if (desc->goa_clr_polarity[i] < 0 || desc->goa_clr_start_pos[i] < 0) + continue; + + cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR_CFG(i); + cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CLR_CFG_STARTPOS, + desc->goa_clr_start_pos[i]); + cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CLR_CFG_POLARITY, + desc->goa_clr_polarity[i]); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + if (desc->goa_unk_e4) { + cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E4; + cmd_set_goa[1] = desc->goa_unk_e4; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); + } + + cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E5; + cmd_set_goa[1] = desc->goa_unk_e5; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa, + ARRAY_SIZE(cmd_set_goa)); +} + +static void hx8279_set_mipi_cfg(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_panel_desc *desc = hx->desc; + u8 cmd_set_mipi[2]; + + if (hx->skip_mipi_timing) + return; + + hx8279_set_page(hx, dsi_ctx, 5); + + if (desc->bta_tlpx || desc->ths_settle_time || desc->lhs_settle_time_by_osc25) { + cmd_set_mipi[0] = HX8279_P5_REG_TIMING; + cmd_set_mipi[1] = FIELD_PREP(HX8279_P5_TIMING_TLPX, desc->bta_tlpx); + cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_THS_SETTLE, + desc->ths_settle_time); + cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_LHS_SETTLE, + desc->lhs_settle_time_by_osc25); + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi, + ARRAY_SIZE(cmd_set_mipi)); + } + + if (desc->timing_unk_b8) { + cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_B8; + cmd_set_mipi[1] = desc->timing_unk_b8; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi, + ARRAY_SIZE(cmd_set_mipi)); + } + + if (desc->timing_unk_bc) { + cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_BC; + cmd_set_mipi[1] = desc->timing_unk_bc; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi, + ARRAY_SIZE(cmd_set_mipi)); + } + + if (desc->timing_unk_d6) { + cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_D6; + cmd_set_mipi[1] = desc->timing_unk_d6; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi, + ARRAY_SIZE(cmd_set_mipi)); + } +} + +static void hx8279_set_adv_cfg(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_panel_desc *desc = hx->desc; + const u8 cmd_set_dly[] = { HX8279_P6_REG_DLY_TIME_ADJ, desc->src_delay_time_adj_ck }; + const u8 cmd_set_gamma[] = { HX8279_P6_REG_GAMMA_CHOPPER, desc->gamma_ctl }; + const u8 cmd_set_volt_adj[] = { HX8279_P6_REG_VOLT_ADJ, desc->volt_adj }; + u8 cmd_set_eng[] = { HX8279_P6_REG_ENGINEER_PWD, 
HX8279_P6_ENG_UNLOCK_WORD }; + + if (!desc->gamma_ctl && !desc->src_delay_time_adj_ck && !desc->volt_adj) + return; + + hx8279_set_page(hx, dsi_ctx, 6); + + /* Unlock ENG settings: write same word to both ENGINEER_PWD and INHOUSE_FUNC */ + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng)); + + cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng)); + + /* Set Gamma Chopper and Gamma buffer Chopper control */ + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gamma, ARRAY_SIZE(cmd_set_gamma)); + + /* Set Source delay time adjustment (CKV falling to Source off) */ + if (desc->src_delay_time_adj_ck) + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dly, + ARRAY_SIZE(cmd_set_dly)); + + /* Set voltage adjustment */ + if (desc->volt_adj) + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_volt_adj, + ARRAY_SIZE(cmd_set_volt_adj)); + + /* Lock ENG settings again */ + cmd_set_eng[0] = HX8279_P6_REG_ENGINEER_PWD; + cmd_set_eng[1] = 0; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng)); + + cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng)); +} + +static void hx8279_set_digital_gamma(struct hx8279 *hx, + struct mipi_dsi_multi_context *dsi_ctx) +{ + const struct hx8279_digital_gamma *dgamma = hx->desc->dgamma; + u8 cmd_set_dig_gamma[2]; + int i; + + if (!dgamma) + return; + + /* + * Pages 7..9 are for RGB Positive, 10..12 are for RGB Negative: + * The first iteration sets all positive component registers, + * the second one sets all negatives. + */ + for (i = 0; i < 2; i++) { + u8 pg_neg = i * 3; + + hx8279_set_page(hx, dsi_ctx, 7 + pg_neg); + + for (i = 0; i < ARRAY_SIZE(dgamma->r); i++) { + cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + i; + cmd_set_dig_gamma[1] = dgamma->r[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma, + ARRAY_SIZE(cmd_set_dig_gamma)); + } + + hx8279_set_page(hx, dsi_ctx, 8 + pg_neg); + + for (i = 0; i < ARRAY_SIZE(dgamma->g); i++) { + cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + i; + cmd_set_dig_gamma[1] = dgamma->g[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma, + ARRAY_SIZE(cmd_set_dig_gamma)); + } + + hx8279_set_page(hx, dsi_ctx, 9 + pg_neg); + + for (i = 0; i < ARRAY_SIZE(dgamma->b); i++) { + cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + i; + cmd_set_dig_gamma[1] = dgamma->b[i]; + mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma, + ARRAY_SIZE(cmd_set_dig_gamma)); + } + } +} + +static int hx8279_on(struct hx8279 *hx) +{ + struct mipi_dsi_device *dsi = hx->dsi[0]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + /* Page 5 */ + hx8279_set_mipi_cfg(hx, &dsi_ctx); + + /* Page 1 */ + hx8279_set_gmux(hx, &dsi_ctx); + + /* Page 2 */ + hx8279_set_analog_gamma(hx, &dsi_ctx); + + /* Page 3 */ + hx8279_set_goa_cfg(hx, &dsi_ctx); + hx8279_set_goa_timing(hx, &dsi_ctx); + + /* Page 0 - Driver/Module Configuration */ + hx8279_set_module_config(hx, &dsi_ctx); + + /* Page 6 */ + hx8279_set_adv_cfg(hx, &dsi_ctx); + + /* Pages 7..12 */ + hx8279_set_digital_gamma(hx, &dsi_ctx); + + return dsi_ctx.accum_err; +} + +static void hx8279_power_off(struct hx8279 *hx) +{ + gpiod_set_value_cansleep(hx->reset_gpio, 0); + usleep_range(100, 500); + gpiod_set_value_cansleep(hx->enable_gpio, 0); + regulator_bulk_disable(ARRAY_SIZE(hx->vregs), hx->vregs); +} + +static int hx8279_disable(struct drm_panel *panel) +{ + struct hx8279 *hx = to_hx8279(panel); + struct 
mipi_dsi_device *dsi = hx->dsi[0]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + + return 0; +} + +static int hx8279_enable(struct drm_panel *panel) +{ + struct hx8279 *hx = to_hx8279(panel); + struct mipi_dsi_device *dsi = hx->dsi[0]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return 0; +} + +static int hx8279_prepare(struct drm_panel *panel) +{ + struct hx8279 *hx = to_hx8279(panel); + struct mipi_dsi_device *dsi = hx->dsi[0]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(hx->vregs), hx->vregs); + if (ret) + return ret; + + gpiod_set_value_cansleep(hx->enable_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(hx->reset_gpio, 1); + usleep_range(6000, 7000); + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + if (hx->dsi[1]) + hx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; + + ret = hx8279_on(hx); + if (ret) { + hx8279_power_off(hx); + return ret; + } + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 130); + + return dsi_ctx.accum_err; +} + +static int hx8279_unprepare(struct drm_panel *panel) +{ + struct hx8279 *hx = to_hx8279(panel); + struct mipi_dsi_device *dsi = hx->dsi[0]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 130); + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + if (hx->dsi[1]) + hx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; + + hx8279_power_off(hx); + + return dsi_ctx.accum_err; +} + +static int hx8279_get_modes(struct drm_panel *panel, struct drm_connector *connector) +{ + struct hx8279 *hx = to_hx8279(panel); + int i; + + for (i = 0; i < hx->desc->num_modes; i++) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &hx->desc->mode_data[i].mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type |= DRM_MODE_TYPE_DRIVER; + if (hx->desc->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = hx->desc->mode_data[0].bpc; + connector->display_info.height_mm = hx->desc->mode_data[0].mode.height_mm; + connector->display_info.width_mm = hx->desc->mode_data[0].mode.width_mm; + + return hx->desc->num_modes; +} + +static const struct drm_panel_funcs hx8279_panel_funcs = { + .disable = hx8279_disable, + .unprepare = hx8279_unprepare, + .prepare = hx8279_prepare, + .enable = hx8279_enable, + .get_modes = hx8279_get_modes, +}; + +static int hx8279_init_vregs(struct hx8279 *hx, struct device *dev) +{ + int ret; + + hx->vregs[0].supply = "vdd"; + hx->vregs[1].supply = "iovcc"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->vregs), + hx->vregs); + if (ret < 0) + return ret; + + ret = regulator_is_supported_voltage(hx->vregs[0].consumer, + 3000000, 5000000); + if (!ret) + return -EINVAL; + + ret = regulator_is_supported_voltage(hx->vregs[1].consumer, + 1700000, 1900000); + if (!ret) + return -EINVAL; + + return 0; +} + +static int hx8279_check_gmux_config(struct hx8279 *hx, struct device *dev) +{ + const struct hx8279_panel_desc *desc = hx->desc; + const struct hx8279_goa_mux *gmux = desc->gmux; + int i; + + /* No gmux defined means we simply skip the GOA mux configuration */ + if (!gmux) + return 0; + + for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) { + if (gmux->gout_l[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL)) + return dev_err_probe(dev, 
-EINVAL, + "Invalid value found in gout_l[%d]\n", i); + } + + for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) { + if (gmux->gout_r[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL)) + return dev_err_probe(dev, -EINVAL, + "Invalid value found in gout_r[%d]\n", i); + } + + return 0; +} + +static int hx8279_check_goa_config(struct hx8279 *hx, struct device *dev) +{ + const struct hx8279_panel_desc *desc = hx->desc; + bool goa_odd_valid, goa_even_valid; + int i, num_zero, num_clr = 0; + + /* Up to 4 zero values is a valid configuration. Check them all. */ + num_zero = 1; + for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) { + if (desc->goa_odd_timing[i]) + num_zero++; + } + + goa_odd_valid = (num_zero != ARRAY_SIZE(desc->goa_odd_timing)); + + /* Up to 3 zeroes is a valid config. Check them all. */ + num_zero = 1; + for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) { + if (desc->goa_even_timing[i]) + num_zero++; + } + + goa_even_valid = (num_zero != ARRAY_SIZE(desc->goa_even_timing)); + + /* Programming one without the other would make no sense! */ + if (goa_odd_valid != goa_even_valid) + return -EINVAL; + + /* We know that both are either true or false now, check just one */ + if (!goa_odd_valid) + hx->skip_goa_timing = true; + + if (!desc->goa_unk_ba && !desc->goa_stv_lead_time_ck && + !desc->goa_ckv_lead_time_ck && !desc->goa_ckv_dummy_vblank_num && + !desc->goa_ckv_rise_precharge && !desc->goa_ckv_fall_precharge && + !desc->goa_clr1_width_adj && !desc->goa_clr234_width_adj && + !desc->goa_unk_e4 && !desc->goa_unk_e5) { + hx->skip_goa_config = true; + return 0; + } + + if ((desc->goa_stv_lead_time_ck > HX8279_P3_GOA_STV_LEAD) || + (desc->goa_ckv_lead_time_ck > HX8279_P3_GOA_CKV_LEAD) || + (desc->goa_ckv_dummy_vblank_num > HX8279_P3_GOA_CKV_DUMMY)) + return dev_err_probe(dev, -EINVAL, + "Invalid lead timings in GOA config\n"); + + /* + * Don't perform zero check for polarity and start position, as + * both pol=0 and start=0 are valid configuration values. + */ + for (i = 0; i < ARRAY_SIZE(desc->goa_clr_start_pos); i++) { + if (desc->goa_clr_start_pos[i] < 0) + continue; + else if (desc->goa_clr_start_pos[i] > HX8279_P3_GOA_CLR_CFG_STARTPOS) + return dev_err_probe(dev, -EINVAL, + "Invalid start position for CLR%d\n", i + 1); + else + num_clr++; + } + if (!num_clr) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) { + if (num_clr < 0) + return -EINVAL; + + if (desc->goa_clr_polarity[i] < 0) + continue; + else if (desc->goa_clr_polarity[i] > 1) + return dev_err_probe(dev, -EINVAL, + "Invalid polarity for CLR%d\n", i + 1); + else + num_clr--; + } + + return 0; +} + +static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u8 *component) +{ + u8 gamma_high_bits[4]; + u16 prev_val = 0; + int i, j, k, x; + + /* + * The gamma values are 10 bits long and shall be incremental + * to form a digital gamma correction reference curve. + * + * As for the registers format: the first 24 bytes contain each the + * lowest 8 bits for each of the gamma level references, and the last + * 6 bytes contain the high two bits of 4 registers at a time, where + * the first two bits are relative to the last register, and the last + * two are relative to the first register. 
+ * + * Another way of saying, those are the first four LOW values: + * DGMA1_LO = 0xb1, DGMA2_LO = 0xb2, DGMA3_LO = 0xb3, DGMA4_LO = 0xb4 + * + * The high values for those four are at DGMA1_4_HI = 0xc9; + * ...and DGMA1_4_HI's data contains the following bits: + * [1:0] = DGMA4_HI, [3:2] = DGMA3_HI, [5:4] = DGMA2_HI, [7:6] = DGMA1_HI + */ + for (i = 0; i < HX8279_PG_DGAMMA_NUM_HI_BYTES; i++) { + k = HX8279_PG_DGAMMA_NUM_LO_BYTES + i; + j = i * 4; + x = 0; + + gamma_high_bits[0] = FIELD_GET(HX8279_DGAMMA_DGMA1_HI, component[k]); + gamma_high_bits[1] = FIELD_GET(HX8279_DGAMMA_DGMA2_HI, component[k]); + gamma_high_bits[2] = FIELD_GET(HX8279_DGAMMA_DGMA3_HI, component[k]); + gamma_high_bits[3] = FIELD_GET(HX8279_DGAMMA_DGMA4_HI, component[k]); + + do { + u16 cur_val = component[j] | (gamma_high_bits[x] << 8); + + if (cur_val < prev_val) + return dev_err_probe(dev, -EINVAL, + "Invalid dgamma values: %u < %u!\n", + cur_val, prev_val); + prev_val = cur_val; + j++; + x++; + } while (x < 4); + }; + + return 0; +} + +static int hx8279_check_params(struct hx8279 *hx, struct device *dev) +{ + const struct hx8279_panel_desc *desc = hx->desc; + int ret; + + /* Voltages config validation */ + if (!desc->vgh_mv && !desc->vgl_mv && !desc->vgph_mv && !desc->vgnh_mv) + hx->skip_voltage_config = true; + else if ((desc->vgh_mv && desc->vgh_mv < HX8279_VGH_MIN_MV) || + (desc->vgl_mv && desc->vgl_mv < HX8279_VGL_MIN_MV) || + (desc->vgph_mv && desc->vgph_mv < HX8279_VGPNH_MIN_MV) || + (desc->vgnh_mv && desc->vgnh_mv < HX8279_VGPNH_MIN_MV)) + return -EINVAL; + + /* GOA Muxing validation */ + ret = hx8279_check_gmux_config(hx, dev); + if (ret) + return ret; + + /* GOA Configuration validation */ + ret = hx8279_check_goa_config(hx, dev); + if (ret) + return ret; + + /* MIPI Configuration validation */ + if (!desc->bta_tlpx && !desc->lhs_settle_time_by_osc25 && + !desc->ths_settle_time && !desc->timing_unk_b8 && + !desc->timing_unk_bc && !desc->timing_unk_d6) + hx->skip_mipi_timing = true; + + /* ENG/Gamma Configuration validation */ + if (desc->gamma_ctl > (HX8279_P6_GAMMA_POCGM_CTL | HX8279_P6_GAMMA_POGCMD_CTL)) + return -EINVAL; + + /* Digital Gamma values validation */ + if (desc->dgamma) { + ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->r); + if (ret) + return ret; + + ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->g); + if (ret) + return ret; + + ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->b); + if (ret) + return ret; + } + + return 0; +} + +static int hx8279_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct device_node *dsi_r; + struct hx8279 *hx; + int i, ret; + + hx = devm_drm_panel_alloc(dev, struct hx8279, panel, + &hx8279_panel_funcs, DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(hx)) + return PTR_ERR(hx); + + ret = hx8279_init_vregs(hx, dev); + if (ret) + return ret; + + hx->desc = device_get_match_data(dev); + if (!hx->desc) + return -ENODEV; + + /* + * In some DriverICs some or all fields may be OTP: perform a + * basic configuration check before writing to help avoiding + * irreparable mistakes. + * + * Please note that this is not perfect and will only check if + * the values may be plausible; values that are wrong for a + * specific display, but still plausible for DrIC config will + * be accepted. 
+ */ + ret = hx8279_check_params(hx, dev); + if (ret) + return dev_err_probe(dev, ret, "Invalid DriverIC configuration\n"); + + /* The enable line may be always tied to VCCIO, so this is optional */ + hx->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS); + if (IS_ERR(hx->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(hx->enable_gpio), + "Failed to get enable GPIO\n"); + + hx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS); + if (IS_ERR(hx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(hx->reset_gpio), + "Failed to get reset GPIO\n"); + + /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */ + dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); + if (dsi_r) { + const struct mipi_dsi_device_info *info = &hx->desc->dsi_info; + struct mipi_dsi_host *dsi_r_host; + + dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); + of_node_put(dsi_r); + if (!dsi_r_host) + return dev_err_probe(dev, -EPROBE_DEFER, + "Cannot get secondary DSI host\n"); + + hx->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, info); + if (IS_ERR(hx->dsi[1])) + return dev_err_probe(dev, PTR_ERR(hx->dsi[1]), + "Cannot get secondary DSI node\n"); + mipi_dsi_set_drvdata(hx->dsi[1], hx); + } + + hx->dsi[0] = dsi; + mipi_dsi_set_drvdata(dsi, hx); + + ret = drm_panel_of_backlight(&hx->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&hx->panel); + + for (i = 0; i < 2; i++) { + if (!hx->dsi[i]) + continue; + + hx->dsi[i]->lanes = hx->desc->num_lanes; + hx->dsi[i]->format = MIPI_DSI_FMT_RGB888; + + hx->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_LPM; + + if (hx->desc->mode_data[0].is_video_mode) + hx->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE; + + ret = devm_mipi_dsi_attach(dev, hx->dsi[i]); + if (ret < 0) { + drm_panel_remove(&hx->panel); + return dev_err_probe(dev, ret, + "Cannot attach to DSI%d host.\n", i); + } + } + + return 0; +} + +static void hx8279_remove(struct mipi_dsi_device *dsi) +{ + struct hx8279 *hx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&hx->panel); +} + +static const struct hx8279_panel_mode aoly_sl101pm1794fog_v15_modes[] = { + { + .mode = { + .clock = 159420, + .hdisplay = 1200, + .hsync_start = 1200 + 80, + .hsync_end = 1200 + 80 + 60, + .htotal = 1200 + 80 + 60 + 24, + .vdisplay = 1920, + .vsync_start = 1920 + 10, + .vsync_end = 1920 + 10 + 14, + .vtotal = 1920 + 10 + 14 + 4, + .width_mm = 136, + .height_mm = 217, + .type = DRM_MODE_TYPE_DRIVER + }, + .bpc = 8, + .is_video_mode = true, + }, +}; + +static const struct hx8279_panel_mode startek_kd070fhfid078_modes[] = { + { + .mode = { + .clock = 156458, + .hdisplay = 1200, + .hsync_start = 1200 + 50, + .hsync_end = 1200 + 50 + 24, + .htotal = 1200 + 50 + 24 + 66, + .vdisplay = 1920, + .vsync_start = 1920 + 14, + .vsync_end = 1920 + 14 + 2, + .vtotal = 1920 + 14 + 2 + 10, + .width_mm = 95, + .height_mm = 151, + .type = DRM_MODE_TYPE_DRIVER + }, + .bpc = 8, + .is_video_mode = true, + }, +}; + +static const struct hx8279_goa_mux aoly_sl101pm1794fog_v15_gmux = { + .gout_l = { 0x5, 0x5, 0xb, 0xb, 0x9, 0x9, 0x16, 0x16, 0xe, 0xe, + 0x7, 0x7, 0x26, 0x26, 0x15, 0x15, 0x1, 0x1, 0x3, 0x3 }, + .gout_r = { 0x6, 0x6, 0xc, 0xc, 0xa, 0xa, 0x16, 0x16, 0xe, 0xe, + 0x8, 0x8, 0x26, 0x26, 0x15, 0x15, 0x2, 0x2, 0x4, 0x4 }, +}; + +static const struct hx8279_analog_gamma aoly_sl101pm1794fog_v15_ana_gamma = { + .pos = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x1c, 0x2c, 0x33, 0x31, + 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 
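+		/*
+		 * (The .pos and .neg tables each hold 17 set-points and,
+		 * judging by the field names, describe the positive- and
+		 * negative-polarity analog gamma curves respectively.)
+		 */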
0x2f, 0x7 }, + .neg = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x3f, 0x3f, 0x3f, 0x3f, + 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 }, +}; + +static const struct hx8279_digital_gamma aoly_sl101pm1794fog_v15_dig_gamma = { + .r = { 0x0, 0x5, 0x10, 0x22, 0x36, 0x4a, 0x6c, 0x9a, 0xd7, 0x17, + 0x92, 0x15, 0x18, 0x8c, 0x0, 0x3a, 0x72, 0x8c, 0xa5, 0xb1, + 0xbe, 0xca, 0xd1, 0xd4, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff }, + .g = { 0x4, 0x5, 0x11, 0x24, 0x39, 0x4e, 0x72, 0xa3, 0xe1, 0x25, + 0xa8, 0x2e, 0x32, 0xad, 0x28, 0x63, 0x9b, 0xb5, 0xcf, 0xdb, + 0xe8, 0xf5, 0xfa, 0xfc, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff }, + .b = { 0x4, 0x4, 0xf, 0x22, 0x37, 0x4d, 0x71, 0xa2, 0xe1, 0x26, + 0xa9, 0x2f, 0x33, 0xac, 0x24, 0x5d, 0x94, 0xac, 0xc5, 0xd1, + 0xdc, 0xe8, 0xed, 0xf0, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff }, +}; + +static const struct hx8279_panel_desc aoly_sl101pm1794fog_v15 = { + .dsi_info = { + .type = "L101PM1794FOG-V15", + .channel = 0, + .node = NULL, + }, + .mode_data = aoly_sl101pm1794fog_v15_modes, + .num_modes = ARRAY_SIZE(aoly_sl101pm1794fog_v15_modes), + .num_lanes = 4, + + /* Driver/Module Configuration: LC Matrix voltages */ + .vgh_mv = 16500, + .vgl_mv = 11200, + .vgph_mv = 4600, + .vgnh_mv = 4600, + + /* Analog Gamma correction */ + .agamma = &aoly_sl101pm1794fog_v15_ana_gamma, + + /* Gate driver On Array (GOA) Muxing */ + .gmux = &aoly_sl101pm1794fog_v15_gmux, + + /* Gate driver On Array (GOA) Mux Config */ + .goa_unk_ba = 0xf0, + .goa_odd_timing = { 0, 0, 0, 42, 0, 0 }, + .goa_even_timing = { 1, 42, 0, 0 }, + .goa_stv_lead_time_ck = 11, + .goa_ckv_lead_time_ck = 7, + .goa_ckv_dummy_vblank_num = 3, + .goa_ckv_rise_precharge = 1, + .goa_clr1_width_adj = 0, + .goa_clr234_width_adj = 0, + .goa_clr_polarity = { 1, 0, 0, 0 }, + .goa_clr_start_pos = { 8, 9, 3, 4 }, + .goa_unk_e4 = 0xc0, + .goa_unk_e5 = 0x0d, + + /* MIPI Configuration */ + .bta_tlpx = 2, + .lhs_settle_time_by_osc25 = true, + .ths_settle_time = 2, + .timing_unk_b8 = 0xa5, + .timing_unk_bc = 0x20, + .timing_unk_d6 = 0x7f, + + /* ENG/Gamma Configuration */ + .gamma_ctl = 0, + .volt_adj = FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) | + FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3), + .src_delay_time_adj_ck = 50, + + /* Digital Gamma Adjustment */ + .dgamma = &aoly_sl101pm1794fog_v15_dig_gamma, +}; + +static const struct hx8279_goa_mux startek_kd070fhfid078_gmux = { + .gout_l = { 0xd, 0xd, 0x6, 0x6, 0x8, 0x8, 0xa, 0xa, 0xc, 0xc, + 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x4, 0x4, 0x0, 0x0 }, + .gout_r = { 0xd, 0xd, 0x5, 0x5, 0x7, 0x7, 0x9, 0x9, 0xb, 0xb, + 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x3, 0x3, 0x0, 0x0 }, +}; + +static const struct hx8279_panel_desc startek_kd070fhfid078 = { + .dsi_info = { + .type = "KD070FHFID078", + .channel = 0, + .node = NULL, + }, + .mode_data = startek_kd070fhfid078_modes, + .num_modes = ARRAY_SIZE(startek_kd070fhfid078_modes), + .num_lanes = 4, + + /* Driver/Module Configuration: LC Matrix voltages */ + .vgh_mv = 18000, + .vgl_mv = 12100, + .vgph_mv = 5500, + .vgnh_mv = 5500, + + /* Gate driver On Array (GOA) Mux Config */ + .gmux = &startek_kd070fhfid078_gmux, + + /* Gate driver On Array (GOA) Configuration */ + .goa_unk_ba = 0xf0, + .goa_stv_lead_time_ck = 7, + .goa_ckv_lead_time_ck = 3, + .goa_ckv_dummy_vblank_num = 1, + .goa_ckv_rise_precharge = 0, + .goa_ckv_fall_precharge = 0, + .goa_clr1_width_adj = 1, + .goa_clr234_width_adj = 5, + .goa_clr_polarity = { 0, 1, -1, -1 }, + .goa_clr_start_pos = { 5, 10, -1, -1 }, + .goa_unk_e4 = 0xc0, + .goa_unk_e5 = 0x00, + + /* MIPI Configuration */ + .bta_tlpx = 2, + .lhs_settle_time_by_osc25 
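+	/*
+	 * (A negative entry in the goa_clr_polarity/goa_clr_start_pos
+	 * arrays above marks an unused CLR line: this descriptor only
+	 * drives CLR1 and CLR2, and hx8279_check_goa_config() skips the
+	 * -1 slots.)
+	 */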
= true, + .ths_settle_time = 2, + .timing_unk_b8 = 0x7f, + .timing_unk_bc = 0x20, + .timing_unk_d6 = 0x7f, + + /* ENG/Gamma Configuration */ + .gamma_ctl = FIELD_PREP_CONST(HX8279_P6_GAMMA_POCGM_CTL, 1) | + FIELD_PREP_CONST(HX8279_P6_GAMMA_POGCMD_CTL, 1), + .src_delay_time_adj_ck = 72, +}; + +static const struct of_device_id hx8279_of_match[] = { + { .compatible = "aoly,sl101pm1794fog-v15", .data = &aoly_sl101pm1794fog_v15 }, + { .compatible = "startek,kd070fhfid078", .data = &startek_kd070fhfid078 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hx8279_of_match); + +static struct mipi_dsi_driver hx8279_driver = { + .probe = hx8279_probe, + .remove = hx8279_remove, + .driver = { + .name = "panel-himax-hx8279", + .of_match_table = hx8279_of_match, + }, +}; +module_mipi_dsi_driver(hx8279_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("Himax HX8279 DriverIC panels driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index 92b03a2f65a3..ff994bf0e3cc 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -80,7 +80,7 @@ struct hx8394_panel_desc { unsigned int lanes; unsigned long mode_flags; enum mipi_dsi_pixel_format format; - int (*init_sequence)(struct hx8394 *ctx); + void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx); }; static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel) @@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel) return container_of(panel, struct hx8394, panel); } -static int hsd060bhw4_init_sequence(struct hx8394 *ctx) +static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - /* 5.19.8 SETEXTC: Set extension command (B9h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, - 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC, + 0xff, 0x83, 0x94); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, - 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30); /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, - 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); /* 5.19.3 SETDISP: Set display related register (B2h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, - 0x00, 0x80, 0x78, 0x0c, 0x07); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x78, 0x0c, 0x07); /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, - 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55, - 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c, - 0x7c); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC, + 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55, + 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c, + 0x7c); /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, - 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10, - 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00, - 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00, - 0x00, 0x0c, 0x40); + 
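+	/*
+	 * Conversion pattern used throughout this patch: the _multi()
+	 * helpers take a struct mipi_dsi_multi_context and latch the
+	 * first failure in its accum_err field; once accum_err is set,
+	 * subsequent calls become no-ops, so a long command sequence
+	 * needs only one error check at the end. A minimal sketch:
+	 *
+	 *	struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+	 *
+	 *	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0xff, 0x83, 0x94);
+	 *	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb6, 0x7d, 0x7d);
+	 *	return ctx.accum_err;	(first error, or 0)
+	 */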
mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10, + 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00, + 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00, + 0x00, 0x0c, 0x40); /* 5.19.20 Set GIP Option1 (D5h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, - 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1, + 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); /* 5.19.21 Set GIP Option2 (D6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, - 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06, - 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2, + 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06, + 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, - 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f, - 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a, - 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00, - 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31, - 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, - 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b, - 0x4a, 0x4c, 0x4b, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA, + 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f, + 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a, + 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00, + 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31, + 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, + 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b, + 0x4a, 0x4c, 0x4b, 0x7f); /* 5.19.17 SETPANEL (CCh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, - 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL, + 0x0b); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, - 0x1f, 0x31); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1, + 0x1f, 0x31); /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, - 0x7d, 0x7d); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM, + 0x7d, 0x7d); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, - 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3, + 0x02); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x01); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, 
- 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x00); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x00); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, - 0xed); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3, + 0xed); } static const struct drm_display_mode hsd060bhw4_mode = { @@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = { .init_sequence = hsd060bhw4_init_sequence, }; -static int powkiddy_x55_init_sequence(struct hx8394 *ctx) +static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - /* 5.19.8 SETEXTC: Set extension command (B9h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, - 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC, + 0xff, 0x83, 0x94); /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, - 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, - 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47); /* 5.19.3 SETDISP: Set display related register (B2h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, - 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f); /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, - 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75, - 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, - 0x86); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75, + 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, + 0x86); /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, - 0x6e, 0x6e); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM, + 0x6e, 0x6e); /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, - 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10, - 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15, - 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, - 0x07, 0x0c, 0x40); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10, + 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15, + 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, + 0x07, 0x0c, 0x40); /* 5.19.20 Set GIP Option1 (D5h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, - 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, - 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18, - 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21, - 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1, + 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18, + 0x26, 0x27, 0x18, 
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21, + 0x18, 0x18, 0x18, 0x18); /* 5.19.21 Set GIP Option2 (D6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, - 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, - 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18, - 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24, - 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2, + 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, + 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18, + 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24, + 0x18, 0x18, 0x18, 0x18); /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, - 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, - 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8, - 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, - 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65, - 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba, - 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA, + 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, + 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8, + 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, + 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65, + 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba, + 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, - 0x1f, 0x31); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1, + 0x1f, 0x31); /* 5.19.17 SETPANEL (CCh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, - 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL, + 0x0b); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, - 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3, + 0x02); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x02); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x00); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x01); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x00); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x00); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, 
HX8394_CMD_UNKNOWN5, - 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5, + 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01); /* Unknown command, not listed in the HX8394-F datasheet */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2, - 0xed); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2, + 0xed); } static const struct drm_display_mode powkiddy_x55_mode = { @@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = { .init_sequence = powkiddy_x55_init_sequence, }; -static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx) +static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - /* DCS commands do not seem to be sent correclty without this delay */ - msleep(20); + mipi_dsi_msleep(dsi_ctx, 20); /* 5.19.8 SETEXTC: Set extension command (B9h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, - 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC, + 0xff, 0x83, 0x94); /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, - 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, - 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, - 0x71, 0x71, 0x57, 0x47); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, + 0x71, 0x71, 0x57, 0x47); /* 5.19.3 SETDISP: Set display related register (B2h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, - 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f); /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, - 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, - 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, - 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, - 0x01, 0x0c, 0x86); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, + 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, + 0x01, 0x0c, 0x86); /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, - 0x6e, 0x6e); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM, + 0x6e, 0x6e); /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, - 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, - 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00, - 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, - 0x02, 0x15, 0x06, 0x05, 0x06, 0x47, - 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, - 0x07, 0x0c, 0x40); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, + 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00, + 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, + 0x02, 0x15, 0x06, 0x05, 0x06, 0x47, + 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, + 0x07, 0x0c, 0x40); /* 5.19.20 Set GIP Option1 (D5h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, - 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, - 0x18, 0x18, 0x26, 0x27, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x20, 0x21, 0x18, 0x18, - 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1, + 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 
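+	/*
+	 * (mipi_dsi_msleep(), used at the top of this sequence, only
+	 * sleeps while the context has no accumulated error, so an
+	 * already-failed init sequence does not also pay the delay.)
+	 */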
0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, + 0x18, 0x18, 0x26, 0x27, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x20, 0x21, 0x18, 0x18, + 0x18, 0x18); /* 5.19.21 Set GIP Option2 (D6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, - 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, - 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, - 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, - 0x18, 0x18, 0x27, 0x26, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, - 0x18, 0x18, 0x25, 0x24, 0x18, 0x18, - 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2, + 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, + 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, + 0x18, 0x18, 0x27, 0x26, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x25, 0x24, 0x18, 0x18, + 0x18, 0x18); /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, - 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, - 0x24, 0x22, 0x47, 0x56, 0x65, 0x66, - 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, - 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61, - 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, - 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, - 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e, - 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, - 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67, - 0x6b, 0x72, 0x7f, 0x7f); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA, + 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, + 0x24, 0x22, 0x47, 0x56, 0x65, 0x66, + 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, + 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61, + 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, + 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, + 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e, + 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, + 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67, + 0x6b, 0x72, 0x7f, 0x7f); /* Unknown command, not listed in the HX8394-F datasheet (C0H) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, - 0x1f, 0x73); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1, + 0x1f, 0x73); /* Set CABC control (C9h)*/ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC, - 0x76, 0x00, 0x30); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC, + 0x76, 0x00, 0x30); /* 5.19.17 SETPANEL (CCh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, - 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL, + 0x0b); /* Unknown command, not listed in the HX8394-F datasheet (D4h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, - 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3, + 0x02); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x02); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x02); /* 5.19.11 Set register bank (D8h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x00); /* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x01); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x01); /* 5.19.2 SETPOWER: Set power (B1h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER, + 0x00); 
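+	/*
+	 * (The SETPOWER write above sits in a register-bank window: bank
+	 * 1 was selected via SETREGBANK just before it and bank 0 is
+	 * restored right after, so this B1h write presumably reaches a
+	 * different register than the earlier power setup.)
+	 */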
/* 5.19.11 Set register bank (BDh) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, - 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK, + 0x00); /* Unknown command, not listed in the HX8394-F datasheet (C6h) */ - mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2, - 0xed); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2, + 0xed); } static const struct drm_display_mode mchp_ac40t08a_mode = { @@ -493,35 +481,31 @@ static int hx8394_enable(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; int ret; - ret = ctx->desc->init_sequence(ctx); - if (ret) { - dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); - return ret; - } + ctx->desc->init_sequence(&dsi_ctx); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret) { - dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + if (dsi_ctx.accum_err) + return dsi_ctx.accum_err; /* Panel is operational 120 msec after reset */ msleep(120); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret) { - dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + if (dsi_ctx.accum_err) goto sleep_in; - } return 0; sleep_in: + ret = dsi_ctx.accum_err; + dsi_ctx.accum_err = 0; + /* This will probably fail, but let's try orderly power off anyway. */ - if (!mipi_dsi_dcs_enter_sleep_mode(dsi)) - msleep(50); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); return ret; } @@ -530,17 +514,12 @@ static int hx8394_disable(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; - - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret) { - dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - msleep(50); /* about 3 frames */ + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */ - return 0; + return dsi_ctx.accum_err; } static int hx8394_unprepare(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c index 5d115ecd5dd4..b6429795e8f5 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c @@ -413,15 +413,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi) static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi) { struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi); - int ret; - ret = drm_panel_unprepare(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); + drm_panel_unprepare(&ctx->panel); - ret = drm_panel_disable(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); + drm_panel_disable(&ctx->panel); } static void panel_nv3051d_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c index 04f1d2676c78..116d67bfa114 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c @@ -23,10 +23,12 @@ #define DSI_NUM_MIN 1 -#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) 
\ - do { \ - mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \ - mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \ +#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \ + do { \ + dsi_ctx.dsi = dsi0; \ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ + dsi_ctx.dsi = dsi1; \ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ } while (0) struct panel_info { @@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi0 = pinfo->dsi[0]; struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80); - 
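+/*
+ * The new dual-write macro above retargets one shared context at each
+ * controller in turn; expanded by hand, one invocation is roughly:
+ *
+ *	dsi_ctx.dsi = dsi0;
+ *	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ *	dsi_ctx.dsi = dsi1;
+ *	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ *
+ * so a single accum_err covers both links of the dual-DSI panel.
+ */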
mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0xd2, 0x30); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90); - 
mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11); - msleep(70); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29); - - return 0; + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + 
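+	/*
+	 * (Pattern note: each "0xff, <page>" write selects a command page
+	 * on this Novatek DriverIC, and the "0xfb, 0x01" that follows it
+	 * apparently prevents that page from being reloaded from
+	 * non-volatile defaults; the pair repeats for every page touched.)
+	 */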
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x39, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x9c, 0x0b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_msleep(&dsi_ctx, 70); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); + + return dsi_ctx.accum_err; } static int elish_csot_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi0 = pinfo->dsi[0]; struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - 
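+	/*
+	 * (The BOE sequence above ends with standard DCS commands rather
+	 * than vendor magic: 0x51 sets the display brightness, 0x53 the
+	 * control-display register, 0x35 enables the TE signal, and the
+	 * closing 0x11/0x29 pair around the 70 ms sleep is
+	 * exit_sleep_mode/set_display_on.)
+	 */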
mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11); - msleep(70); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29); - - return 0; + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0xc6, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_msleep(&dsi_ctx, 70); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); + + return dsi_ctx.accum_err; } static int j606f_boe_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi = pinfo->dsi[0]; - struct device *dev = &dsi->dev; - int ret; - - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78); - mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63); - mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb); - mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66); - mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2); - mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - 
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f); - 
mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32); - mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42); - mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66); - mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a); - mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91); - mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21); - mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43); - - ret = mipi_dsi_dcs_set_display_brightness(dsi, 18); - if (ret < 0) { - dev_err(dev, "Failed to set display brightness: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34); - mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21); - mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82); - mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c); - mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xb6, - 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x05, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f); - mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55); - mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22); - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07); - 
mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3); - mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1); - mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2); - mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28); - mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50); - mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78); - mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82); - mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e); - mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c); - mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47); - 
mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f); - mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d); - mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22); - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60); 
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d); - - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02); - - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13); - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - usleep_range(10000, 11000); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - - ret = mipi_dsi_dcs_set_display_brightness(dsi, 0); - if (ret < 0) { - dev_err(dev, "Failed to set display brightness: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01); - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(100); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(30); - - return 0; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00); + + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43); + + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, + 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x05, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00); + + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d); + + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01); + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 100); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 30); + + return dsi_ctx.accum_err; } static const struct drm_display_mode 
elish_boe_modes[] = { @@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel) static int nt36523_disable(struct drm_panel *panel) { struct panel_info *pinfo = to_panel_info(panel); - int i, ret; + int i; for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) { - ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]); - if (ret < 0) - dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i]}; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); } for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) { - ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]); - if (ret < 0) - dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i]}; + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); } msleep(70); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index f23d8832a1ad..93f11e2e9398 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -34,8 +34,8 @@ struct s6d7aa0 { struct s6d7aa0_panel_desc { unsigned int panel_type; - int (*init_func)(struct s6d7aa0 *ctx); - int (*off_func)(struct s6d7aa0 *ctx); + void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx); + void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx); const struct drm_display_mode *drm_mode; unsigned long mode_flags; u32 bus_flags; @@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx) msleep(50); } -static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock) +static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock) { - struct mipi_dsi_device *dsi = ctx->dsi; - if (lock) { - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); if (ctx->desc->use_passwd3) - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a); } else { - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); if (ctx->desc->use_passwd3) - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5); } - - return 0; } static int s6d7aa0_on(struct s6d7aa0 *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = ctx->desc->init_func(ctx); - if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); - gpiod_set_value_cansleep(ctx->reset_gpio, 1); - return ret; - } + ctx->desc->init_func(ctx, &dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return 0; + return dsi_ctx.accum_err; } -static int s6d7aa0_off(struct s6d7aa0 *ctx) +static void s6d7aa0_off(struct s6d7aa0 *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = ctx->desc->off_func(ctx); - if (ret < 0) { - 
dev_err(dev, "Panel-specific off function failed: %d\n", ret); - return ret; - } + ctx->desc->off_func(&dsi_ctx); - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(64); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 64); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); - return 0; + mipi_dsi_msleep(&dsi_ctx, 120); } static int s6d7aa0_prepare(struct drm_panel *panel) { struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel); - struct device *dev = &ctx->dsi->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } s6d7aa0_reset(ctx); ret = s6d7aa0_on(ctx); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); return ret; } @@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel) static int s6d7aa0_disable(struct drm_panel *panel) { struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel); - struct device *dev = &ctx->dsi->dev; - int ret; - ret = s6d7aa0_off(ctx); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + s6d7aa0_off(ctx); return 0; } @@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); u16 brightness = backlight_get_brightness(bl); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness); - return 0; + return dsi_ctx.accum_err; } static int s6d7aa0_bl_get_brightness(struct backlight_device *bl) @@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi) /* Initialization code and structures for LSL080AL02 panel */ -static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + mipi_dsi_usleep_range(dsi_ctx, 20000, 25000); - usleep_range(20000, 25000); + s6d7aa0_lock(ctx, dsi_ctx, false); - ret = s6d7aa0_lock(ctx, false); - if (ret < 0) { - dev_err(dev, "Failed to unlock registers: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10); - usleep_range(1000, 1500); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10); + mipi_dsi_usleep_range(dsi_ctx, 1000, 1500); /* SEQ_B6_PARAM_8_R01 */ - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10); /* BL_CTL_ON */ - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28); - - usleep_range(5000, 6000); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04); + mipi_dsi_usleep_range(dsi_ctx, 5000, 6000); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04); - msleep(120); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00); + mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx); - ret = 
s6d7aa0_lock(ctx, true); - if (ret < 0) { - dev_err(dev, "Failed to lock registers: %d\n", ret); - return ret; - } + mipi_dsi_msleep(dsi_ctx, 120); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } + s6d7aa0_lock(ctx, dsi_ctx, true); - return 0; + mipi_dsi_dcs_set_display_on_multi(dsi_ctx); } -static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - /* BL_CTL_OFF */ - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20); } static const struct drm_display_mode s6d7aa0_lsl080al02_mode = { @@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = { /* Initialization code and structures for LSL080AL03 panel */ -static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; - - usleep_range(20000, 25000); + mipi_dsi_usleep_range(dsi_ctx, 20000, 25000); - ret = s6d7aa0_lock(ctx, false); - if (ret < 0) { - dev_err(dev, "Failed to unlock registers: %d\n", ret); - return ret; - } + s6d7aa0_lock(ctx, dsi_ctx, false); if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) { - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23, - 0x09); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21, - 0x80, 0x78); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23, + 0x09); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21, + 0x80, 0x78); } else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) { - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b); - mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23, - 0x09); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21, - 0x80, 0x68); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23, + 0x09); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21, + 0x80, 0x68); } - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08); - - usleep_range(10000, 11000); - - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0xcd, - 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, - 0x2e, 0x2e, 0x2e, 0x2e, 0x2e); - mipi_dsi_dcs_write_seq(dsi, 0xce, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, 
"Failed to exit sleep mode: %d\n", ret); - return ret; - } - - ret = s6d7aa0_lock(ctx, true); - if (ret < 0) { - dev_err(dev, "Failed to lock registers: %d\n", ret); - return ret; - } + mipi_dsi_usleep_range(dsi_ctx, 10000, 11000); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03); - return 0; + mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx); + s6d7aa0_lock(ctx, dsi_ctx, true); + mipi_dsi_dcs_set_display_on_multi(dsi_ctx); } -static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00); } static const struct drm_display_mode s6d7aa0_lsl080al03_mode = { diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 04ce925b3d9d..210a25afe82b 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -22,7 +22,6 @@ struct sofef00_panel { struct mipi_dsi_device *dsi; struct regulator *supply; struct gpio_desc *reset_gpio; - const struct drm_display_mode *mode; }; static inline @@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx) static int sofef00_panel_on(struct sofef00_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - usleep_range(10000, 11000); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } + 
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return 0; + return dsi_ctx.accum_err; } static int sofef00_panel_off(struct sofef00_panel *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(40); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 40); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(160); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 160); - return 0; + return dsi_ctx.accum_err; } static int sofef00_panel_prepare(struct drm_panel *panel) @@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel) ret = sofef00_panel_on(ctx); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); return ret; } @@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel) static int sofef00_panel_unprepare(struct drm_panel *panel) { struct sofef00_panel *ctx = to_sofef00_panel(panel); - struct device *dev = &ctx->dsi->dev; - int ret; - - ret = sofef00_panel_off(ctx); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + sofef00_panel_off(ctx); regulator_disable(ctx->supply); return 0; @@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = { .height_mm = 145, }; -static const struct drm_display_mode fajita_panel_mode = { - .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000, - .hdisplay = 1080, - .hsync_start = 1080 + 72, - .hsync_end = 1080 + 72 + 16, - .htotal = 1080 + 72 + 16 + 36, - .vdisplay = 2340, - .vsync_start = 2340 + 32, - .vsync_end = 2340 + 32 + 4, - .vtotal = 2340 + 32 + 4 + 18, - .width_mm = 68, - .height_mm = 145, -}; - static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; - struct sofef00_panel *ctx = to_sofef00_panel(panel); - mode = drm_mode_duplicate(connector->dev, ctx->mode); + mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode); if (!mode) return -ENOMEM; @@ -239,13 +195,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) if (!ctx) return -ENOMEM; - ctx->mode = of_device_get_match_data(dev); - - if (!ctx->mode) { - dev_err(dev, "Missing device mode\n"); - return -ENODEV; - } - ctx->supply = devm_regulator_get(dev, "vddio"); if (IS_ERR(ctx->supply)) return dev_err_probe(dev, PTR_ERR(ctx->supply), @@ -295,14 +244,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id sofef00_panel_of_match[] = { - { // OnePlus 6 / enchilada - .compatible = "samsung,sofef00", - .data = &enchilada_panel_mode, - }, - { // OnePlus 6T / fajita - .compatible = "samsung,s6e3fc2x01", - .data = &fajita_panel_mode, - }, + { .compatible = "samsung,sofef00" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index 729cbb0d8403..36abfa2e65e9 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel 
*panel) static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt) { struct mipi_dsi_device *dsi = sharp_nt->dsi; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) - return ret; + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); - msleep(120); + mipi_dsi_msleep(&dsi_ctx, 120); /* Novatek two-lane operation */ - ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1); - if (ret < 0) - return ret; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03); /* Set both MCU and RGB I/F to 24bpp */ - ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT | - (MIPI_DCS_PIXEL_FMT_24BIT << 4)); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, + MIPI_DCS_PIXEL_FMT_24BIT | + (MIPI_DCS_PIXEL_FMT_24BIT << 4)); - return 0; + return dsi_ctx.accum_err; } static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt) { struct mipi_dsi_device *dsi = sharp_nt->dsi; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags |= MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return 0; + return dsi_ctx.accum_err; } static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt) { struct mipi_dsi_device *dsi = sharp_nt->dsi; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) - return ret; + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); - return 0; + return dsi_ctx.accum_err; } static int sharp_nt_panel_unprepare(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 232b03c1a259..0e9b440ef327 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -579,9 +579,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) u32 bus_flags; int err; - panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL); - if (!panel) - return -ENOMEM; + panel = devm_drm_panel_alloc(dev, struct panel_simple, base, + &panel_simple_funcs, desc->connector_type); + if (IS_ERR(panel)) + return PTR_ERR(panel); panel->desc = desc; @@ -694,8 +695,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) pm_runtime_set_autosuspend_delay(dev, 1000); pm_runtime_use_autosuspend(dev); - drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type); - err = drm_panel_of_backlight(&panel->base); if (err) { dev_err_probe(dev, err, "Could not find backlight\n"); @@ -3527,6 +3526,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct drm_display_mode nlt_nl13676bc25_03f_mode = { + .clock = 75400, + .hdisplay = 1366, + .hsync_start = 1366 + 14, + .hsync_end = 1366 + 14 + 56, + .htotal = 1366 + 14 + 56 + 64, + .vdisplay = 768, + .vsync_start = 768 + 1, + .vsync_end = 768 + 1 + 3, + .vtotal = 768 + 1 + 3 + 22, +}; + +static const struct panel_desc nlt_nl13676bc25_03f = { + .modes = &nlt_nl13676bc25_03f_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 363, + .height = 215, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct 
display_timing nlt_nl192108ac18_02d_timing = { .pixelclock = { 130000000, 148350000, 163000000 }, .hactive = { 1920, 1920, 1920 }, @@ -3796,6 +3819,32 @@ static const struct panel_desc pda_91_00156_a0 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode powertip_ph128800t004_zza01_mode = { + .clock = 71150, + .hdisplay = 1280, + .hsync_start = 1280 + 48, + .hsync_end = 1280 + 48 + 32, + .htotal = 1280 + 48 + 32 + 80, + .vdisplay = 800, + .vsync_start = 800 + 9, + .vsync_end = 800 + 9 + 8, + .vtotal = 800 + 9 + 8 + 6, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc powertip_ph128800t004_zza01 = { + .modes = &powertip_ph128800t004_zza01_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 216, + .height = 135, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = { .clock = 66500, .hdisplay = 1280, @@ -4393,10 +4442,10 @@ static const struct panel_desc tianma_tm070jvhg33 = { }; /* - * The datasheet computes total blanking as back porch + front porch, not - * including sync pulse width. This is for both H and V. To make the total - * blanking and period correct, subtract the pulse width from the front - * porch. + * The TM070JDHG34-00 datasheet computes total blanking as back porch + + * front porch, not including sync pulse width. This is for both H and + * V. To make the total blanking and period correct, subtract the pulse + * width from the front porch. * * This works well for the Min and Typ values, but for Max values the sync * pulse width is higher than back porch + front porch, so work around that @@ -4405,6 +4454,10 @@ static const struct panel_desc tianma_tm070jvhg33 = { * * Exact datasheet values are added as a comment where they differ from the * ones implemented for the above reason. + * + * The P0700WXF1MBAA datasheet is even less detailed, only listing period + * and total blanking time; however, the resulting values are the same as + * the TM070JDHG34-00.
*/ static const struct display_timing tianma_tm070jdhg34_00_timing = { .pixelclock = { 68400000, 71900000, 78100000 }, @@ -4427,6 +4480,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = { .width = 150, /* 149.76 */ .height = 94, /* 93.60 */ }, + .delay = { + .prepare = 15, /* Tp1 */ + .enable = 150, /* Tp2 */ + .disable = 150, /* Tp4 */ + .unprepare = 120, /* Tp3 */ + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + +static const struct panel_desc tianma_p0700wxf1mbaa = { + .timings = &tianma_tm070jdhg34_00_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 150, /* 149.76 */ + .height = 94, /* 93.60 */ + }, + .delay = { + .prepare = 18, /* Tr + Tp1 */ + .enable = 152, /* Tp2 + Tp5 */ + .disable = 152, /* Tp6 + Tp4 */ + .unprepare = 120, /* Tp3 */ + }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; @@ -5121,6 +5198,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "newhaven,nhd-4.3-480272ef-atxl", .data = &newhaven_nhd_43_480272ef_atxl, }, { + .compatible = "nlt,nl13676bc25-03f", + .data = &nlt_nl13676bc25_03f, + }, { .compatible = "nlt,nl192108ac18-02d", .data = &nlt_nl192108ac18_02d, }, { @@ -5154,6 +5234,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "pda,91-00156-a0", .data = &pda_91_00156_a0, }, { + .compatible = "powertip,ph128800t004-zza01", + .data = &powertip_ph128800t004_zza01, + }, { .compatible = "powertip,ph128800t006-zhc01", .data = &powertip_ph128800t006_zhc01, }, { @@ -5214,6 +5297,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "tfc,s9700rtwv43tr-01b", .data = &tfc_s9700rtwv43tr_01b, }, { + .compatible = "tianma,p0700wxf1mbaa", + .data = &tianma_p0700wxf1mbaa, + }, { .compatible = "tianma,tm070jdhg30", .data = &tianma_tm070jdhg30, }, { diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c index 17349825543f..b148e6cba9bd 100644 --- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c +++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c @@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel) static int r63353_panel_activate(struct r63353_panel *rpanel) { struct mipi_dsi_device *dsi = rpanel->dsi; - struct device *dev = &dsi->dev; - int i, ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + int i; - ret = mipi_dsi_dcs_soft_reset(dsi); - if (ret < 0) { - dev_err(dev, "Failed to do Software Reset (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_soft_reset_multi(&dsi_ctx); - usleep_range(15000, 17000); + mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); for (i = 0; i < rpanel->pdata->init_length; i++) { const struct r63353_instr *instr = &rpanel->pdata->init[i]; - ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len); - if (ret < 0) - goto fail; + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data, + instr->len); } - msleep(120); - - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode (%d)\n", ret); - goto fail; - } + mipi_dsi_msleep(&dsi_ctx, 120); - usleep_range(5000, 10000); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display ON (%d)\n", ret); - goto fail; 
- } + mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000); - return 0; + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); -fail: - gpiod_set_value(rpanel->reset_gpio, 0); + if (dsi_ctx.accum_err) + gpiod_set_value(rpanel->reset_gpio, 0); - return ret; + return dsi_ctx.accum_err; } static int r63353_panel_prepare(struct drm_panel *panel) @@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel) return 0; } -static int r63353_panel_deactivate(struct r63353_panel *rpanel) +static void r63353_panel_deactivate(struct r63353_panel *rpanel) { struct mipi_dsi_device *dsi = rpanel->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display OFF (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); - usleep_range(5000, 10000); + mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode (%d)\n", ret); - return ret; - } - - return 0; + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); } static int r63353_panel_unprepare(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c new file mode 100644 index 000000000000..413849f7b4de --- /dev/null +++ b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct visionox_g2647fb105 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = { + { .supply = "vdd3p3" }, + { .supply = "vddio" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline +struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel) +{ + return container_of(panel, struct visionox_g2647fb105, panel); +} + +static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00); + + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 100); + + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + + return dsi_ctx.accum_err; +} + +static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 50); + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + + return dsi_ctx.accum_err; +} + +static int visionox_g2647fb105_prepare(struct drm_panel *panel) +{ + struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + visionox_g2647fb105_reset(ctx); + + ret = visionox_g2647fb105_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + return ret; + } + + return 0; +} + +static int visionox_g2647fb105_unprepare(struct drm_panel *panel) +{ + struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = visionox_g2647fb105_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode visionox_g2647fb105_mode = { + .clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 28, + .hsync_end = 1080 + 28 + 4, + .htotal = 1080 + 28 + 4 + 36, + .vdisplay = 2340, + .vsync_start = 2340 + 8, + .vsync_end = 2340 + 8 + 4, + .vtotal = 2340 + 8 + 4 + 4, + .width_mm = 69, + .height_mm = 149, +}; + +static int visionox_g2647fb105_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = { + .prepare = visionox_g2647fb105_prepare, + .unprepare = visionox_g2647fb105_unprepare, + .get_modes = visionox_g2647fb105_get_modes, +}; + +static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops visionox_g2647fb105_bl_ops = { + .update_status = visionox_g2647fb105_bl_update_status, +}; + +static struct backlight_device * 
+visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 1023, + .max_brightness = 2047, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &visionox_g2647fb105_bl_ops, &props); +} + +static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct visionox_g2647fb105 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(visionox_g2647fb105_supplies), + visionox_g2647fb105_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi) +{ + struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id visionox_g2647fb105_of_match[] = { + { .compatible = "visionox,g2647fb105" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match); + +static struct mipi_dsi_driver visionox_g2647fb105_driver = { + .probe = visionox_g2647fb105_probe, + .remove = visionox_g2647fb105_remove, + .driver = { + .name = "panel-visionox-g2647fb105", + .of_match_table = visionox_g2647fb105_of_match, + }, +}; +module_mipi_dsi_driver(visionox_g2647fb105_driver); + +MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index a45e4addcc19..5d35076b2e6d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -209,10 +209,20 @@ int panfrost_device_init(struct panfrost_device *pfdev) spin_lock_init(&pfdev->cycle_counter.lock); + err = panfrost_pm_domain_init(pfdev); + if (err) + return err; + + err = panfrost_reset_init(pfdev); + if (err) { + dev_err(pfdev->dev, "reset init failed %d\n", err); + goto out_pm_domain; + } + err = panfrost_clk_init(pfdev); if (err) { dev_err(pfdev->dev, "clk init failed %d\n", err); - return err; + goto out_reset; } err = panfrost_devfreq_init(pfdev); @@ -229,25 +239,15 @@ int panfrost_device_init(struct panfrost_device *pfdev) goto out_devfreq; } - err = panfrost_reset_init(pfdev); - if (err) { - dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto out_regulator; - } - - err = panfrost_pm_domain_init(pfdev); - if (err) - goto out_reset; - pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0); if (IS_ERR(pfdev->iomem)) { err = PTR_ERR(pfdev->iomem); - goto out_pm_domain; + goto out_regulator; } err = panfrost_gpu_init(pfdev); if (err) - goto out_pm_domain; + goto out_regulator; err = panfrost_mmu_init(pfdev); if (err) @@ -268,16 +268,16 @@ out_mmu: panfrost_mmu_fini(pfdev); out_gpu: panfrost_gpu_fini(pfdev); -out_pm_domain: - panfrost_pm_domain_fini(pfdev); -out_reset: - panfrost_reset_fini(pfdev); out_regulator: panfrost_regulator_fini(pfdev); out_devfreq: panfrost_devfreq_fini(pfdev); out_clk: panfrost_clk_fini(pfdev); +out_reset: + panfrost_reset_fini(pfdev); +out_pm_domain: + panfrost_pm_domain_fini(pfdev); return err; } @@ -287,11 +287,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev) panfrost_job_fini(pfdev); panfrost_mmu_fini(pfdev); panfrost_gpu_fini(pfdev); - panfrost_pm_domain_fini(pfdev); - panfrost_reset_fini(pfdev); panfrost_devfreq_fini(pfdev); panfrost_regulator_fini(pfdev); panfrost_clk_fini(pfdev); + panfrost_reset_fini(pfdev); + panfrost_pm_domain_fini(pfdev); } #define PANFROST_EXCEPTION(id) \ @@ -406,11 +406,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev) static int panfrost_device_runtime_resume(struct device *dev) { struct panfrost_device *pfdev = dev_get_drvdata(dev); + int ret; + + if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) { + ret = reset_control_deassert(pfdev->rstc); + if (ret) + return ret; + + ret = clk_enable(pfdev->clock); + if (ret) + goto err_clk; + + if (pfdev->bus_clock) { + ret = clk_enable(pfdev->bus_clock); + if (ret) + goto err_bus_clk; + } + } panfrost_device_reset(pfdev); panfrost_devfreq_resume(pfdev); return 0; + +err_bus_clk: + if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) + clk_disable(pfdev->clock); +err_clk: + if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) + reset_control_assert(pfdev->rstc); + return ret; } static int panfrost_device_runtime_suspend(struct device *dev) @@ -426,6 +451,14 @@ static int panfrost_device_runtime_suspend(struct device *dev) panfrost_gpu_suspend_irq(pfdev); panfrost_gpu_power_off(pfdev); + if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) { + if (pfdev->bus_clock) + clk_disable(pfdev->bus_clock); + + clk_disable(pfdev->clock); + reset_control_assert(pfdev->rstc); + } + return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index cffcb0ac7c11..dcff70f905cd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -36,10 +36,21 @@ enum panfrost_drv_comp_bits { * enum panfrost_gpu_pm - Supported kernel power management features * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend + * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during + * system runtime suspend */ enum panfrost_gpu_pm { GPU_PM_CLK_DIS, GPU_PM_VREG_OFF, + GPU_PM_RT +}; + +/** + * enum panfrost_gpu_quirks - GPU optional quirks + * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format + */ +enum panfrost_gpu_quirks { + GPU_QUIRK_FORCE_AARCH64_PGTABLE, }; struct panfrost_features { @@ -95,6 +106,9 @@ struct panfrost_compatible { /* Allowed PM features */ u8 pm_features; + + /* GPU configuration quirks */ + u8 gpu_quirks; }; struct panfrost_device { @@ -162,6 +176,11 @@ struct panfrost_mmu { int as; atomic_t as_count; struct list_head 
list; + struct { + u64 transtab; + u64 memattr; + u64 transcfg; + } cfg; }; struct panfrost_engine_usage { diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 0f3935556ac7..f1ec3b02f15a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, } } - args->retained = drm_gem_shmem_madvise(&bo->base, args->madv); + args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv); if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) @@ -776,6 +776,13 @@ static const struct panfrost_compatible default_data = { .pm_domain_names = NULL, }; +static const struct panfrost_compatible allwinner_h616_data = { + .num_supplies = ARRAY_SIZE(default_supplies) - 1, + .supply_names = default_supplies, + .num_pm_domains = 1, + .pm_features = BIT(GPU_PM_RT), +}; + static const struct panfrost_compatible amlogic_data = { .num_supplies = ARRAY_SIZE(default_supplies) - 1, .supply_names = default_supplies, @@ -824,6 +831,7 @@ static const struct panfrost_compatible mediatek_mt8188_data = { .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), .pm_domain_names = mediatek_mt8183_pm_domains, .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), + .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE), }; static const char * const mediatek_mt8192_supplies[] = { "mali", NULL }; @@ -835,6 +843,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = { .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains), .pm_domain_names = mediatek_mt8192_pm_domains, .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), + .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE), }; static const struct of_device_id dt_match[] = { @@ -859,6 +868,7 @@ static const struct of_device_id dt_match[] = { { .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data }, { .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data }, { .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data }, + { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data }, {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 47751302f1bc..4042afe2fbf4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job) goto dump_header; } - ret = drm_gem_vmap_unlocked(&bo->base.base, &map); + ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); iter.hdr->bomap.valid = 0; @@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job) vaddr = map.vaddr; memcpy(iter.data, vaddr, bo->base.base.size); - drm_gem_vunmap_unlocked(&bo->base.base, &map); + drm_gem_vunmap(&bo->base.base, &map); iter.hdr->bomap.valid = 1; diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h index 7ed0cd3ea2d4..52f9d69f6db9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_features.h +++ b/drivers/gpu/drm/panfrost/panfrost_features.h @@ -54,6 +54,7 @@ enum panfrost_hw_feature { BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \ + BIT_ULL(HW_FEATURE_AARCH64_MMU) | \ BIT_ULL(HW_FEATURE_COHERENCY_REG)) #define hw_features_g72 (\ @@ -64,6 +65,7 @@ enum 
panfrost_hw_feature { BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \ + BIT_ULL(HW_FEATURE_AARCH64_MMU) | \ BIT_ULL(HW_FEATURE_COHERENCY_REG)) #define hw_features_g51 hw_features_g72 @@ -77,6 +79,7 @@ enum panfrost_hw_feature { BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \ BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \ + BIT_ULL(HW_FEATURE_AARCH64_MMU) | \ BIT_ULL(HW_FEATURE_COHERENCY_REG)) #define hw_features_g76 (\ diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 8e0ff3efede7..963f04ba2de6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj struct panfrost_gem_object *bo = to_panfrost_bo(obj); enum drm_gem_object_status res = 0; - if (bo->base.base.import_attach || bo->base.pages) + if (drm_gem_is_imported(&bo->base.base) || bo->base.pages) res |= DRM_GEM_OBJECT_RESIDENT; if (bo->base.madv == PANFROST_MADV_DONTNEED) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 3d9f51bd48b6..02b60ea1433a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge(&bo->base); + drm_gem_shmem_purge_locked(&bo->base); ret = true; dma_resv_unlock(shmem->base.resv); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index b91019cd5acb..f6b91c052cfb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -26,6 +26,48 @@ #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg) #define mmu_read(dev, reg) readl(dev->iomem + reg) +static u64 mair_to_memattr(u64 mair, bool coherent) +{ + u64 memattr = 0; + u32 i; + + for (i = 0; i < 8; i++) { + u8 in_attr = mair >> (8 * i), out_attr; + u8 outer = in_attr >> 4, inner = in_attr & 0xf; + + /* For caching to be enabled, the inner and outer caching + * policies must both be write-back; if either is write-through + * or non-cacheable, we just choose non-cacheable. Device + * memory is also translated to non-cacheable. + */ + if (!(outer & 3) || !(outer & 4) || !(inner & 4)) { + out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC | + AS_MEMATTR_AARCH64_SH_MIDGARD_INNER | + AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false); + } else { + out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB | + AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2); + /* Use SH_MIDGARD_INNER mode when device isn't coherent, + * so SH_IS, which is used when IOMMU_CACHE is set, maps + * to Mali's internal-shareable mode. As per the Mali + * Spec, inner and outer-shareable modes aren't allowed + * for WB memory when coherency is disabled. + * Use SH_CPU_INNER mode when coherency is enabled, so + * that SH_IS actually maps to the standard definition of + * inner-shareable.
+ */ + if (!coherent) + out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER; + else + out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER; + } + + memattr |= (u64)out_attr << (8 * i); + } + + return memattr; +} + static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) { int ret; @@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { int as_nr = mmu->as; - struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg; - u64 transtab = cfg->arm_mali_lpae_cfg.transtab; - u64 memattr = cfg->arm_mali_lpae_cfg.memattr; + u64 transtab = mmu->cfg.transtab; + u64 memattr = mmu->cfg.memattr; + u64 transcfg = mmu->cfg.transcfg; mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); @@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr)); mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr)); + mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg)); + mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg)); + write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } @@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr) mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0); mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0); + mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); + mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0); + write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } +static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu) +{ + struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg; + + /* TODO: The following fields are duplicated between the MMU and Page + * Table config structs. Ideally, they should be kept in one place.
+ */ + mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab; + mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr; + mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY; + + return 0; +} + +static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu) +{ + struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg; + struct panfrost_device *pfdev = mmu->pfdev; + + if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & + ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK)) + return -EINVAL; + + mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; + + mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair, + pgtbl_cfg->coherent_walk); + + mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB | + AS_TRANSCFG_PTW_RA | + AS_TRANSCFG_ADRMODE_AARCH64_4K | + AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias); + if (pgtbl_cfg->coherent_walk) + mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS; + + return 0; +} + +static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu, + enum io_pgtable_fmt fmt) +{ + struct panfrost_device *pfdev = mmu->pfdev; + + switch (fmt) { + case ARM_64_LPAE_S1: + return mmu_cfg_init_aarch64_4k(mmu); + case ARM_MALI_LPAE: + return mmu_cfg_init_mali_lpae(mmu); + default: + /* This should never happen */ + drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); + return -EINVAL; + } +} + u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { int as; @@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) struct drm_gem_object *obj = &shmem->base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; - int prot = IOMMU_READ | IOMMU_WRITE; + int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE; if (WARN_ON(mapping->active)) return 0; @@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_unlock; } bo->base.pages = pages; - bo->base.pages_use_count = 1; + refcount_set(&bo->base.pages_use_count, 1); } else { pages = bo->base.pages; if (pages[page_offset]) { @@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_map; mmu_map_sg(pfdev, bomapping->mmu, addr, - IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt); bomapping->active = true; bo->heap_rss_size += SZ_2M; @@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node, struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) { + u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features); + u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features); struct panfrost_mmu *mmu; + enum io_pgtable_fmt fmt; + int ret; + + if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) { + if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) { + dev_err_once(pfdev->dev, + "AARCH64_4K page table not supported\n"); + return ERR_PTR(-EINVAL); + } + fmt = ARM_64_LPAE_S1; + } else { + fmt = ARM_MALI_LPAE; + } mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); if (!mmu) @@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) mmu->pgtbl_cfg = (struct io_pgtable_cfg) { .pgsize_bitmap = SZ_4K | SZ_2M, - .ias = FIELD_GET(0xff, pfdev->features.mmu_features), - .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), + .ias = va_bits, + .oas = pa_bits, .coherent_walk = pfdev->coherent, .tlb = &mmu_tlb_ops, .iommu_dev = pfdev->dev, }; - mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg, - mmu); + 
mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu); if (!mmu->pgtbl_ops) { - kfree(mmu); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto err_free_mmu; } + ret = panfrost_mmu_cfg_init(mmu, fmt); + if (ret) + goto err_free_io_pgtable; + kref_init(&mmu->refcount); return mmu; + +err_free_io_pgtable: + free_io_pgtable_ops(mmu->pgtbl_ops); + +err_free_mmu: + kfree(mmu); + return ERR_PTR(ret); } static const char *access_type_name(struct panfrost_device *pfdev, diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index ba9b6e2b2636..52befead08c6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_close_bo; } - ret = drm_gem_vmap_unlocked(&bo->base, &map); + ret = drm_gem_vmap(&bo->base, &map); if (ret) goto err_put_mapping; perfcnt->buf = map.vaddr; @@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_vunmap_unlocked(&bo->base, &map); + drm_gem_vunmap(&bo->base, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map); + drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index c7bba476ab3f..2b8f1617b836 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -16,6 +16,8 @@ #define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */ #define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */ +#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0)) +#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0)) #define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */ #define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */ @@ -299,6 +301,17 @@ #define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */ #define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */ #define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */ +#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2) +#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \ + ((w) ? BIT(0) : 0) | \ + ((r) ? 
BIT(1) : 0)) +#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4) +#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4) +#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4) +#define AS_MEMATTR_AARCH64_SHARED (0 << 6) +#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6) +#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6) +#define AS_MEMATTR_AARCH64_FAULT (3 << 6) #define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */ #define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */ #define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */ @@ -309,6 +322,24 @@ /* Additional Bifrost AS registers */ #define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */ #define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */ +#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0) +#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0) +#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0) +#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0) +#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0) +#define AS_TRANSCFG_INA_BITS(x) ((x) << 6) +#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14) +#define AS_TRANSCFG_SL_CONCAT BIT(22) +#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24) +#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24) +#define AS_TRANSCFG_PTW_SH_NS (0 << 28) +#define AS_TRANSCFG_PTW_SH_OS (2 << 28) +#define AS_TRANSCFG_PTW_SH_IS (3 << 28) +#define AS_TRANSCFG_PTW_RA BIT(30) +#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33) +#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34) +#define AS_TRANSCFG_WXN BIT(35) +#define AS_TRANSCFG_XREADABLE BIT(36) #define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */ #define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */ @@ -324,6 +355,11 @@ #define AS_TRANSTAB_LPAE_READ_INNER BIT(2) #define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4) +/* + * Begin AARCH64_4K MMU TRANSTAB register values + */ +#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0 + #define AS_STATUS_AS_ACTIVE 0x01 #define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8) diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index a9da1d1eeb70..f0b2da5b2b96 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -171,10 +171,6 @@ int panthor_device_init(struct panthor_device *ptdev) struct page *p; int ret; - ret = panthor_gpu_coherency_init(ptdev); - if (ret) - return ret; - init_completion(&ptdev->unplug.done); ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock); if (ret) @@ -184,6 +180,11 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) return ret; +#ifdef CONFIG_DEBUG_FS + drmm_mutex_init(&ptdev->base, &ptdev->gems.lock); + INIT_LIST_HEAD(&ptdev->gems.node); +#endif + atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED); p = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!p) @@ -247,6 +248,10 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) goto err_rpm_put; + ret = panthor_gpu_coherency_init(ptdev); + if (ret) + goto err_unplug_gpu; + ret = panthor_mmu_init(ptdev); if (ret) goto err_unplug_gpu; diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index da6574021664..465d3ab1b79e 100644 --- 
a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -205,6 +205,17 @@ struct panthor_device { /** @fast_rate: Maximum device clock frequency. Set by DVFS */ unsigned long fast_rate; + +#ifdef CONFIG_DEBUG_FS + /** @gems: Device-wide list of GEM objects owned by at least one file. */ + struct { + /** @gems.lock: Protects the device-wide list of GEM objects. */ + struct mutex lock; + + /** @node: Used to keep track of all the device's DRM objects */ + struct list_head node; + } gems; +#endif }; struct panthor_gpu_usage { @@ -383,8 +394,6 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da if (!status) \ break; \ \ - gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \ - \ __handler(ptdev, status); \ ret = IRQ_HANDLED; \ } \ diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 06fe46e32073..6200cad22563 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -940,6 +940,7 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panthor_bo_mmap_offset *args = data; + struct panthor_gem_object *bo; struct drm_gem_object *obj; int ret; @@ -950,6 +951,12 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data, if (!obj) return -ENOENT; + bo = to_panthor_bo(obj); + if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) { + ret = -EPERM; + goto out; + } + ret = drm_gem_create_mmap_offset(obj); if (ret) goto out; @@ -1331,6 +1338,46 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data, return 0; } +static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_panthor_bo_set_label *args = data; + struct drm_gem_object *obj; + const char *label = NULL; + int ret = 0; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + if (args->label) { + label = strndup_user((const char __user *)(uintptr_t)args->label, + PANTHOR_BO_LABEL_MAXLEN); + if (IS_ERR(label)) { + ret = PTR_ERR(label); + if (ret == -EINVAL) + ret = -E2BIG; + goto err_put_obj; + } + } + + /* + * We treat passing a label of length 0 and passing a NULL label + * differently, because even though they might seem conceptually + * similar, future uses of the BO label might expect a different + * behaviour in each case. 
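From userspace, the new label ioctl is driven roughly as follows. This sketch assumes the uAPI definitions from include/uapi/drm/panthor_drm.h; the field names match the args usage above, and DRM_IOCTL_PANTHOR_BO_SET_LABEL is the ioctl added in this series (see the version-history bump below):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/panthor_drm.h>

    /* Attach a human-readable label to a panthor BO; returns 0 on success. */
    static int panthor_bo_label(int drm_fd, uint32_t handle, const char *text)
    {
            struct drm_panthor_bo_set_label args = {
                    .handle = handle,
                    /* .pad stays zero; a non-zero value is rejected with EINVAL */
                    .label = (uint64_t)(uintptr_t)text,
            };

            /*
             * text == NULL clears the label. A string longer than the
             * kernel-side PANTHOR_BO_LABEL_MAXLEN limit fails with E2BIG
             * rather than being silently truncated.
             */
            return ioctl(drm_fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);
    }
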
+ */ + panthor_gem_bo_set_label(obj, label); + +err_put_obj: + drm_gem_object_put(obj); + + return ret; +} + static int panthor_open(struct drm_device *ddev, struct drm_file *file) { @@ -1400,6 +1447,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = { PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW), PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW), PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW), + PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW), }; static int panthor_mmap(struct file *filp, struct vm_area_struct *vma) @@ -1496,9 +1544,34 @@ static const struct file_operations panthor_drm_driver_fops = { }; #ifdef CONFIG_DEBUG_FS +static int panthor_gems_show(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct panthor_device *ptdev = container_of(dev, struct panthor_device, base); + + panthor_gem_debugfs_print_bos(ptdev, m); + + return 0; +} + +static struct drm_info_list panthor_debugfs_list[] = { + {"gems", panthor_gems_show, 0, NULL}, +}; + +static int panthor_gems_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(panthor_debugfs_list, + ARRAY_SIZE(panthor_debugfs_list), + minor->debugfs_root, minor); + + return 0; +} + static void panthor_debugfs_init(struct drm_minor *minor) { panthor_mmu_debugfs_init(minor); + panthor_gems_debugfs_init(minor); } #endif @@ -1509,6 +1582,7 @@ static void panthor_debugfs_init(struct drm_minor *minor) * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query * - adds PANTHOR_GROUP_PRIORITY_REALTIME priority * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag + * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl */ static const struct drm_driver panthor_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ | @@ -1522,7 +1596,7 @@ static const struct drm_driver panthor_drm_driver = { .name = "panthor", .desc = "Panthor DRM driver", .major = 1, - .minor = 3, + .minor = 4, .gem_create_object = panthor_gem_create_object, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 0f52766a3120..7bc38e635329 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "Queue FW interface"); if (IS_ERR(mem)) return mem; @@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size) return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "FW suspend buffer"); } static int panthor_fw_load_section_entry(struct panthor_device *ptdev, @@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), section_size, DRM_PANTHOR_BO_NO_MMAP, - vm_map_flags, va); + vm_map_flags, va, "FW section"); if (IS_ERR(section->mem)) return PTR_ERR(section->mem); @@ -1008,6 +1010,8 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev) static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status) { + gpu_write(ptdev, 
JOB_INT_CLEAR, status); + if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF)) ptdev->fw->booted = true; diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 8244a4e6c2a2..2dcf308094b2 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -2,6 +2,7 @@ /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ /* Copyright 2023 Collabora ltd. */ +#include <linux/cleanup.h> #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/err.h> @@ -10,14 +11,59 @@ #include <drm/panthor_drm.h> #include "panthor_device.h" +#include "panthor_fw.h" #include "panthor_gem.h" #include "panthor_mmu.h" +#ifdef CONFIG_DEBUG_FS +static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, + struct panthor_gem_object *bo) +{ + INIT_LIST_HEAD(&bo->debugfs.node); + + bo->debugfs.creator.tgid = current->group_leader->pid; + get_task_comm(bo->debugfs.creator.process_name, current->group_leader); + + mutex_lock(&ptdev->gems.lock); + list_add_tail(&bo->debugfs.node, &ptdev->gems.node); + mutex_unlock(&ptdev->gems.lock); +} + +static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) +{ + struct panthor_device *ptdev = container_of(bo->base.base.dev, + struct panthor_device, base); + + if (list_empty(&bo->debugfs.node)) + return; + + mutex_lock(&ptdev->gems.lock); + list_del_init(&bo->debugfs.node); + mutex_unlock(&ptdev->gems.lock); +} + +#else +static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, + struct panthor_gem_object *bo) +{} +static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {} +#endif + static void panthor_gem_free_object(struct drm_gem_object *obj) { struct panthor_gem_object *bo = to_panthor_bo(obj); struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem; + panthor_gem_debugfs_bo_rm(bo); + + /* + * Label might have been allocated with kstrdup_const(), + * we need to take that into account when freeing the memory + */ + kfree_const(bo->label.str); + + mutex_destroy(&bo->label.lock); + drm_gem_free_mmap_offset(&bo->base.base); mutex_destroy(&bo->gpuva_list_lock); drm_gem_shmem_free(&bo->base); @@ -67,17 +113,19 @@ out_free_bo: * @gpu_va: GPU address assigned when mapping to the VM. * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be * automatically allocated. + * @name: Descriptive label of the BO's contents * * Return: A valid pointer in case of success, an ERR_PTR() otherwise. 
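The kfree_const() in panthor_gem_free_object() above deserves a note, since it pairs with the kstrdup_const() used for internally assigned labels. A minimal illustration of the pairing (kernel context; runtime_buf stands in for any heap- or stack-backed string):

    #include <linux/string.h>

    /*
     * kstrdup_const() skips the copy when the source lives in .rodata
     * (e.g. a string literal) and simply returns the original pointer;
     * otherwise it behaves like kstrdup(). kfree_const() mirrors that:
     * it is a no-op for .rodata pointers and a regular kfree() for real
     * duplicates, making it the only safe way to free a string whose
     * provenance is unknown.
     */
    static void label_demo(const char *runtime_buf)
    {
            const char *a = kstrdup_const("FW section", GFP_KERNEL); /* may alias the literal */
            const char *b = kstrdup_const(runtime_buf, GFP_KERNEL);  /* always a copy */

            kfree_const(a); /* no-op if 'a' aliases .rodata */
            kfree_const(b); /* kfree()s the duplicate */
    }
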
*/ struct panthor_kernel_bo * panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, size_t size, u32 bo_flags, u32 vm_map_flags, - u64 gpu_va) + u64 gpu_va, const char *name) { struct drm_gem_shmem_object *obj; struct panthor_kernel_bo *kbo; struct panthor_gem_object *bo; + u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL; int ret; if (drm_WARN_ON(&ptdev->base, !vm)) @@ -97,6 +145,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, kbo->obj = &obj->base; bo->flags = bo_flags; + if (vm == panthor_fw_vm(ptdev)) + debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED; + + panthor_gem_kernel_bo_set_label(kbo, name); + panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags); + /* The system and GPU MMU page size might differ, which becomes a * problem for FW sections that need to be mapped at explicit address * since our PAGE_SIZE alignment might cover a VA range that's @@ -129,17 +183,6 @@ err_free_bo: return ERR_PTR(ret); } -static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -{ - struct panthor_gem_object *bo = to_panthor_bo(obj); - - /* Don't allow mmap on objects that have the NO_MMAP flag set. */ - if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) - return -EINVAL; - - return drm_gem_shmem_object_mmap(obj, vma); -} - static struct dma_buf * panthor_gem_prime_export(struct drm_gem_object *obj, int flags) { @@ -155,7 +198,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj) struct panthor_gem_object *bo = to_panthor_bo(obj); enum drm_gem_object_status res = 0; - if (bo->base.base.import_attach || bo->base.pages) + if (drm_gem_is_imported(&bo->base.base) || bo->base.pages) res |= DRM_GEM_OBJECT_RESIDENT; return res; @@ -169,7 +212,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = { .get_sg_table = drm_gem_shmem_object_get_sg_table, .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, - .mmap = panthor_gem_mmap, + .mmap = drm_gem_shmem_object_mmap, .status = panthor_gem_status, .export = panthor_gem_prime_export, .vm_ops = &drm_gem_shmem_vm_ops, @@ -196,6 +239,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t obj->base.map_wc = !ptdev->coherent; mutex_init(&obj->gpuva_list_lock); drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); + mutex_init(&obj->label.lock); + + panthor_gem_debugfs_bo_add(ptdev, obj); return &obj->base.base; } @@ -245,5 +291,153 @@ panthor_gem_create_with_handle(struct drm_file *file, /* drop reference from allocate - handle holds it now. */ drm_gem_object_put(&shmem->base); + /* + * No explicit flags are needed in the call below, since the + * function internally sets the INITIALIZED bit for us. 
+ */ + panthor_gem_debugfs_set_usage_flags(bo, 0); + return ret; } + +void +panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label) +{ + struct panthor_gem_object *bo = to_panthor_bo(obj); + const char *old_label; + + scoped_guard(mutex, &bo->label.lock) { + old_label = bo->label.str; + bo->label.str = label; + } + + kfree_const(old_label); +} + +void +panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label) +{ + const char *str; + + /* We should never attempt labelling a UM-exposed GEM object */ + if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0)) + return; + + if (!label) + return; + + str = kstrdup_const(label, GFP_KERNEL); + if (!str) { + /* Failing to allocate memory for a label isn't a fatal condition */ + drm_warn(bo->obj->dev, "Not enough memory to allocate BO label"); + return; + } + + panthor_gem_bo_set_label(bo->obj, str); +} + +#ifdef CONFIG_DEBUG_FS +struct gem_size_totals { + size_t size; + size_t resident; + size_t reclaimable; +}; + +static void panthor_gem_debugfs_print_flag_names(struct seq_file *m) +{ + int len; + int i; + + static const char * const gem_state_flags_names[] = { + [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported", + [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported", + }; + + static const char * const gem_usage_flags_names[] = { + [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel", + [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped", + }; + + seq_puts(m, "GEM state flags: "); + for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) { + if (!gem_state_flags_names[i]) + continue; + seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i], + (u32)BIT(i), (i < len - 1) ? ", " : "\n"); + } + + seq_puts(m, "GEM usage flags: "); + for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) { + if (!gem_usage_flags_names[i]) + continue; + seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i], + (u32)BIT(i), (i < len - 1) ? ", " : "\n\n"); + } +} + +static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo, + struct seq_file *m, + struct gem_size_totals *totals) +{ + unsigned int refcount = kref_read(&bo->base.base.refcount); + char creator_info[32] = {}; + size_t resident_size; + u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; + u32 gem_state_flags = 0; + + /* Skip BOs being destroyed. */ + if (!refcount) + return; + + resident_size = bo->base.pages ? bo->base.base.size : 0; + + snprintf(creator_info, sizeof(creator_info), + "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid); + seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx", + creator_info, + bo->base.base.name, + refcount, + bo->base.base.size, + resident_size, + drm_vma_node_start(&bo->base.base.vma_node)); + + if (bo->base.base.import_attach) + gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED; + if (bo->base.base.dma_buf) + gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED; + + seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags); + + scoped_guard(mutex, &bo->label.lock) { + seq_printf(m, "%s\n", bo->label.str ? 
: ""); + } + + totals->size += bo->base.base.size; + totals->resident += resident_size; + if (bo->base.madv > 0) + totals->reclaimable += resident_size; +} + +void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev, + struct seq_file *m) +{ + struct gem_size_totals totals = {0}; + struct panthor_gem_object *bo; + + panthor_gem_debugfs_print_flag_names(m); + + seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n"); + seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n"); + + scoped_guard(mutex, &ptdev->gems.lock) { + list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) { + if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED) + panthor_gem_debugfs_bo_print(bo, m, &totals); + } + } + + seq_puts(m, "==============================================================================================================================================\n"); + seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n", + totals.size, totals.resident, totals.reclaimable); +} +#endif diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h index 5749ef2ebe03..4641994ddd7f 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.h +++ b/drivers/gpu/drm/panthor/panthor_gem.h @@ -13,6 +13,56 @@ struct panthor_vm; +#define PANTHOR_BO_LABEL_MAXLEN 4096 + +enum panthor_debugfs_gem_state_flags { + PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT = 0, + PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT = 1, + + /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */ + PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT), + + /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */ + PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT), +}; + +enum panthor_debugfs_gem_usage_flags { + PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT = 0, + PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT = 1, + + /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL: BO is for kernel use only. */ + PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL = BIT(PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT), + + /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */ + PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT), + + /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */ + PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31), +}; + +/** + * struct panthor_gem_debugfs - GEM object's DebugFS list information + */ +struct panthor_gem_debugfs { + /** + * @node: Node used to insert the object in the device-wide list of + * GEM objects, to display information about it through a DebugFS file. + */ + struct list_head node; + + /** @creator: Information about the UM process which created the GEM. */ + struct { + /** @creator.process_name: Group leader name in owning thread's process */ + char process_name[TASK_COMM_LEN]; + + /** @creator.tgid: PID of the thread's group leader within its process */ + pid_t tgid; + } creator; + + /** @flags: Combination of panthor_debugfs_gem_usage_flags flags */ + u32 flags; +}; + /** * struct panthor_gem_object - Driver specific GEM object. */ @@ -46,6 +96,24 @@ struct panthor_gem_object { /** @flags: Combination of drm_panthor_bo_flags flags. */ u32 flags; + + /** + * @label: BO tagging fields. The label can be assigned within the + * driver itself or through a specific IOCTL. 
+ */ + struct { + /** + * @label.str: Pointer to a NUL-terminated label string. + */ + const char *str; + + /** @label.lock: Protects access to the @label.str field. */ + struct mutex lock; + } label; + +#ifdef CONFIG_DEBUG_FS + struct panthor_gem_debugfs debugfs; +#endif }; /** @@ -91,6 +159,9 @@ panthor_gem_create_with_handle(struct drm_file *file, struct panthor_vm *exclusive_vm, u64 *size, u32 flags, uint32_t *handle); +void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label); +void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label); + static inline u64 panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo) { @@ -112,7 +183,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo) if (bo->kmap) return 0; - ret = drm_gem_vmap_unlocked(bo->obj, &map); + ret = drm_gem_vmap(bo->obj, &map); if (ret) return ret; @@ -126,7 +197,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo) if (bo->kmap) { struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap); - drm_gem_vunmap_unlocked(bo->obj, &map); + drm_gem_vunmap(bo->obj, &map); bo->kmap = NULL; } } @@ -134,8 +205,21 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo) struct panthor_kernel_bo * panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, size_t size, u32 bo_flags, u32 vm_map_flags, - u64 gpu_va); + u64 gpu_va, const char *name); void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo); +#ifdef CONFIG_DEBUG_FS +void panthor_gem_debugfs_print_bos(struct panthor_device *pfdev, + struct seq_file *m); +static inline void +panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) +{ + bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; +} + +#else +static inline void +panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {} +#endif + #endif /* __PANTHOR_GEM_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 671049020afa..32d678a0114e 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -150,6 +150,8 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev) static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { + gpu_write(ptdev, GPU_INT_CLEAR, status); + if (status & GPU_IRQ_FAULT) { u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS); u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) | diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c index 3bdf61c14264..d236e9ceade4 100644 --- a/drivers/gpu/drm/panthor/panthor_heap.c +++ b/drivers/gpu/drm/panthor/panthor_heap.c @@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool, chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "Tiler heap chunk"); if (IS_ERR(chunk->bo)) { ret = PTR_ERR(chunk->bo); goto err_free_chunk; @@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm) pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "Heap pool"); if (IS_ERR(pool->gpu_contexts)) { ret = PTR_ERR(pool->gpu_contexts); goto err_destroy_pool; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index
12a02e28f50f..6ca9a2642a4e 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -781,6 +781,7 @@ out_enable_as: if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) { gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as)); ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as); + ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as); gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask); } @@ -1103,7 +1104,7 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo) /* If the vm_bo object was destroyed, release the pin reference that * was hold by this object. */ - if (unpin && !bo->base.base.import_attach) + if (unpin && !drm_gem_is_imported(&bo->base.base)) drm_gem_shmem_unpin(&bo->base); drm_gpuvm_put(vm); @@ -1234,7 +1235,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, if (ret) goto err_cleanup; - if (!bo->base.base.import_attach) { + if (!drm_gem_is_imported(&bo->base.base)) { /* Pre-reserve the BO pages, so the map operation doesn't have to * allocate. */ @@ -1245,7 +1246,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) { - if (!bo->base.base.import_attach) + if (!drm_gem_is_imported(&bo->base.base)) drm_gem_shmem_unpin(&bo->base); ret = PTR_ERR(sgt); @@ -1256,7 +1257,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base); if (!preallocated_vm_bo) { - if (!bo->base.base.import_attach) + if (!drm_gem_is_imported(&bo->base.base)) drm_gem_shmem_unpin(&bo->base); ret = -ENOMEM; @@ -1282,7 +1283,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, * which will be released in panthor_vm_bo_put(). */ if (preallocated_vm_bo != op_ctx->map.vm_bo && - !bo->base.base.import_attach) + !drm_gem_is_imported(&bo->base.base)) drm_gem_shmem_unpin(&bo->base); op_ctx->map.bo_offset = offset; @@ -1709,11 +1710,17 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) access_type, access_type_name(ptdev, fault_status), source_id); + /* We don't handle VM faults at the moment, so let's just clear the + * interrupt and let the writer/reader crash. + * Note that COMPLETED irqs are never cleared, but this is fine + * because they are always masked. + */ + gpu_write(ptdev, MMU_INT_CLEAR, mask); + /* Ignore MMU interrupts on this AS until it's been * re-enabled. 
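Together with the out_enable_as hunk above, fault handling now follows a clear pattern, sketched below with the driver's own field names (new_int_mask is computed earlier in the handler; the final INT_MASK write on the fault path is left to the IRQ wrapper in panthor_device.h, which re-applies irq.mask once the threaded handler finishes):

    /* Fault path: ack the IRQ so the line can de-assert, but drop the
     * faulting AS from the logical mask so we are not flooded while
     * the AS is known to be broken.
     */
    gpu_write(ptdev, MMU_INT_CLEAR, mask);
    ptdev->mmu->irq.mask = new_int_mask;

    /* Re-enable path, when the AS is re-assigned to a VM: clear the
     * sticky fault status and fold the AS back into both masks.
     */
    gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
    ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
    ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
    gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
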
*/ ptdev->mmu->irq.mask = new_int_mask; - gpu_write(ptdev, MMU_INT_MASK, new_int_mask); if (ptdev->mmu->as.slots[as].vm) ptdev->mmu->as.slots[as].vm->unhandled_fault = true; diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index b7b3b3add166..a7a323dc5cf9 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -133,8 +133,8 @@ #define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name) #define GPU_COHERENCY_PROTOCOL 0x304 -#define GPU_COHERENCY_ACE 0 -#define GPU_COHERENCY_ACE_LITE 1 +#define GPU_COHERENCY_ACE_LITE 0 +#define GPU_COHERENCY_ACE 1 #define GPU_COHERENCY_NONE 31 #define MCU_CONTROL 0x700 diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 4d31d1967716..43ee57728de5 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue) if (queue->syncwait.kmap) { struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap); - drm_gem_vunmap_unlocked(queue->syncwait.obj, &map); + drm_gem_vunmap(queue->syncwait.obj, &map); queue->syncwait.kmap = NULL; } @@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue goto err_put_syncwait_obj; queue->syncwait.obj = &bo->base.base; - ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map); + ret = drm_gem_vmap(queue->syncwait.obj, &map); if (drm_WARN_ON(&ptdev->base, ret)) goto err_put_syncwait_obj; @@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "CS ring buffer"); if (IS_ERR(queue->ringbuf)) { ret = PTR_ERR(queue->ringbuf); goto err_free_queue; @@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "Group job stats"); if (IS_ERR(queue->profiling.slots)) { ret = PTR_ERR(queue->profiling.slots); @@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile, DRM_PANTHOR_BO_NO_MMAP, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED, - PANTHOR_VM_KERNEL_AUTO_VA); + PANTHOR_VM_KERNEL_AUTO_VA, + "Group sync objects"); if (IS_ERR(group->syncobjs)) { ret = PTR_ERR(group->syncobjs); goto err_put_group; diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index 1e4b28d03f4d..5f460b296c0c 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) * if we find it, it will take precedence. This is on the Integrator/AP * which only has this option for PL110 graphics. 
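The swapped GPU_COHERENCY_ACE/ACE_LITE encodings above matter because the coherency setup validates the requested protocol against what the GPU reports. A hedged sketch of that check, simplified from panthor_gpu_coherency_init(); the GPU_COHERENCY_FEATURES read is an assumption about where the capability bits come from:

    u32 supported = gpu_read(ptdev, GPU_COHERENCY_FEATURES);

    /* With the corrected encodings, ACE_LITE is protocol 0, ACE is 1. */
    if (ptdev->coherent && !(supported & GPU_COHERENCY_PROT_BIT(ACE_LITE))) {
            drm_err(&ptdev->base,
                    "IO-coherent DMA requested, but ACE-Lite is unsupported");
            return -ENODEV;
    }
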
*/ - if (versatile_clcd_type == INTEGRATOR_CLCD_CM) { + if (versatile_clcd_type == INTEGRATOR_CLCD_CM) { np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match, &clcd_id); if (np) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fa78824931cc..3f3c360dce4b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) u8 link_status[DP_LINK_STATUS_SIZE]; struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c index 380a855b832a..a9145253294f 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c @@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge, } static int rcar_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge, if (!lvds->next_bridge) return 0; - return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge, + return drm_bridge_attach(encoder, lvds->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index d1e626068065..7ab8be46c7f6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi) */ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + return drm_bridge_attach(encoder, dsi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig index 7c1817240846..e57536fd6f4d 100644 --- a/drivers/gpu/drm/renesas/rz-du/Kconfig +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -14,10 +14,15 @@ config DRM_RZG2L_DU Choose this option if you have an RZ/G2L alike chipset. If M is selected the module will be called rzg2l-du-drm. 
-config DRM_RZG2L_MIPI_DSI - tristate "RZ/G2L MIPI DSI Encoder Support" - depends on DRM && DRM_BRIDGE && OF - depends on ARCH_RENESAS || COMPILE_TEST - select DRM_MIPI_DSI +config DRM_RZG2L_USE_MIPI_DSI + bool "RZ/G2L MIPI DSI Encoder Support" + depends on DRM_BRIDGE && OF + depends on DRM_RZG2L_DU || COMPILE_TEST + default DRM_RZG2L_DU help Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders. + +config DRM_RZG2L_MIPI_DSI + def_tristate DRM_RZG2L_DU + depends on DRM_RZG2L_USE_MIPI_DSI + select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c index cbd9b9841267..5e40f0c1e7b0 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c @@ -79,7 +79,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops); static const struct drm_driver rzg2l_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .dumb_create = rzg2l_du_dumb_create, + DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create), DRM_FBDEV_DMA_DRIVER_OPS, .fops = &rzg2l_du_fops, .name = "rzg2l-du", diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c index 90c6269ccd29..55a97691e9b2 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c @@ -36,23 +36,129 @@ static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = { { - .fourcc = DRM_FORMAT_XRGB8888, - .v4l2 = V4L2_PIX_FMT_XBGR32, - .bpp = 32, + .fourcc = DRM_FORMAT_RGB332, + .v4l2 = V4L2_PIX_FMT_RGB332, .planes = 1, .hsub = 1, }, { - .fourcc = DRM_FORMAT_ARGB8888, - .v4l2 = V4L2_PIX_FMT_ABGR32, - .bpp = 32, + .fourcc = DRM_FORMAT_ARGB4444, + .v4l2 = V4L2_PIX_FMT_ARGB444, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_XRGB4444, + .v4l2 = V4L2_PIX_FMT_XRGB444, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_ARGB1555, + .v4l2 = V4L2_PIX_FMT_ARGB555, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_XRGB1555, + .v4l2 = V4L2_PIX_FMT_XRGB555, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_RGB565, + .v4l2 = V4L2_PIX_FMT_RGB565, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_BGR888, + .v4l2 = V4L2_PIX_FMT_RGB24, .planes = 1, .hsub = 1, }, { .fourcc = DRM_FORMAT_RGB888, .v4l2 = V4L2_PIX_FMT_BGR24, - .bpp = 24, .planes = 1, .hsub = 1, + }, { + .fourcc = DRM_FORMAT_BGRA8888, + .v4l2 = V4L2_PIX_FMT_ARGB32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_BGRX8888, + .v4l2 = V4L2_PIX_FMT_XRGB32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_ARGB8888, + .v4l2 = V4L2_PIX_FMT_ABGR32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_XRGB8888, + .v4l2 = V4L2_PIX_FMT_XBGR32, + .planes = 1, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_UYVY, + .v4l2 = V4L2_PIX_FMT_UYVY, + .planes = 1, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YUYV, + .v4l2 = V4L2_PIX_FMT_YUYV, + .planes = 1, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YVYU, + .v4l2 = V4L2_PIX_FMT_YVYU, + .planes = 1, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_NV12, + .v4l2 = V4L2_PIX_FMT_NV12M, + .planes = 2, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_NV21, + .v4l2 = V4L2_PIX_FMT_NV21M, + .planes = 2, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_NV16, + .v4l2 = V4L2_PIX_FMT_NV16M, + .planes = 2, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_NV61, + .v4l2 = V4L2_PIX_FMT_NV61M, + .planes = 2, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YUV420, + .v4l2 = V4L2_PIX_FMT_YUV420M, + .planes = 3, + .hsub = 2, + }, { + .fourcc =
DRM_FORMAT_YVU420, + .v4l2 = V4L2_PIX_FMT_YVU420M, + .planes = 3, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YUV422, + .v4l2 = V4L2_PIX_FMT_YUV422M, + .planes = 3, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YVU422, + .v4l2 = V4L2_PIX_FMT_YVU422M, + .planes = 3, + .hsub = 2, + }, { + .fourcc = DRM_FORMAT_YUV444, + .v4l2 = V4L2_PIX_FMT_YUV444M, + .planes = 3, + .hsub = 1, + }, { + .fourcc = DRM_FORMAT_YVU444, + .v4l2 = V4L2_PIX_FMT_YVU444M, + .planes = 3, + .hsub = 1, } }; diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h index 876e97cfbf45..e2c599f115c6 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h @@ -23,7 +23,6 @@ struct sg_table; struct rzg2l_du_format_info { u32 fourcc; u32 v4l2; - unsigned int bpp; unsigned int planes; unsigned int hsub; }; diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c index 8643ff2eec46..040d4e4aff00 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c @@ -340,6 +340,15 @@ int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np, drm_plane_helper_add(&plane->plane, &rzg2l_du_vsp_plane_helper_funcs); + + drm_plane_create_alpha_property(&plane->plane); + drm_plane_create_zpos_property(&plane->plane, i, 0, + num_planes - 1); + + drm_plane_create_blend_mode_property(&plane->plane, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); } return 0; diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c index 4550c6d84796..dc6ab012cdb6 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c @@ -479,7 +479,7 @@ static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi) u32 status; int ret; - /* Configuration for Blanking sequence and start video input*/ + /* Configuration for Blanking sequence and start video input */ vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP | VICH1SET0R_HSANOLP | VICH1SET0R_VSTART; rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r); @@ -523,11 +523,12 @@ err: */ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + return drm_bridge_attach(encoder, dsi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 26c4410b2407..f9d7776a859a 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -8,6 +8,7 @@ config DRM_ROCKCHIP select DRM_PANEL select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP + select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index a8265a1bf9ff..d30f0983a53a 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -21,6 +21,7 @@ #include <video/of_videomode.h> #include <video/videomode.h> +#include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> #include 
<drm/drm_atomic_helper.h> @@ -51,11 +52,13 @@ struct rockchip_grf_reg_field { /** * struct rockchip_dp_chip_data - splite the grf setting of kind of chips * @lcdc_sel: grf register field of lcdc_sel + * @edp_mode: grf register field of edp_mode * @chip_type: specific chip type * @reg: register base address */ struct rockchip_dp_chip_data { const struct rockchip_grf_reg_field lcdc_sel; + const struct rockchip_grf_reg_field edp_mode; u32 chip_type; u32 reg; }; @@ -70,6 +73,7 @@ struct rockchip_dp_device { struct clk *grfclk; struct regmap *grf; struct reset_control *rst; + struct reset_control *apbrst; const struct rockchip_dp_chip_data *data; @@ -115,6 +119,10 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) usleep_range(10, 20); reset_control_deassert(dp->rst); + reset_control_assert(dp->apbrst); + usleep_range(10, 20); + reset_control_deassert(dp->apbrst); + return 0; } @@ -136,12 +144,21 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data) return ret; } + ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1); + if (ret != 0) + DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret); + return ret; } static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data) { struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data); + int ret; + + ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0); + if (ret != 0) + DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret); clk_disable_unprepare(dp->pclk); @@ -205,6 +222,10 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder, struct rockchip_dp_device *dp = encoder_to_dp(encoder); struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; + struct of_endpoint endpoint; + struct device_node *remote_port, *remote_port_parent; + char name[32]; + u32 port_id; int ret; crtc = rockchip_dp_drm_get_new_crtc(encoder, state); @@ -222,13 +243,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder, return; } - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); + ret = drm_of_encoder_active_endpoint(dp->dev->of_node, encoder, &endpoint); if (ret < 0) return; - DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG"); + remote_port_parent = of_graph_get_remote_port_parent(endpoint.local_node); + if (remote_port_parent) { + if (of_get_child_by_name(remote_port_parent, "ports")) { + remote_port = of_graph_get_remote_port(endpoint.local_node); + of_property_read_u32(remote_port, "reg", &port_id); + of_node_put(remote_port); + sprintf(name, "%s vp%d", remote_port_parent->full_name, port_id); + } else { + sprintf(name, "%s %s", + remote_port_parent->full_name, endpoint.id ? "vopl" : "vopb"); + } + of_node_put(remote_port_parent); + + DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? 
"LIT" : "BIG"); + } - ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret); + ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, endpoint.id); if (ret != 0) DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); @@ -322,6 +357,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) return PTR_ERR(dp->rst); } + dp->apbrst = devm_reset_control_get_optional(dev, "apb"); + if (IS_ERR(dp->apbrst)) { + DRM_DEV_ERROR(dev, "failed to get apb reset control\n"); + return PTR_ERR(dp->apbrst); + } + return 0; } @@ -392,11 +433,28 @@ static const struct component_ops rockchip_dp_component_ops = { .unbind = rockchip_dp_unbind, }; +static int rockchip_dp_link_panel(struct drm_dp_aux *aux) +{ + struct analogix_dp_plat_data *plat_data = analogix_dp_aux_to_plat_data(aux); + struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data); + int ret; + + /* + * If drm_of_find_panel_or_bridge() returns -ENODEV, there may be no valid panel + * or bridge nodes. The driver should go on for the driver-free bridge or the DP + * mode applications. + */ + ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 1, 0, &plat_data->panel, NULL); + if (ret && ret != -ENODEV) + return ret; + + return component_add(dp->dev, &rockchip_dp_component_ops); +} + static int rockchip_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct rockchip_dp_chip_data *dp_data; - struct drm_panel *panel = NULL; struct rockchip_dp_device *dp; struct resource *res; int i; @@ -406,10 +464,6 @@ static int rockchip_dp_probe(struct platform_device *pdev) if (!dp_data) return -ENODEV; - ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); - if (ret < 0 && ret != -ENODEV) - return ret; - dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); if (!dp) return -ENOMEM; @@ -432,7 +486,6 @@ static int rockchip_dp_probe(struct platform_device *pdev) dp->dev = dev; dp->adp = ERR_PTR(-ENODEV); - dp->plat_data.panel = panel; dp->plat_data.dev_type = dp->data->chip_type; dp->plat_data.power_on = rockchip_dp_poweron; dp->plat_data.power_off = rockchip_dp_powerdown; @@ -448,9 +501,20 @@ static int rockchip_dp_probe(struct platform_device *pdev) if (IS_ERR(dp->adp)) return PTR_ERR(dp->adp); - ret = component_add(dev, &rockchip_dp_component_ops); - if (ret) - return ret; + ret = devm_of_dp_aux_populate_bus(analogix_dp_get_aux(dp->adp), rockchip_dp_link_panel); + if (ret) { + /* + * If devm_of_dp_aux_populate_bus() returns -ENODEV, the done_probing() will not + * be called because there are no EP devices. Then the rockchip_dp_link_panel() + * will be called directly in order to support the other valid DT configurations. + * + * NOTE: The devm_of_dp_aux_populate_bus() is allowed to return -EPROBE_DEFER. 
+ */ + if (ret != -ENODEV) + return dev_err_probe(dp->dev, ret, "failed to populate aux bus\n"); + + return rockchip_dp_link_panel(analogix_dp_get_aux(dp->adp)); + } return 0; } @@ -501,9 +565,24 @@ static const struct rockchip_dp_chip_data rk3288_dp[] = { { /* sentinel */ } }; +static const struct rockchip_dp_chip_data rk3588_edp[] = { + { + .edp_mode = GRF_REG_FIELD(0x0000, 0, 0), + .chip_type = RK3588_EDP, + .reg = 0xfdec0000, + }, + { + .edp_mode = GRF_REG_FIELD(0x0004, 0, 0), + .chip_type = RK3588_EDP, + .reg = 0xfded0000, + }, + { /* sentinel */ } +}; + static const struct of_device_id rockchip_dp_dt_ids[] = { {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp }, {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp }, + {.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp }, {} }; MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index e3596e2b557d..ba6b0528d1e5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -733,11 +733,10 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc, WARN_ON(vop->event); - if (crtc->state->self_refresh_active) + if (crtc->state->self_refresh_active) { rockchip_drm_set_win_enabled(crtc, false); - - if (crtc->state->self_refresh_active) goto out; + } mutex_lock(&vop->vop_lock); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 680bedbb770e..fc3ecb9fcd95 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -710,6 +710,7 @@ enum dst_factor_mode { #define VOP2_COLOR_KEY_MASK BIT(31) +#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL GENMASK(31, 30) #define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28) #define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp) diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 0a2840cbe8e2..32c4ed685739 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -2070,7 +2070,10 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); - ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; + ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; + ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL; + ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id); + if (vcstate->yuv_overlay) ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); else diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 4e2099d86517..d1f788763318 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -906,21 +906,21 @@ static const struct vop_data rk3366_vop = { static const struct vop_output rk3399_output = { .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19), - .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19), - .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23), - .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27), - .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31), + .rgb_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 31), .dp_pin_pol = 
VOP_REG(RK3399_DSP_CTRL1, 0x7, 16), - .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16), - .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20), - .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24), - .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28), + .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 28), .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11), - .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12), - .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13), - .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14), - .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15), - .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3), + .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), + .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), + .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), + .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15), + .mipi_dual_channel_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 3), }; static const struct vop_common rk3399_common = { @@ -975,23 +975,23 @@ static const struct vop_win_phy rk3399_win0_data = { .data_formats = formats_win_full_10, .nformats = ARRAY_SIZE(formats_win_full_10), .format_modifiers = format_modifiers_win_full_afbc, - .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), - .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), - .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4), - .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), - .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15), - .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), - .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), - .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), - .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0), - .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0), - .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), - .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), - .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), - .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0), + .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0), + .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1), + .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4), + .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12), + .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15), + .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21), + .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22), + .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0), + .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16), + .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0), + .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0), + .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0), }; static const struct vop_win_phy rk3399_win1_data = { @@ -999,23 +999,23 @@ static const struct vop_win_phy rk3399_win1_data = { .data_formats = formats_win_full_10, .nformats = ARRAY_SIZE(formats_win_full_10), .format_modifiers = format_modifiers_win_full, - .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), - .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), - .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4), - .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 
0x1, 12), - .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15), - .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), - .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), - .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0), - .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0), - .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0), - .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16), - .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0), - .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0), - .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0), + .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0), + .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1), + .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4), + .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12), + .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15), + .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21), + .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22), + .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0), + .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16), + .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0), + .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0), + .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0), }; /* diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig new file mode 100644 index 000000000000..cece53609fcf --- /dev/null +++ b/drivers/gpu/drm/scheduler/.kunitconfig @@ -0,0 +1,12 @@ +CONFIG_KUNIT=y +CONFIG_DRM=y +CONFIG_DRM_SCHED_KUNIT_TEST=y +CONFIG_EXPERT=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +CONFIG_LOCKDEP=y +CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_LIST=y diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index 53863621829f..6e13e4c63e9d 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -23,3 +23,5 @@ gpu-sched-y := sched_main.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o + +obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index bfea608a7106..829579c41c6b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init); * * This arms a scheduler job for execution. Specifically it initializes the * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv - * or other places that need to track the completion of this job. + * or other places that need to track the completion of this job. It also + * initializes sequence numbers, which are fundamental for fence ordering. * * Refer to drm_sched_entity_push_job() documentation for locking * considerations. * + * Once this function was called, you *must* submit @job with + * drm_sched_entity_push_job(). + * * This can only be called if drm_sched_job_init() succeeded. 
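In driver terms, the submission contract documented here is: init, optionally add dependencies, arm, then push; cleanup is only legal before arming or from the free_job callback. A minimal sketch, with my_job and the validation step as placeholders:

    #include <drm/gpu_scheduler.h>
    #include <linux/slab.h>

    struct my_job {
            struct drm_sched_job base;
    };

    static int my_submit(struct drm_sched_entity *entity, void *owner)
    {
            struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
            int ret;

            if (!job)
                    return -ENOMEM;

            ret = drm_sched_job_init(&job->base, entity, 1, owner);
            if (ret)
                    goto err_free;

            if (my_validate_job(job)) {     /* aborting is still fine here */
                    ret = -EINVAL;
                    goto err_cleanup;
            }

            drm_sched_job_arm(&job->base);  /* point of no return */
            drm_sched_entity_push_job(&job->base);
            return 0;

    err_cleanup:
            drm_sched_job_cleanup(&job->base); /* legal: not armed yet */
    err_free:
            kfree(job);
            return ret;
    }
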
 */
 void drm_sched_job_arm(struct drm_sched_job *job)
@@ -1015,13 +1019,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
  *
  * Cleans up the resources allocated with drm_sched_job_init().
  *
  * Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
  *
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence numbers, etc. Once that function has been called, you
+ * *must* submit the job with drm_sched_entity_push_job() and cannot simply
+ * abort it by calling drm_sched_job_cleanup().
  *
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
  */
 void drm_sched_job_cleanup(struct drm_sched_job *job)
 {
@@ -1029,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
 	unsigned long index;
 
 	if (kref_read(&job->s_fence->finished.refcount)) {
-		/* drm_sched_job_arm() has been called */
+		/* The job has been processed by the scheduler, i.e.,
+		 * drm_sched_job_arm() and drm_sched_entity_push_job() have
+		 * been called.
+		 */
 		dma_fence_put(&job->s_fence->finished);
 	} else {
-		/* aborted job before arming */
+		/* The job was aborted before it was committed to be run;
+		 * notably, drm_sched_job_arm() has not been called.
+		 */
 		drm_sched_fence_free(job->s_fence);
 	}
 
@@ -1220,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
 	drm_sched_job_begin(sched_job);
 	trace_drm_run_job(sched_job, entity);
+	/*
+	 * The run_job() callback must, by definition, return a fence whose
+	 * refcount has been incremented for the scheduler already.
+	 */
 	fence = sched->ops->run_job(sched_job);
 	complete_all(&entity->entity_idle);
 	drm_sched_fence_scheduled(s_fence, fence);
 
 	if (!IS_ERR_OR_NULL(fence)) {
-		/* Drop for original kref_init of the fence */
-		dma_fence_put(fence);
-
 		r = dma_fence_add_callback(fence, &sched_job->cb, drm_sched_job_done_cb);
 		if (r == -ENOENT)
 			drm_sched_job_done(sched_job, fence->error);
 		else if (r)
 			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+		dma_fence_put(fence);
 	} else {
 		drm_sched_job_done(sched_job, IS_ERR(fence) ?
 				   PTR_ERR(fence) : 0);
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+	mock_scheduler.o \
+	tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..f999c8859cf7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */ + +/** + * drm_mock_sched_entity_new - Create a new mock scheduler entity + * + * @test: KUnit test owning the entity + * @priority: Scheduling priority + * @sched: Mock scheduler on which the entity can be scheduled + * + * Returns: New mock scheduler entity with allocation managed by the test + */ +struct drm_mock_sched_entity * +drm_mock_sched_entity_new(struct kunit *test, + enum drm_sched_priority priority, + struct drm_mock_scheduler *sched) +{ + struct drm_mock_sched_entity *entity; + struct drm_gpu_scheduler *drm_sched; + int ret; + + entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, entity); + + drm_sched = &sched->base; + ret = drm_sched_entity_init(&entity->base, + priority, + &drm_sched, 1, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + entity->test = test; + + return entity; +} + +/** + * drm_mock_sched_entity_free - Destroys a mock scheduler entity + * + * @entity: Entity to destroy + * + * To be used from the test cases once done with the entity. + */ +void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity) +{ + drm_sched_entity_destroy(&entity->base); +} + +static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + + lockdep_assert_held(&sched->lock); + + job->flags |= DRM_MOCK_SCHED_JOB_DONE; + list_move_tail(&job->link, &sched->done_list); + dma_fence_signal(&job->hw_fence); + complete(&job->done); +} + +static enum hrtimer_restart +drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer) +{ + struct drm_mock_sched_job *job = + container_of(hrtimer, typeof(*job), timer); + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + struct drm_mock_sched_job *next; + ktime_t now = ktime_get(); + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &sched->job_list, link) { + if (!job->duration_us) + break; + + if (ktime_before(now, job->finish_at)) + break; + + sched->hw_timeline.cur_seqno = job->hw_fence.seqno; + drm_mock_sched_job_complete(job); + } + spin_unlock_irqrestore(&sched->lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * drm_mock_sched_job_new - Create a new mock scheduler job + * + * @test: KUnit test owning the job + * @entity: Scheduler entity of the job + * + * Returns: New mock scheduler job with allocation managed by the test + */ +struct drm_mock_sched_job * +drm_mock_sched_job_new(struct kunit *test, + struct drm_mock_sched_entity *entity) +{ + struct drm_mock_sched_job *job; + int ret; + + job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, job); + + ret = drm_sched_job_init(&job->base, + &entity->base, + 1, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + job->test = test; + + init_completion(&job->done); + spin_lock_init(&job->lock); + INIT_LIST_HEAD(&job->link); + hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + + return job; +} + +static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence) +{ + return "drm_mock_sched"; +} + +static const char * +drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, typeof(*job), hw_fence); + + return (const char *)job->base.sched->name; +} + +static void drm_mock_sched_hw_fence_release(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, typeof(*job), hw_fence); + + 
hrtimer_cancel(&job->timer);
+
+	/* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+	.get_driver_name = drm_mock_sched_hw_fence_driver_name,
+	.get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+	.release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_scheduler *sched =
+		drm_sched_to_mock_sched(sched_job->sched);
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+	dma_fence_init(&job->hw_fence,
+		       &drm_mock_sched_hw_fence_ops,
+		       &job->lock,
+		       sched->hw_timeline.context,
+		       atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+	dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+	spin_lock_irq(&sched->lock);
+	if (job->duration_us) {
+		ktime_t prev_finish_at = 0;
+
+		if (!list_empty(&sched->job_list)) {
+			struct drm_mock_sched_job *prev =
+				list_last_entry(&sched->job_list, typeof(*prev),
+						link);
+
+			prev_finish_at = prev->finish_at;
+		}
+
+		if (!prev_finish_at)
+			prev_finish_at = ktime_get();
+
+		job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+	}
+	list_add_tail(&job->link, &sched->job_list);
+	if (job->finish_at)
+		hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+	spin_unlock_irq(&sched->lock);
+
+	return &job->hw_fence;
+}
+
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+	job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+
+	return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_scheduler *sched =
+		drm_sched_to_mock_sched(sched_job->sched);
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+	unsigned long flags;
+
+	/* Remove from the scheduler done list. */
+	spin_lock_irqsave(&sched->lock, flags);
+	list_del(&job->link);
+	spin_unlock_irqrestore(&sched->lock, flags);
+	dma_fence_put(&job->hw_fence);
+
+	drm_sched_job_cleanup(sched_job);
+
+	/* Mock job itself is freed by the kunit framework. */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+	.run_job = mock_sched_run_job,
+	.timedout_job = mock_sched_timedout_job,
+	.free_job = mock_sched_free_job
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the scheduler
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+	struct drm_sched_init_args args = {
+		.ops = &drm_mock_scheduler_ops,
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = U32_MAX,
+		.hang_limit = 1,
+		.timeout = timeout,
+		.name = "drm-mock-scheduler",
+	};
+	struct drm_mock_scheduler *sched;
+	int ret;
+
+	sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, sched);
+
+	ret = drm_sched_init(&sched->base, &args);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	sched->test = test;
+	sched->hw_timeline.context = dma_fence_context_alloc(1);
+	atomic_set(&sched->hw_timeline.next_seqno, 0);
+	INIT_LIST_HEAD(&sched->job_list);
+	INIT_LIST_HEAD(&sched->done_list);
+	spin_lock_init(&sched->lock);
+
+	return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+	struct drm_mock_sched_job *job, *next;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	drm_sched_wqueue_stop(&sched->base);
+
+	/* Force complete all unfinished jobs. */
+	spin_lock_irqsave(&sched->lock, flags);
+	list_for_each_entry_safe(job, next, &sched->job_list, link)
+		list_move_tail(&job->link, &list);
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	list_for_each_entry(job, &list, link)
+		hrtimer_cancel(&job->timer);
+
+	spin_lock_irqsave(&sched->lock, flags);
+	list_for_each_entry_safe(job, next, &list, link)
+		drm_mock_sched_job_complete(job);
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	/*
+	 * Free completed jobs and jobs not yet processed by the DRM scheduler
+	 * free worker.
+	 */
+	spin_lock_irqsave(&sched->lock, flags);
+	list_for_each_entry_safe(job, next, &sched->done_list, link)
+		list_move_tail(&job->link, &list);
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	list_for_each_entry_safe(job, next, &list, link)
+		mock_sched_free_job(&job->base);
+
+	drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many jobs to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control of the simulated
+ * job execution timing. For example, a job submitted with no set duration will
+ * never complete before the test case advances the timeline by one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+				    unsigned int num)
+{
+	struct drm_mock_sched_job *job, *next;
+	unsigned int found = 0;
+	unsigned long flags;
+	LIST_HEAD(signal);
+
+	spin_lock_irqsave(&sched->lock, flags);
+	if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+			 sched->hw_timeline.cur_seqno))
+		goto unlock;
+	sched->hw_timeline.cur_seqno += num;
+	list_for_each_entry_safe(job, next, &sched->job_list, link) {
+		if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+			break;
+
+		drm_mock_sched_job_complete(job);
+		found++;
+	}
+unlock:
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	return found;
+}
+
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..27caf8285fb7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes as specified by the test case.
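+ *
+ * Taken together, a typical test case builds its fixture roughly as follows
+ * (a sketch using the helpers declared below; assertions and error checking
+ * omitted):
+ *
+ *	sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ *	entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL,
+ *					   sched);
+ *	job = drm_mock_sched_job_new(test, entity);
+ *	drm_mock_sched_job_submit(job);
+ *	drm_mock_sched_advance(sched, 1);	/* complete the job by hand */
+ *	drm_mock_sched_entity_free(entity);
+ *	drm_mock_sched_fini(sched);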
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @done_list: List of jobs completed by the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ *               @cur_seqno for implementing a struct dma_fence signaling the
+ *               simulated job completion.
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+	struct drm_gpu_scheduler base;
+
+	struct kunit *test;
+
+	spinlock_t lock;
+	struct list_head job_list;
+	struct list_head done_list;
+
+	struct {
+		u64 context;
+		atomic_t next_seqno;
+		unsigned int cur_seqno;
+	} hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+	struct drm_sched_entity base;
+
+	struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion signaling job completion.
+ * @flags: Flags designating job state.
+ * @link: List head element used for job tracking by the drm_mock_scheduler
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ *               timeline advance mode
+ * @finish_at: Absolute time when the jobs with set duration will complete
+ * @lock: Lock used for @hw_fence
+ * @hw_fence: Fence returned to DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+	struct drm_sched_job base;
+
+	struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE		0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT	0x2
+	unsigned long flags;
+
+	struct list_head link;
+	struct hrtimer timer;
+
+	unsigned int duration_us;
+	ktime_t finish_at;
+
+	spinlock_t lock;
+	struct dma_fence hw_fence;
+
+	struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+	return container_of(sched, struct drm_mock_scheduler, base);
+};
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+	return container_of(sched_entity, struct drm_mock_sched_entity, base);
+};
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct drm_mock_sched_job, base);
+};
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+					      long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+				    unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+			  enum drm_sched_priority priority,
+			  struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+		       struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+	drm_sched_job_arm(&job->base);
+	drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in microseconds
+ *
+ * Jobs with duration set will be automatically completed by the mock scheduler
+ * as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case calling drm_mock_sched_advance()
+ * will be required to bump the timeline.
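+ *
+ * For example (a sketch; the job, job2 and sched variables are assumed to
+ * exist, with both jobs submitted to the same scheduler):
+ *
+ *	drm_mock_sched_job_set_duration_us(job, 1000);
+ *	drm_mock_sched_job_submit(job);		/* completes by itself */
+ *
+ *	drm_mock_sched_job_submit(job2);	/* no duration set */
+ *	drm_mock_sched_advance(sched, 1);	/* needed to complete job2 */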
+ */ +static inline void +drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job, + unsigned int duration_us) +{ + job->duration_us = duration_us; +} + +/** + * drm_mock_sched_job_is_finished - Check if a job is finished + * + * @job: Job to check + * + * Returns: true if finished + */ +static inline bool +drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job) +{ + return job->flags & DRM_MOCK_SCHED_JOB_DONE; +} + +/** + * drm_mock_sched_job_wait_finished - Wait until a job is finished + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if finished within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout) +{ + if (job->flags & DRM_MOCK_SCHED_JOB_DONE) + return true; + + return wait_for_completion_timeout(&job->done, timeout) != 0; +} + +/** + * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if scheduled within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout) +{ + KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0); + + return dma_fence_wait_timeout(&job->base.s_fence->scheduled, + false, + timeout) != 0; +} + +#endif diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c new file mode 100644 index 000000000000..7230057e0594 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Valve Corporation */ + +#include <linux/delay.h> + +#include "sched_tests.h" + +/* + * DRM scheduler basic tests should check the basic functional correctness of + * the scheduler, including some very light smoke testing. More targeted tests, + * for example focusing on testing specific bugs and other more complicated test + * scenarios, should be implemented in separate source units. + */ + +static int drm_sched_basic_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + + return 0; +} + +static void drm_sched_basic_exit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + + drm_mock_sched_fini(sched); +} + +static int drm_sched_timeout_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test, HZ); + + return 0; +} + +static void drm_sched_basic_submit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + unsigned int i; + bool done; + + /* + * Submit one job to the scheduler and verify that it gets scheduled + * and completed only when the mock hw backend processes it. 
+ */
+
+	entity = drm_mock_sched_entity_new(test,
+					   DRM_SCHED_PRIORITY_NORMAL,
+					   sched);
+	job = drm_mock_sched_job_new(test, entity);
+
+	drm_mock_sched_job_submit(job);
+
+	done = drm_mock_sched_job_wait_scheduled(job, HZ);
+	KUNIT_ASSERT_TRUE(test, done);
+
+	done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+	KUNIT_ASSERT_FALSE(test, done);
+
+	i = drm_mock_sched_advance(sched, 1);
+	KUNIT_ASSERT_EQ(test, i, 1);
+
+	done = drm_mock_sched_job_wait_finished(job, HZ);
+	KUNIT_ASSERT_TRUE(test, done);
+
+	drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+	const char *description;
+	unsigned int queue_depth;
+	unsigned int num_entities;
+	unsigned int job_us;
+	bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+	{
+		.description = "A queue of jobs in a single entity",
+		.queue_depth = 100,
+		.job_us = 1000,
+		.num_entities = 1,
+	},
+	{
+		.description = "A chain of dependent jobs in a single entity",
+		.queue_depth = 100,
+		.job_us = 1000,
+		.num_entities = 1,
+		.dep_chain = true,
+	},
+	{
+		.description = "Multiple independent job queues",
+		.queue_depth = 100,
+		.job_us = 1000,
+		.num_entities = 4,
+	},
+	{
+		.description = "Multiple inter-dependent job queues",
+		.queue_depth = 100,
+		.job_us = 1000,
+		.num_entities = 4,
+		.dep_chain = true,
+	},
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+	strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+	const struct drm_sched_basic_params *params = test->param_value;
+	struct drm_mock_scheduler *sched = test->priv;
+	struct drm_mock_sched_job *job, *prev = NULL;
+	struct drm_mock_sched_entity **entity;
+	unsigned int i, cur_ent = 0;
+	bool done;
+
+	entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+			       GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, entity);
+
+	for (i = 0; i < params->num_entities; i++)
+		entity[i] = drm_mock_sched_entity_new(test,
+						      DRM_SCHED_PRIORITY_NORMAL,
+						      sched);
+
+	for (i = 0; i < params->queue_depth; i++) {
+		job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+		cur_ent %= params->num_entities;
+		drm_mock_sched_job_set_duration_us(job, params->job_us);
+		if (params->dep_chain && prev)
+			drm_sched_job_add_dependency(&job->base,
+						     dma_fence_get(&prev->base.s_fence->finished));
+		drm_mock_sched_job_submit(job);
+		prev = job;
+	}
+
+	done = drm_mock_sched_job_wait_finished(job, HZ);
+	KUNIT_ASSERT_TRUE(test, done);
+
+	for (i = 0; i < params->num_entities; i++)
+		drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+	struct drm_mock_sched_job *job, *mid, *prev = NULL;
+	struct drm_mock_scheduler *sched = test->priv;
+	struct drm_mock_sched_entity *entity[4];
+	const unsigned int qd = 100;
+	unsigned int i, cur_ent = 0;
+	bool done;
+
+	/*
+	 * Submit a queue of jobs across different entities with an explicit
+	 * chain of dependencies between them and trigger entity cleanup while
+	 * the queue is still being processed.
+ */ + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + if (prev) + drm_sched_job_add_dependency(&job->base, + dma_fence_get(&prev->base.s_fence->finished)); + drm_mock_sched_job_submit(job); + if (i == qd / 2) + mid = job; + prev = job; + } + + done = drm_mock_sched_job_wait_finished(mid, HZ); + KUNIT_ASSERT_TRUE(test, done); + + /* Exit with half of the queue still pending to be executed. */ + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_basic_tests[] = { + KUNIT_CASE(drm_sched_basic_submit), + KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params), + KUNIT_CASE(drm_sched_basic_entity_cleanup), + {} +}; + +static struct kunit_suite drm_sched_basic = { + .name = "drm_sched_basic_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_basic_tests, +}; + +static void drm_sched_basic_timeout(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + bool done; + + /* + * Submit a single job against a scheduler with the timeout configured + * and verify that the timeout handling will run if the backend fails + * to complete it in time. + */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, HZ / 2); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + 0); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + DRM_MOCK_SCHED_JOB_TIMEDOUT); + + drm_mock_sched_entity_free(entity); +} + +static struct kunit_case drm_sched_timeout_tests[] = { + KUNIT_CASE(drm_sched_basic_timeout), + {} +}; + +static struct kunit_suite drm_sched_timeout = { + .name = "drm_sched_basic_timeout_tests", + .init = drm_sched_timeout_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_timeout_tests, +}; + +static void drm_sched_priorities(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 100; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + bool done; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities. 
+ */ + + BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW); + BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT); + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static void drm_sched_change_priority(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities and while waiting for them to complete, periodically keep + * changing their priorities. + * + * We set up the queue-depth (qd) and job duration so the priority + * changing loop has some time to interact with submissions to the + * backend and job completions as they progress. + */ + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + drm_sched_entity_set_priority(&entity[cur_ent]->base, + (entity[cur_ent]->base.priority + 1) % + DRM_SCHED_PRIORITY_COUNT); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + usleep_range(200, 500); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_priority_tests[] = { + KUNIT_CASE(drm_sched_priorities), + KUNIT_CASE(drm_sched_change_priority), + {} +}; + +static struct kunit_suite drm_sched_priority = { + .name = "drm_sched_basic_priority_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_priority_tests, +}; + +static void drm_sched_test_modify_sched(struct kunit *test) +{ + unsigned int i, cur_ent = 0, cur_sched = 0; + struct drm_mock_sched_entity *entity[13]; + struct drm_mock_scheduler *sched[3]; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + + /* + * Submit a bunch of jobs against entities configured with different + * schedulers and while waiting for them to complete, periodically keep + * changing schedulers associated with each entity. + * + * We set up the queue-depth (qd) and job duration so the sched modify + * loop has some time to interact with submissions to the backend and + * job completions as they progress. + * + * For the number of schedulers and entities we use primes in order to + * perturb the entity->sched assignments with less of a regular pattern. 
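+	 * (13 entities and 3 schedulers are coprime counts, so the
+	 * cur_ent/cur_sched pair in the loop below repeats only every
+	 * 13 * 3 = 39 iterations, eventually visiting every
+	 * entity/scheduler combination.)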
+ */ + + for (i = 0; i < ARRAY_SIZE(sched); i++) + sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched[i % ARRAY_SIZE(sched)]); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + struct drm_gpu_scheduler *modify; + + usleep_range(200, 500); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + cur_sched++; + cur_sched %= ARRAY_SIZE(sched); + modify = &sched[cur_sched]->base; + drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify, + 1); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); + + for (i = 0; i < ARRAY_SIZE(sched); i++) + drm_mock_sched_fini(sched[i]); +} + +static struct kunit_case drm_sched_modify_sched_tests[] = { + KUNIT_CASE(drm_sched_test_modify_sched), + {} +}; + +static struct kunit_suite drm_sched_modify_sched = { + .name = "drm_sched_basic_modify_sched_tests", + .test_cases = drm_sched_modify_sched_tests, +}; + +static void drm_sched_test_credits(struct kunit *test) +{ + struct drm_mock_sched_entity *entity; + struct drm_mock_scheduler *sched; + struct drm_mock_sched_job *job[2]; + bool done; + int i; + + /* + * Check that the configured credit limit is respected. + */ + + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + sched->base.credit_limit = 1; + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + job[0] = drm_mock_sched_job_new(test, entity); + job[1] = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job[0]); + drm_mock_sched_job_submit(job[1]); + + done = drm_mock_sched_job_wait_scheduled(job[0], HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_FALSE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); + drm_mock_sched_fini(sched); +} + +static struct kunit_case drm_sched_credits_tests[] = { + KUNIT_CASE(drm_sched_test_credits), + {} +}; + +static struct kunit_suite drm_sched_credits = { + .name = "drm_sched_basic_credits_tests", + .test_cases = drm_sched_credits_tests, +}; + +kunit_test_suites(&drm_sched_basic, + &drm_sched_timeout, + &drm_sched_priority, + &drm_sched_modify_sched, + &drm_sched_credits); diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c index cb2816985305..a3447622a33c 100644 --- a/drivers/gpu/drm/sprd/sprd_dpu.c +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu, { struct platform_device *pdev = to_platform_device(dev); struct dpu_context *ctx = &dpu->ctx; - struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get I/O resource\n"); - return -EINVAL; - } - - ctx->base = devm_ioremap(dev, res->start, resource_size(res)); - if (!ctx->base) { + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) { dev_err(dev, "failed to map dpu 
registers\n"); - return -EFAULT; + return PTR_ERR(ctx->base); } ctx->irq = platform_get_irq(pdev, 0); diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 8fc26479bb6b..23b0e1dc547a 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi, { struct platform_device *pdev = to_platform_device(dev); struct dsi_context *ctx = &dsi->ctx; - struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get I/O resource\n"); - return -EINVAL; - } - - ctx->base = devm_ioremap(dev, res->start, resource_size(res)); - if (!ctx->base) { + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) { drm_err(dsi->drm, "failed to map dsi host registers\n"); - return -ENXIO; + return PTR_ERR(ctx->base); } ctx->regmap = devm_regmap_init(dev, ®map_tst_io, dsi, &byte_config); diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 063f82d23d80..8c529b0cca8b 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct device_node *vtg_np; struct sti_compositor *compo; - struct resource *res; unsigned int i; compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); @@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev) memcpy(&compo->data, of_match_node(compositor_of_match, np)->data, sizeof(struct sti_compositor_data)); - - /* Get Memory ressources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - DRM_ERROR("Get memory resource failed\n"); - return -ENXIO; - } - compo->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (compo->regs == NULL) { + compo->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(compo->regs)) { DRM_ERROR("Register mapping failed\n"); - return -ENXIO; + return PTR_ERR(compo->regs); } /* Get clock resources */ diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 4dcddd02629b..74a1eef4674e 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sti_dvo *dvo; - struct resource *res; struct device_node *np = dev->of_node; DRM_INFO("%s\n", __func__); @@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev) } dvo->dev = pdev->dev; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg"); - if (!res) { - DRM_ERROR("Invalid dvo resource\n"); - return -ENOMEM; - } - dvo->regs = devm_ioremap(dev, res->start, - resource_size(res)); - if (!dvo->regs) - return -ENOMEM; + dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg"); + if (IS_ERR(dvo->regs)) + return PTR_ERR(dvo->regs); dvo->clk_pix = devm_clk_get(dev, "dvo_pix"); if (IS_ERR(dvo->clk_pix)) { diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 14fdc00d2ba0..d202b6c1eb8f 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -693,7 +693,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) connector->hda = hda; - bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); + bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); if (!bridge) return -ENOMEM; @@ -750,16 
+750,9 @@ static int sti_hda_probe(struct platform_device *pdev) return -ENOMEM; hda->dev = pdev->dev; - - /* Get resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg"); - if (!res) { - DRM_ERROR("Invalid hda resource\n"); - return -ENOMEM; - } - hda->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hda->regs) - return -ENOMEM; + hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg"); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "video-dacs-ctrl"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 164a34d793d8..37b8d619066e 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sti_hdmi *hdmi; struct device_node *np = dev->of_node; - struct resource *res; struct device_node *ddc; int ret; @@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev) } hdmi->dev = pdev->dev; - - /* Get resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg"); - if (!res) { - DRM_ERROR("Invalid hdmi resource\n"); - ret = -ENOMEM; - goto release_adapter; - } - hdmi->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hdmi->regs) { - ret = -ENOMEM; + hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg"); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); goto release_adapter; } diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 0f658709c9d0..03684062309b 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *vtg_np; struct sti_hqvdp *hqvdp; - struct resource *res; DRM_DEBUG_DRIVER("\n"); @@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev) } hqvdp->dev = dev; - - /* Get Memory resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("Get memory resource failed\n"); - return -ENXIO; - } - hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hqvdp->regs) { + hqvdp->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hqvdp->regs)) { DRM_ERROR("Register mapping failed\n"); - return -ENXIO; + return PTR_ERR(hqvdp->regs); } /* Get clock resources */ diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index af6c06f448c4..6a464b035de8 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sti_tvout *tvout; - struct resource *res; DRM_INFO("%s\n", __func__); @@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev) return -ENOMEM; tvout->dev = dev; - - /* get memory resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg"); - if (!res) { - DRM_ERROR("Invalid glue resource\n"); - return -ENOMEM; - } - tvout->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!tvout->regs) - return -ENOMEM; + tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg"); + if (IS_ERR(tvout->regs)) + return PTR_ERR(tvout->regs); /* get reset resources */ tvout->reset = devm_reset_control_get(dev, "tvout"); 
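All of the sprd and sti conversions in this series follow the same refactor: an
open-coded platform_get_resource() plus devm_ioremap() pair becomes a single
devm_platform_ioremap_resource() call, which additionally requests the memory
region and propagates the real error code instead of a made-up -ENOMEM/-ENXIO.
A minimal sketch of the pattern (example_probe is a placeholder name, not taken
from any of the drivers above):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *regs;

		/* Looks up IORESOURCE_MEM 0, requests and ioremaps it. */
		regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);	/* propagate the real error */

		return 0;
	}
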
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 5ba469b711b5..ee81691b3203 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sti_vtg *vtg; - struct resource *res; int ret; vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL); if (!vtg) return -ENOMEM; - - /* Get Memory ressources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("Get memory resource failed\n"); - return -ENOMEM; - } - vtg->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!vtg->regs) { + vtg->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(vtg->regs)) { DRM_ERROR("failed to remap I/O memory\n"); - return -ENOMEM; + return PTR_ERR(vtg->regs); } vtg->irq = platform_get_irq(pdev, 0); diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c index 4613e8e3b8fd..a3ae9a93ce66 100644 --- a/drivers/gpu/drm/stm/lvds.c +++ b/drivers/gpu/drm/stm/lvds.c @@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int lvds_attach(struct drm_bridge *bridge, +static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct stm_lvds *lvds = bridge_to_stm_lvds(bridge); struct drm_connector *connector = &lvds->connector; - struct drm_encoder *encoder = bridge->encoder; int ret; - if (!bridge->encoder) { + if (!encoder) { drm_err(bridge->dev, "Parent encoder object not found\n"); return -ENODEV; } /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; /* No cloning support */ - bridge->encoder->possible_clones = 0; + encoder->possible_clones = 0; /* If we have a next bridge just attach it. */ if (lvds->next_bridge) - return drm_bridge_attach(bridge->encoder, lvds->next_bridge, + return drm_bridge_attach(encoder, lvds->next_bridge, bridge, flags); if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig new file mode 100644 index 000000000000..9c9884c7efc6 --- /dev/null +++ b/drivers/gpu/drm/sysfb/Kconfig @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: GPL-2.0-only + +menu "Drivers for system framebuffers" + depends on DRM + +config DRM_SYSFB_HELPER + tristate + depends on DRM + +config DRM_EFIDRM + tristate "EFI framebuffer driver" + depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST) + select APERTURE_HELPERS + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + select DRM_SYSFB_HELPER + select SYSFB + help + DRM driver for EFI framebuffers. + + This driver assumes that the display hardware has been initialized + by the firmware or bootloader before the kernel boots. Scanout + buffer, size, and display format must be provided via EFI interfaces. + +config DRM_OFDRM + tristate "Open Firmware display driver" + depends on DRM && MMU && OF && (PPC || COMPILE_TEST) + select APERTURE_HELPERS + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + select DRM_SYSFB_HELPER + help + DRM driver for Open Firmware framebuffers. + + This driver assumes that the display hardware has been initialized + by the Open Firmware before the kernel boots. 
Scanout buffer, size, + and display format must be provided via device tree. + +config DRM_SIMPLEDRM + tristate "Simple framebuffer driver" + depends on DRM && MMU + select APERTURE_HELPERS + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + select DRM_SYSFB_HELPER + help + DRM driver for simple platform-provided framebuffers. + + This driver assumes that the display hardware has been initialized + by the firmware or bootloader before the kernel boots. Scanout + buffer, size, and display format must be provided via device tree, + UEFI, VESA, etc. + + On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB + to use UEFI and VESA framebuffers. + +config DRM_VESADRM + tristate "VESA framebuffer driver" + depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST) + select APERTURE_HELPERS + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + select DRM_SYSFB_HELPER + select SYSFB + help + DRM driver for VESA framebuffers. + + This driver assumes that the display hardware has been initialized + by the firmware or bootloader before the kernel boots. Scanout + buffer, size, and display format must be provided via VBE interfaces. + +endmenu diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile new file mode 100644 index 000000000000..a156c496413d --- /dev/null +++ b/drivers/gpu/drm/sysfb/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +drm_sysfb_helper-y := \ + drm_sysfb.o \ + drm_sysfb_modeset.o +drm_sysfb_helper-$(CONFIG_SCREEN_INFO) += drm_sysfb_screen_info.o +obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o + +obj-$(CONFIG_DRM_EFIDRM) += efidrm.o +obj-$(CONFIG_DRM_OFDRM) += ofdrm.o +obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o +obj-$(CONFIG_DRM_VESADRM) += vesadrm.o diff --git a/drivers/gpu/drm/sysfb/drm_sysfb.c b/drivers/gpu/drm/sysfb/drm_sysfb.c new file mode 100644 index 000000000000..308f82153b15 --- /dev/null +++ b/drivers/gpu/drm/sysfb/drm_sysfb.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/export.h> +#include <linux/limits.h> +#include <linux/minmax.h> +#include <linux/module.h> + +#include <drm/drm_print.h> + +#include "drm_sysfb_helper.h" + +int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name, + u64 value, u32 max) +{ + if (value > min(max, INT_MAX)) { + drm_warn(dev, "%s of %llu exceeds maximum of %u\n", name, value, max); + return -EINVAL; + } + return value; +} +EXPORT_SYMBOL(drm_sysfb_get_validated_int); + +int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name, + u64 value, u32 max) +{ + if (!value) { + drm_warn(dev, "%s of 0 not allowed\n", name); + return -EINVAL; + } + return drm_sysfb_get_validated_int(dev, name, value, max); +} +EXPORT_SYMBOL(drm_sysfb_get_validated_int0); + +MODULE_DESCRIPTION("Helpers for DRM sysfb drivers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h new file mode 100644 index 000000000000..cb08a88242cc --- /dev/null +++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef DRM_SYSFB_HELPER_H +#define DRM_SYSFB_HELPER_H + +#include <linux/container_of.h> +#include <linux/iosys-map.h> + +#include <video/pixel_format.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_modes.h> + +struct drm_format_info; +struct drm_scanout_buffer; +struct screen_info; + +/* + * Input parsing + */ + +struct 
drm_sysfb_format {
+	struct pixel_format pixel;
+	u32 fourcc;
+};
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+				u64 value, u32 max);
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+				 u64 value, u32 max);
+
+#if defined(CONFIG_SCREEN_INFO)
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si);
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si);
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+					 const struct screen_info *si,
+					 struct resource *res);
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+			    const struct drm_format_info *format,
+			    unsigned int width, unsigned int height, u64 size);
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+				  unsigned int height, unsigned int stride, u64 size);
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+						      const struct drm_sysfb_format *formats,
+						      size_t nformats,
+						      const struct screen_info *si);
+#endif
+
+/*
+ * Display modes
+ */
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+				       unsigned int height,
+				       unsigned int width_mm,
+				       unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+	struct drm_device dev;
+
+	const u8 *edid; /* can be NULL */
+
+	/* hardware settings */
+	struct drm_display_mode fb_mode;
+	const struct drm_format_info *fb_format;
+	unsigned int fb_pitch;
+	unsigned int fb_gamma_lut_size;
+
+	/* hardware-framebuffer kernel address */
+	struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+	return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+					struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+					  struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+					   struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+					      struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+	((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+	DRM_FORMAT_MOD_LINEAR, \
+	DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+	.atomic_check = drm_sysfb_plane_helper_atomic_check, \
+	.atomic_update = drm_sysfb_plane_helper_atomic_update, \
+	.atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+	.get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+	.update_plane = drm_atomic_helper_update_plane, \
+	.disable_plane = drm_atomic_helper_disable_plane, \
+	DRM_GEM_SHADOW_PLANE_FUNCS
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+	struct drm_crtc_state base;
+
+	/* Primary-plane format; required for color mgmt.
*/ + const struct drm_format_info *format; +}; + +static inline struct drm_sysfb_crtc_state * +to_drm_sysfb_crtc_state(struct drm_crtc_state *base) +{ + return container_of(base, struct drm_sysfb_crtc_state, base); +} + +enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode); +int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state); + +#define DRM_SYSFB_CRTC_HELPER_FUNCS \ + .mode_valid = drm_sysfb_crtc_helper_mode_valid, \ + .atomic_check = drm_sysfb_crtc_helper_atomic_check + +void drm_sysfb_crtc_reset(struct drm_crtc *crtc); +struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc); +void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state); + +#define DRM_SYSFB_CRTC_FUNCS \ + .reset = drm_sysfb_crtc_reset, \ + .set_config = drm_atomic_helper_set_config, \ + .page_flip = drm_atomic_helper_page_flip, \ + .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \ + .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state + +/* + * Connector + */ + +int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector); + +#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \ + .get_modes = drm_sysfb_connector_helper_get_modes + +#define DRM_SYSFB_CONNECTOR_FUNCS \ + .reset = drm_atomic_helper_connector_reset, \ + .fill_modes = drm_helper_probe_single_connector_modes, \ + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \ + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state + +/* + * Mode config + */ + +#define DRM_SYSFB_MODE_CONFIG_FUNCS \ + .fb_create = drm_gem_fb_create_with_dirty, \ + .atomic_check = drm_atomic_helper_check, \ + .atomic_commit = drm_atomic_helper_commit + +#endif diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c new file mode 100644 index 000000000000..ffaa2522ab96 --- /dev/null +++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/export.h> +#include <linux/slab.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_panic.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "drm_sysfb_helper.h" + +struct drm_display_mode drm_sysfb_mode(unsigned int width, + unsigned int height, + unsigned int width_mm, + unsigned int height_mm) +{ + /* + * Assume a monitor resolution of 96 dpi to + * get a somewhat reasonable screen size. 
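+	 * For example, with DRM_MODE_RES_MM() (i.e. res * 25.4 mm / dpi)
+	 * a 1024x768 scanout comes out at roughly 270 mm x 203 mm.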
+ */ + if (!width_mm) + width_mm = DRM_MODE_RES_MM(width, 96ul); + if (!height_mm) + height_mm = DRM_MODE_RES_MM(height, 96ul); + + { + const struct drm_display_mode mode = { + DRM_MODE_INIT(60, width, height, width_mm, height_mm) + }; + + return mode; + } +} +EXPORT_SYMBOL(drm_sysfb_mode); + +/* + * Plane + */ + +int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *new_state) +{ + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane); + struct drm_shadow_plane_state *new_shadow_plane_state = + to_drm_shadow_plane_state(new_plane_state); + struct drm_framebuffer *new_fb = new_plane_state->fb; + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + struct drm_sysfb_crtc_state *new_sysfb_crtc_state; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; + + if (new_fb->format != sysfb->fb_format) { + void *buf; + + /* format conversion necessary; reserve buffer */ + buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state, + sysfb->fb_pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc); + + new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state); + new_sysfb_crtc_state->format = new_fb->format; + + return 0; +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check); + +void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + unsigned int dst_pitch = sysfb->fb_pitch; + const struct drm_format_info *dst_format = sysfb->fb_format; + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + int ret, idx; + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return; + + if (!drm_dev_enter(dev, &idx)) + goto out_drm_gem_fb_end_cpu_access; + + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct iosys_map dst = sysfb->fb_addr; + struct drm_rect dst_clip = plane_state->dst; + + if (!drm_rect_intersect(&dst_clip, &damage)) + continue; + + iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); + drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, + &damage, &shadow_plane_state->fmtcnv_state); + } + + drm_dev_exit(idx); +out_drm_gem_fb_end_cpu_access: + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update); + +void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); + struct iosys_map dst = sysfb->fb_addr; + struct 
drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */ + unsigned int dst_pitch = sysfb->fb_pitch; + const struct drm_format_info *dst_format = sysfb->fb_format; + struct drm_rect dst_clip; + unsigned long lines, linepixels, i; + int idx; + + drm_rect_init(&dst_clip, + plane_state->src_x >> 16, plane_state->src_y >> 16, + plane_state->src_w >> 16, plane_state->src_h >> 16); + + lines = drm_rect_height(&dst_clip); + linepixels = drm_rect_width(&dst_clip); + + if (!drm_dev_enter(dev, &idx)) + return; + + /* Clear buffer to black if disabled */ + dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip); + for (i = 0; i < lines; ++i) { + memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]); + dst_vmap += dst_pitch; + } + + drm_dev_exit(idx); +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable); + +int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb) +{ + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev); + + sb->width = sysfb->fb_mode.hdisplay; + sb->height = sysfb->fb_mode.vdisplay; + sb->format = sysfb->fb_format; + sb->pitch[0] = sysfb->fb_pitch; + sb->map[0] = sysfb->fb_addr; + + return 0; +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer); + +/* + * CRTC + */ + +static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state) +{ + __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base); + + kfree(sysfb_crtc_state); +} + +enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode); +} +EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid); + +int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state) +{ + struct drm_device *dev = crtc->dev; + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); + struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc); + int ret; + + if (!new_crtc_state->enable) + return 0; + + ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state); + if (ret) + return ret; + + if (new_crtc_state->color_mgmt_changed) { + const size_t gamma_lut_length = + sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut); + const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut; + + if (gamma_lut && (gamma_lut->length != gamma_lut_length)) { + drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length); + return -EINVAL; + } + } + + return 0; +} +EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check); + +void drm_sysfb_crtc_reset(struct drm_crtc *crtc) +{ + struct drm_sysfb_crtc_state *sysfb_crtc_state; + + if (crtc->state) + drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state)); + + sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL); + if (sysfb_crtc_state) + __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} +EXPORT_SYMBOL(drm_sysfb_crtc_reset); + +struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = crtc->state; + struct drm_sysfb_crtc_state *new_sysfb_crtc_state; + struct drm_sysfb_crtc_state *sysfb_crtc_state; + + if (drm_WARN_ON(dev, !crtc_state)) + return NULL; + + 
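	/* + * CRTC states are subclassed here: duplicate the base state with + * the atomic helper, then carry over the driver-private plane + * format that the gamma/colormap handling reads back at commit + * time. + */ +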
new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL); + if (!new_sysfb_crtc_state) + return NULL; + + sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); + + __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base); + new_sysfb_crtc_state->format = sysfb_crtc_state->format; + + return &new_sysfb_crtc_state->base; +} +EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state); + +void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) +{ + drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state)); +} +EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state); + +/* + * Connector + */ + +static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct drm_sysfb_device *sysfb = data; + const u8 *edid = sysfb->edid; + size_t off = block * EDID_LENGTH; + size_t end = off + len; + + if (!edid) + return -EINVAL; + if (end > EDID_LENGTH) + return -EINVAL; + memcpy(buf, &edid[off], len); + + /* + * We don't have EDID extensions available and reporting them + * will upset DRM helpers. Thus clear the extension field and + * update the checksum. Adding the extension flag to the checksum + * does this. + */ + buf[127] += buf[126]; + buf[126] = 0; + + return 0; +} + +int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector) +{ + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev); + const struct drm_edid *drm_edid; + + if (sysfb->edid) { + drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb); + drm_edid_connector_update(connector, drm_edid); + drm_edid_free(drm_edid); + } + + /* Return the fixed mode even with EDID */ + return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode); +} +EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes); diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c new file mode 100644 index 000000000000..0b3fb874a51f --- /dev/null +++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/export.h> +#include <linux/limits.h> +#include <linux/minmax.h> +#include <linux/screen_info.h> + +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> + +#include "drm_sysfb_helper.h" + +static s64 drm_sysfb_get_validated_size0(struct drm_device *dev, const char *name, + u64 value, u64 max) +{ + if (!value) { + drm_warn(dev, "%s of 0 not allowed\n", name); + return -EINVAL; + } else if (value > min(max, S64_MAX)) { + drm_warn(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max); + return -EINVAL; + } + return value; +} + +int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si) +{ + return drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX); +} +EXPORT_SYMBOL(drm_sysfb_get_width_si); + +int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si) +{ + return drm_sysfb_get_validated_int0(dev, "height", si->lfb_height, U16_MAX); +} +EXPORT_SYMBOL(drm_sysfb_get_height_si); + +struct resource *drm_sysfb_get_memory_si(struct drm_device *dev, + const struct screen_info *si, + struct resource *res) +{ + ssize_t num; + + num = screen_info_resources(si, res, 1); + if (!num) { + drm_warn(dev, "memory resource not found\n"); + return NULL; + } + + return res; +} +EXPORT_SYMBOL(drm_sysfb_get_memory_si); + +int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si, + const struct drm_format_info *format, + 
unsigned int width, unsigned int height, u64 size) +{ + u64 lfb_linelength = si->lfb_linelength; + + if (!lfb_linelength) + lfb_linelength = drm_format_info_min_pitch(format, 0, width); + + return drm_sysfb_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height)); +} +EXPORT_SYMBOL(drm_sysfb_get_stride_si); + +u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si, + unsigned int height, unsigned int stride, u64 size) +{ + u64 vsize = PAGE_ALIGN(height * stride); + + return drm_sysfb_get_validated_size0(dev, "visible size", vsize, size); +} +EXPORT_SYMBOL(drm_sysfb_get_visible_size_si); + +const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev, + const struct drm_sysfb_format *formats, + size_t nformats, + const struct screen_info *si) +{ + const struct drm_format_info *format = NULL; + u32 bits_per_pixel; + size_t i; + + bits_per_pixel = __screen_info_lfb_bits_per_pixel(si); + + for (i = 0; i < nformats; ++i) { + const struct pixel_format *f = &formats[i].pixel; + + if (bits_per_pixel == f->bits_per_pixel && + si->red_size == f->red.length && + si->red_pos == f->red.offset && + si->green_size == f->green.length && + si->green_pos == f->green.offset && + si->blue_size == f->blue.length && + si->blue_pos == f->blue.offset) { + format = drm_format_info(formats[i].fourcc); + break; + } + } + + if (!format) + drm_warn(dev, "No compatible color format found\n"); + + return format; +} +EXPORT_SYMBOL(drm_sysfb_get_format_si); diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c new file mode 100644 index 000000000000..46912924636a --- /dev/null +++ b/drivers/gpu/drm/sysfb/efidrm.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/aperture.h> +#include <linux/efi.h> +#include <linux/limits.h> +#include <linux/platform_device.h> +#include <linux/screen_info.h> + +#include <drm/clients/drm_client_setup.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_connector.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_fbdev_shmem.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_probe_helper.h> + +#include <video/edid.h> +#include <video/pixel_format.h> + +#include "drm_sysfb_helper.h" + +#define DRIVER_NAME "efidrm" +#define DRIVER_DESC "DRM driver for EFI platform devices" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev, + const struct screen_info *si) +{ + static const struct drm_sysfb_format formats[] = { + { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, }, + { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, }, + { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, }, + { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, }, + { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, }, + { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, }, + }; + + return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si); +} + +static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start, + resource_size_t len) +{ + u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC | + EFI_MEMORY_WT | EFI_MEMORY_WB; + u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC; + resource_size_t end = start + len; + 
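	/* + * Look up the EFI memory descriptor covering the framebuffer and + * intersect its attributes, so the mapping only uses a caching + * mode (UC/WC/WT/WB) that the firmware permits; without a usable + * descriptor, WC or UC is assumed. + */ +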
efi_memory_desc_t md; + u64 md_end; + + if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md)) + goto out; + + md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT); + if (end > md_end) + goto out; + + attribute &= md.attribute; + if (attribute) { + mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB; + mem_flags &= attribute; + } + +out: + return mem_flags; +} + +/* + * EFI device + */ + +struct efidrm_device { + struct drm_sysfb_device sysfb; + + /* modesetting */ + u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)]; + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; +}; + +/* + * Modesetting + */ + +static const u64 efidrm_primary_plane_format_modifiers[] = { + DRM_SYSFB_PLANE_FORMAT_MODIFIERS, +}; + +static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = { + DRM_SYSFB_PLANE_HELPER_FUNCS, +}; + +static const struct drm_plane_funcs efidrm_primary_plane_funcs = { + DRM_SYSFB_PLANE_FUNCS, + .destroy = drm_plane_cleanup, +}; + +static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = { + DRM_SYSFB_CRTC_HELPER_FUNCS, +}; + +static const struct drm_crtc_funcs efidrm_crtc_funcs = { + DRM_SYSFB_CRTC_FUNCS, + .destroy = drm_crtc_cleanup, +}; + +static const struct drm_encoder_funcs efidrm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = { + DRM_SYSFB_CONNECTOR_HELPER_FUNCS, +}; + +static const struct drm_connector_funcs efidrm_connector_funcs = { + DRM_SYSFB_CONNECTOR_FUNCS, + .destroy = drm_connector_cleanup, +}; + +static const struct drm_mode_config_funcs efidrm_mode_config_funcs = { + DRM_SYSFB_MODE_CONFIG_FUNCS, +}; + +/* + * Init / Cleanup + */ + +static struct efidrm_device *efidrm_device_create(struct drm_driver *drv, + struct platform_device *pdev) +{ + const struct screen_info *si; + const struct drm_format_info *format; + int width, height, stride; + u64 vsize, mem_flags; + struct resource resbuf; + struct resource *res; + struct efidrm_device *efi; + struct drm_sysfb_device *sysfb; + struct drm_device *dev; + struct resource *mem = NULL; + void __iomem *screen_base = NULL; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + unsigned long max_width, max_height; + size_t nformats; + int ret; + + si = dev_get_platdata(&pdev->dev); + if (!si) + return ERR_PTR(-ENODEV); + if (screen_info_video_type(si) != VIDEO_TYPE_EFI) + return ERR_PTR(-ENODEV); + + /* + * EFI DRM driver + */ + + efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev); + if (IS_ERR(efi)) + return ERR_CAST(efi); + sysfb = &efi->sysfb; + dev = &sysfb->dev; + platform_set_drvdata(pdev, dev); + + /* + * Hardware settings + */ + + format = efidrm_get_format_si(dev, si); + if (!format) + return ERR_PTR(-EINVAL); + width = drm_sysfb_get_width_si(dev, si); + if (width < 0) + return ERR_PTR(width); + height = drm_sysfb_get_height_si(dev, si); + if (height < 0) + return ERR_PTR(height); + res = drm_sysfb_get_memory_si(dev, si, &resbuf); + if (!res) + return ERR_PTR(-EINVAL); + stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res)); + if (stride < 0) + return ERR_PTR(stride); + vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res)); + if (!vsize) + return ERR_PTR(-EINVAL); + + drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n", + &format->format, width, height, stride); + 
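+	/* + * On x86 the boot stub may pass a firmware-provided EDID through + * edid_info; drm_edid_header_is_valid() scores the 8-byte header, + * so a result of 8 means the header is fully intact. + */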
+#ifdef CONFIG_X86 + if (drm_edid_header_is_valid(edid_info.dummy) == 8) + sysfb->edid = edid_info.dummy; +#endif + sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0); + sysfb->fb_format = format; + sysfb->fb_pitch = stride; + + /* + * Memory management + */ + + ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize); + if (ret) { + drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); + return ERR_PTR(ret); + } + + drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res); + + mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name); + if (!mem) { + /* + * We cannot make this fatal. Sometimes this comes from magic + * spaces our resource handlers simply don't know about. Use + * the I/O-memory resource as-is and try to map that instead. + */ + drm_warn(dev, "could not acquire memory region %pr\n", res); + mem = res; + } + + mem_flags = efidrm_get_mem_flags(dev, res->start, vsize); + + if (mem_flags & EFI_MEMORY_WC) + screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem)); + else if (mem_flags & EFI_MEMORY_UC) + screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + else if (mem_flags & EFI_MEMORY_WT) + screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem), + MEMREMAP_WT); + else if (mem_flags & EFI_MEMORY_WB) + screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem), + MEMREMAP_WB); + else + drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags); + if (!screen_base) + return ERR_PTR(-ENOMEM); + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); + + /* + * Modesetting + */ + + ret = drmm_mode_config_init(dev); + if (ret) + return ERR_PTR(ret); + + max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH); + max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT); + + dev->mode_config.min_width = width; + dev->mode_config.max_width = max_width; + dev->mode_config.min_height = height; + dev->mode_config.max_height = max_height; + dev->mode_config.preferred_depth = format->depth; + dev->mode_config.funcs = &efidrm_mode_config_funcs; + + /* Primary plane */ + + nformats = drm_fb_build_fourcc_list(dev, &format->format, 1, + efi->formats, ARRAY_SIZE(efi->formats)); + + primary_plane = &efi->primary_plane; + ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs, + efi->formats, nformats, + efidrm_primary_plane_format_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ERR_PTR(ret); + drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); + + /* CRTC */ + + crtc = &efi->crtc; + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &efidrm_crtc_funcs, NULL); + if (ret) + return ERR_PTR(ret); + drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs); + + /* Encoder */ + + encoder = &efi->encoder; + ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ERR_PTR(ret); + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* Connector */ + + connector = &efi->connector; + ret = drm_connector_init(dev, connector, &efidrm_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + if (ret) + return ERR_PTR(ret); + drm_connector_helper_add(connector, &efidrm_connector_helper_funcs); + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + width, height); + if (sysfb->edid) + drm_connector_attach_edid_property(connector); + + ret = 
drm_connector_attach_encoder(connector, encoder); + if (ret) + return ERR_PTR(ret); + + drm_mode_config_reset(dev); + + return efi; +} + +/* + * DRM driver + */ + +DEFINE_DRM_GEM_FOPS(efidrm_fops); + +static struct drm_driver efidrm_driver = { + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, + .fops = &efidrm_fops, +}; + +/* + * Platform driver + */ + +static int efidrm_probe(struct platform_device *pdev) +{ + struct efidrm_device *efi; + struct drm_sysfb_device *sysfb; + struct drm_device *dev; + int ret; + + efi = efidrm_device_create(&efidrm_driver, pdev); + if (IS_ERR(efi)) + return PTR_ERR(efi); + sysfb = &efi->sysfb; + dev = &sysfb->dev; + + ret = drm_dev_register(dev, 0); + if (ret) + return ret; + + drm_client_setup(dev, sysfb->fb_format); + + return 0; +} + +static void efidrm_remove(struct platform_device *pdev) +{ + struct drm_device *dev = platform_get_drvdata(pdev); + + drm_dev_unplug(dev); +} + +static struct platform_driver efidrm_platform_driver = { + .driver = { + .name = "efi-framebuffer", + }, + .probe = efidrm_probe, + .remove = efidrm_remove, +}; + +module_platform_driver(efidrm_platform_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c index 13491c0e704a..fddfe8bea9f7 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/sysfb/ofdrm.c @@ -12,6 +12,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> +#include <drm/drm_edid.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> @@ -21,7 +22,8 @@ #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> + +#include "drm_sysfb_helper.h" #define DRIVER_NAME "ofdrm" #define DRIVER_DESC "DRM driver for OF platform devices" @@ -76,20 +78,12 @@ enum ofdrm_model { static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value) { - if (value > INT_MAX) { - drm_err(dev, "invalid framebuffer %s of %u\n", name, value); - return -EINVAL; - } - return (int)value; + return drm_sysfb_get_validated_int(dev, name, value, INT_MAX); } static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value) { - if (!value) { - drm_err(dev, "invalid framebuffer %s of %u\n", name, value); - return -EINVAL; - } - return display_get_validated_int(dev, name, value); + return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX); } static const struct drm_format_info *display_get_validated_format(struct drm_device *dev, @@ -226,6 +220,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of return address; } +static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node, + u8 buf[EDID_LENGTH]) +{ + int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH); + + if (ret) + return NULL; + return buf; +} + static bool is_avivo(u32 vendor, u32 device) { /* This will match most R5xx */ @@ -290,22 +294,17 @@ struct ofdrm_device_funcs { }; struct ofdrm_device { - struct drm_device dev; - struct platform_device *pdev; + struct drm_sysfb_device sysfb; const struct ofdrm_device_funcs *funcs; - /* firmware-buffer settings */ - struct iosys_map screen_base; - struct drm_display_mode mode; - 
const struct drm_format_info *format; - unsigned int pitch; - /* colormap */ void __iomem *cmap_base; + u8 edid[EDID_LENGTH]; + /* modesetting */ - uint32_t formats[8]; + u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)]; struct drm_plane primary_plane; struct drm_crtc crtc; struct drm_encoder encoder; @@ -314,7 +313,7 @@ struct ofdrm_device { static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev) { - return container_of(dev, struct ofdrm_device, dev); + return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb); } /* @@ -354,7 +353,7 @@ static void ofdrm_pci_release(void *data) static int ofdrm_device_init_pci(struct ofdrm_device *odev) { - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; struct platform_device *pdev = to_platform_device(dev->dev); struct device_node *of_node = pdev->dev.of_node; struct pci_dev *pcidev; @@ -397,7 +396,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev) static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev, struct resource *fb_res) { - struct platform_device *pdev = to_platform_device(odev->dev.dev); + struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev); struct resource *res, *max_res = NULL; u32 i; @@ -423,7 +422,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev, static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node, int bar_no, unsigned long offset, unsigned long size) { - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; const __be32 *addr_p; u64 max_size, address; unsigned int flags; @@ -456,7 +455,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev, struct device_node *of_node, u64 fb_base) { - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; u64 address; void __iomem *cmap_base; @@ -618,7 +617,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev, cpu_to_be32(0x00), }; - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; u64 address; void __iomem *cmap_base; @@ -648,7 +647,7 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev, const struct drm_format_info *format) { - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; int i; switch (format->format) { @@ -687,7 +686,7 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev, const struct drm_format_info *format, struct drm_color_lut *lut) { - struct drm_device *dev = &odev->dev; + struct drm_device *dev = &odev->sysfb.dev; int i; switch (format->format) { @@ -731,205 +730,27 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev, * Modesetting */ -struct ofdrm_crtc_state { - struct drm_crtc_state base; - - /* Primary-plane format; required for color mgmt. 
*/ - const struct drm_format_info *format; +static const u64 ofdrm_primary_plane_format_modifiers[] = { + DRM_SYSFB_PLANE_FORMAT_MODIFIERS, }; -static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base) -{ - return container_of(base, struct ofdrm_crtc_state, base); -} - -static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state) -{ - __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base); - kfree(ofdrm_crtc_state); -} - -static const uint64_t ofdrm_primary_plane_format_modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *new_state) -{ - struct drm_device *dev = plane->dev; - struct ofdrm_device *odev = ofdrm_device_of_dev(dev); - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane); - struct drm_shadow_plane_state *new_shadow_plane_state = - to_drm_shadow_plane_state(new_plane_state); - struct drm_framebuffer *new_fb = new_plane_state->fb; - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - struct ofdrm_crtc_state *new_ofdrm_crtc_state; - int ret; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc); - - ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); - if (ret) - return ret; - else if (!new_plane_state->visible) - return 0; - - if (new_fb->format != odev->format) { - void *buf; - - /* format conversion necessary; reserve buffer */ - buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state, - odev->pitch, GFP_KERNEL); - if (!buf) - return -ENOMEM; - } - - new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc); - - new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state); - new_ofdrm_crtc_state->format = new_fb->format; - - return 0; -} - -static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_device *dev = plane->dev; - struct ofdrm_device *odev = ofdrm_device_of_dev(dev); - struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); - struct drm_framebuffer *fb = plane_state->fb; - unsigned int dst_pitch = odev->pitch; - const struct drm_format_info *dst_format = odev->format; - struct drm_atomic_helper_damage_iter iter; - struct drm_rect damage; - int ret, idx; - - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); - if (ret) - return; - - if (!drm_dev_enter(dev, &idx)) - goto out_drm_gem_fb_end_cpu_access; - - drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); - drm_atomic_for_each_plane_damage(&iter, &damage) { - struct iosys_map dst = odev->screen_base; - struct drm_rect dst_clip = plane_state->dst; - - if (!drm_rect_intersect(&dst_clip, &damage)) - continue; - - iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); - drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, - &damage, &shadow_plane_state->fmtcnv_state); - } - - drm_dev_exit(idx); -out_drm_gem_fb_end_cpu_access: - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); -} - -static void ofdrm_primary_plane_helper_atomic_disable(struct 
drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_device *dev = plane->dev; - struct ofdrm_device *odev = ofdrm_device_of_dev(dev); - struct iosys_map dst = odev->screen_base; - struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); - void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */ - unsigned int dst_pitch = odev->pitch; - const struct drm_format_info *dst_format = odev->format; - struct drm_rect dst_clip; - unsigned long lines, linepixels, i; - int idx; - - drm_rect_init(&dst_clip, - plane_state->src_x >> 16, plane_state->src_y >> 16, - plane_state->src_w >> 16, plane_state->src_h >> 16); - - lines = drm_rect_height(&dst_clip); - linepixels = drm_rect_width(&dst_clip); - - if (!drm_dev_enter(dev, &idx)) - return; - - /* Clear buffer to black if disabled */ - dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip); - for (i = 0; i < lines; ++i) { - memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]); - dst_vmap += dst_pitch; - } - - drm_dev_exit(idx); -} - static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = { - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = ofdrm_primary_plane_helper_atomic_check, - .atomic_update = ofdrm_primary_plane_helper_atomic_update, - .atomic_disable = ofdrm_primary_plane_helper_atomic_disable, + DRM_SYSFB_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs ofdrm_primary_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, + DRM_SYSFB_PLANE_FUNCS, .destroy = drm_plane_cleanup, - DRM_GEM_SHADOW_PLANE_FUNCS, }; -static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) -{ - struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev); - - return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode); -} - -static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *new_state) -{ - static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut); - - struct drm_device *dev = crtc->dev; - struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc); - int ret; - - if (!new_crtc_state->enable) - return 0; - - ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state); - if (ret) - return ret; - - if (new_crtc_state->color_mgmt_changed) { - struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut; - - if (gamma_lut && (gamma_lut->length != gamma_lut_length)) { - drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length); - return -EINVAL; - } - } - - return 0; -} - static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev); struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state); + struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); if (crtc_state->enable && crtc_state->color_mgmt_changed) { - const struct drm_format_info *format = ofdrm_crtc_state->format; + const struct drm_format_info *format = sysfb_crtc_state->format; if (crtc_state->gamma_lut) ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data); @@ -938,91 +759,31 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato } } -/* - * The CRTC is always enabled. 
Screen updates are performed by - * the primary plane's atomic_update function. Disabling clears - * the screen in the primary plane's atomic_disable function. - */ static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = { - .mode_valid = ofdrm_crtc_helper_mode_valid, - .atomic_check = ofdrm_crtc_helper_atomic_check, + DRM_SYSFB_CRTC_HELPER_FUNCS, .atomic_flush = ofdrm_crtc_helper_atomic_flush, }; -static void ofdrm_crtc_reset(struct drm_crtc *crtc) -{ - struct ofdrm_crtc_state *ofdrm_crtc_state = - kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL); - - if (crtc->state) - ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state)); - - if (ofdrm_crtc_state) - __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base); - else - __drm_atomic_helper_crtc_reset(crtc, NULL); -} - -static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_crtc_state *crtc_state = crtc->state; - struct ofdrm_crtc_state *new_ofdrm_crtc_state; - struct ofdrm_crtc_state *ofdrm_crtc_state; - - if (drm_WARN_ON(dev, !crtc_state)) - return NULL; - - new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL); - if (!new_ofdrm_crtc_state) - return NULL; - - ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state); - - __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base); - new_ofdrm_crtc_state->format = ofdrm_crtc_state->format; - - return &new_ofdrm_crtc_state->base; -} - -static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) -{ - ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state)); -} - static const struct drm_crtc_funcs ofdrm_crtc_funcs = { - .reset = ofdrm_crtc_reset, + DRM_SYSFB_CRTC_FUNCS, .destroy = drm_crtc_cleanup, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state, - .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state, }; -static int ofdrm_connector_helper_get_modes(struct drm_connector *connector) -{ - struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev); - - return drm_connector_helper_get_modes_fixed(connector, &odev->mode); -} +static const struct drm_encoder_funcs ofdrm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = { - .get_modes = ofdrm_connector_helper_get_modes, + DRM_SYSFB_CONNECTOR_HELPER_FUNCS, }; static const struct drm_connector_funcs ofdrm_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, + DRM_SYSFB_CONNECTOR_FUNCS, .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = { - .fb_create = drm_gem_fb_create_with_dirty, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, + DRM_SYSFB_MODE_CONFIG_FUNCS, }; /* @@ -1072,32 +833,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = { .cmap_write = ofdrm_qemu_cmap_write, }; -static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height) -{ - /* - * Assume a monitor resolution of 96 dpi to - * get a somewhat reasonable screen size. 
- */ - const struct drm_display_mode mode = { - DRM_MODE_INIT(60, width, height, - DRM_MODE_RES_MM(width, 96ul), - DRM_MODE_RES_MM(height, 96ul)) - }; - - return mode; -} - static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, struct platform_device *pdev) { struct device_node *of_node = pdev->dev.of_node; struct ofdrm_device *odev; + struct drm_sysfb_device *sysfb; struct drm_device *dev; enum ofdrm_model model; bool big_endian; int width, height, depth, linebytes; const struct drm_format_info *format; u64 address; + const u8 *edid; resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize; struct resource *res, *mem; void __iomem *screen_base; @@ -1109,10 +857,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, size_t nformats; int ret; - odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev); + odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev); if (IS_ERR(odev)) return ERR_CAST(odev); - dev = &odev->dev; + sysfb = &odev->sysfb; + dev = &sysfb->dev; platform_set_drvdata(pdev, dev); ret = ofdrm_device_init_pci(odev); @@ -1246,16 +995,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, } } + /* EDID is optional */ + edid = display_get_edid_of(dev, of_node, odev->edid); + /* * Firmware framebuffer */ - iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base); - odev->mode = ofdrm_mode(width, height); - odev->format = format; - odev->pitch = linebytes; + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); + sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0); + sysfb->fb_format = format; + sysfb->fb_pitch = linebytes; + if (odev->cmap_base) + sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE; + sysfb->edid = edid; - drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode)); + drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode)); drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n", &format->format, width, height, linebytes); @@ -1302,15 +1057,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, return ERR_PTR(ret); drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs); - if (odev->cmap_base) { - drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE); - drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE); + if (sysfb->fb_gamma_lut_size) { + ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size); + if (!ret) + drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size); } /* Encoder */ encoder = &odev->encoder; - ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE); + ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); if (ret) return ERR_PTR(ret); encoder->possible_crtcs = drm_crtc_mask(crtc); @@ -1326,6 +1082,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv, drm_connector_set_panel_orientation_with_quirk(connector, DRM_MODE_PANEL_ORIENTATION_UNKNOWN, width, height); + if (edid) + drm_connector_attach_edid_property(connector); ret = drm_connector_attach_encoder(connector, encoder); if (ret) @@ -1360,19 +1118,21 @@ static struct drm_driver ofdrm_driver = { static int ofdrm_probe(struct platform_device *pdev) { struct ofdrm_device *odev; + struct drm_sysfb_device *sysfb; struct drm_device *dev; int ret; odev = ofdrm_device_create(&ofdrm_driver, pdev); if (IS_ERR(odev)) return PTR_ERR(odev); - dev = &odev->dev; + sysfb = &odev->sysfb; + dev = &sysfb->dev; ret = drm_dev_register(dev, 
0); if (ret) return ret; - drm_client_setup(dev, odev->format); + drm_client_setup(dev, sysfb->fb_format); return 0; } diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c index 5d9ab8adf800..a1c3119330de 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/sysfb/simpledrm.c @@ -14,7 +14,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_connector.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -26,9 +25,10 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_panic.h> #include <drm/drm_probe_helper.h> +#include "drm_sysfb_helper.h" + #define DRIVER_NAME "simpledrm" #define DRIVER_DESC "DRM driver for simple-framebuffer platform devices" #define DRIVER_MAJOR 1 @@ -42,24 +42,14 @@ static int simplefb_get_validated_int(struct drm_device *dev, const char *name, uint32_t value) { - if (value > INT_MAX) { - drm_err(dev, "simplefb: invalid framebuffer %s of %u\n", - name, value); - return -EINVAL; - } - return (int)value; + return drm_sysfb_get_validated_int(dev, name, value, INT_MAX); } static int simplefb_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value) { - if (!value) { - drm_err(dev, "simplefb: invalid framebuffer %s of %u\n", - name, value); - return -EINVAL; - } - return simplefb_get_validated_int(dev, name, value); + return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX); } static const struct drm_format_info * @@ -217,7 +207,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node) */ struct simpledrm_device { - struct drm_device dev; + struct drm_sysfb_device sysfb; /* clocks */ #if defined CONFIG_OF && defined CONFIG_COMMON_CLK @@ -236,28 +226,14 @@ struct simpledrm_device { struct device_link **pwr_dom_links; #endif - /* simplefb settings */ - struct drm_display_mode mode; - const struct drm_format_info *format; - unsigned int pitch; - - /* memory management */ - struct iosys_map screen_base; - /* modesetting */ - uint32_t formats[8]; - size_t nformats; + u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)]; struct drm_plane primary_plane; struct drm_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; }; -static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) -{ - return container_of(dev, struct simpledrm_device, dev); -} - /* * Hardware */ @@ -284,7 +260,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) static void simpledrm_device_release_clocks(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->clk_count; ++i) { @@ -297,7 +273,7 @@ static void simpledrm_device_release_clocks(void *res) static int simpledrm_device_init_clocks(struct simpledrm_device *sdev) { - struct drm_device *dev = &sdev->dev; + struct drm_device *dev = &sdev->sysfb.dev; struct platform_device *pdev = to_platform_device(dev->dev); struct device_node *of_node = pdev->dev.of_node; struct clk *clock; @@ -382,7 +358,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev) static void simpledrm_device_release_regulators(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->regulator_count; ++i) { @@ -395,7 +371,7 @@ static void 
simpledrm_device_release_regulators(void *res) static int simpledrm_device_init_regulators(struct simpledrm_device *sdev) { - struct drm_device *dev = &sdev->dev; + struct drm_device *dev = &sdev->sysfb.dev; struct platform_device *pdev = to_platform_device(dev->dev); struct device_node *of_node = pdev->dev.of_node; struct property *prop; @@ -516,7 +492,7 @@ static void simpledrm_device_detach_genpd(void *res) static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev) { - struct device *dev = sdev->dev.dev; + struct device *dev = sdev->sysfb.dev.dev; int i; sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -548,7 +524,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev) simpledrm_device_detach_genpd(sdev); return ret; } - drm_warn(&sdev->dev, + drm_warn(&sdev->sysfb.dev, "pm_domain_attach_by_id(%u) failed: %d\n", i, ret); continue; } @@ -559,7 +535,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev) DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!sdev->pwr_dom_links[i]) - drm_warn(&sdev->dev, "failed to link power-domain %d\n", i); + drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i); } return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev); @@ -575,210 +551,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev) * Modesetting */ -static const uint64_t simpledrm_primary_plane_format_modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID +static const u64 simpledrm_primary_plane_format_modifiers[] = { + DRM_SYSFB_PLANE_FORMAT_MODIFIERS, }; -static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_shadow_plane_state *new_shadow_plane_state = - to_drm_shadow_plane_state(new_plane_state); - struct drm_framebuffer *new_fb = new_plane_state->fb; - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - struct drm_device *dev = plane->dev; - struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); - int ret; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); - - ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); - if (ret) - return ret; - else if (!new_plane_state->visible) - return 0; - - if (new_fb->format != sdev->format) { - void *buf; - - /* format conversion necessary; reserve buffer */ - buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state, - sdev->pitch, GFP_KERNEL); - if (!buf) - return -ENOMEM; - } - - return 0; -} - -static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); - struct drm_framebuffer *fb = plane_state->fb; - struct drm_device *dev = plane->dev; - struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); - struct drm_atomic_helper_damage_iter iter; - struct drm_rect damage; - int ret, idx; - - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); - if (ret) - return; - - if (!drm_dev_enter(dev, &idx)) - goto 
out_drm_gem_fb_end_cpu_access; - - drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); - drm_atomic_for_each_plane_damage(&iter, &damage) { - struct drm_rect dst_clip = plane_state->dst; - struct iosys_map dst = sdev->screen_base; - - if (!drm_rect_intersect(&dst_clip, &damage)) - continue; - - iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip)); - drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, - fb, &damage, &shadow_plane_state->fmtcnv_state); - } - - drm_dev_exit(idx); -out_drm_gem_fb_end_cpu_access: - drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); -} - -static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_device *dev = plane->dev; - struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); - int idx; - - if (!drm_dev_enter(dev, &idx)) - return; - - /* Clear screen to black if disabled */ - iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay); - - drm_dev_exit(idx); -} - -static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane, - struct drm_scanout_buffer *sb) -{ - struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev); - - sb->width = sdev->mode.hdisplay; - sb->height = sdev->mode.vdisplay; - sb->format = sdev->format; - sb->pitch[0] = sdev->pitch; - sb->map[0] = sdev->screen_base; - - return 0; -} - static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = { - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = simpledrm_primary_plane_helper_atomic_check, - .atomic_update = simpledrm_primary_plane_helper_atomic_update, - .atomic_disable = simpledrm_primary_plane_helper_atomic_disable, - .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer, + DRM_SYSFB_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs simpledrm_primary_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, + DRM_SYSFB_PLANE_FUNCS, .destroy = drm_plane_cleanup, - DRM_GEM_SHADOW_PLANE_FUNCS, }; -static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) -{ - struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev); - - return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode); -} - -/* - * The CRTC is always enabled. Screen updates are performed by - * the primary plane's atomic_update function. Disabling clears - * the screen in the primary plane's atomic_disable function. 
- */ static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = { - .mode_valid = simpledrm_crtc_helper_mode_valid, - .atomic_check = drm_crtc_helper_atomic_check, + DRM_SYSFB_CRTC_HELPER_FUNCS, }; static const struct drm_crtc_funcs simpledrm_crtc_funcs = { - .reset = drm_atomic_helper_crtc_reset, + DRM_SYSFB_CRTC_FUNCS, .destroy = drm_crtc_cleanup, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; static const struct drm_encoder_funcs simpledrm_encoder_funcs = { .destroy = drm_encoder_cleanup, }; -static int simpledrm_connector_helper_get_modes(struct drm_connector *connector) -{ - struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev); - - return drm_connector_helper_get_modes_fixed(connector, &sdev->mode); -} - static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = { - .get_modes = simpledrm_connector_helper_get_modes, + DRM_SYSFB_CONNECTOR_HELPER_FUNCS, }; static const struct drm_connector_funcs simpledrm_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, + DRM_SYSFB_CONNECTOR_FUNCS, .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = { - .fb_create = drm_gem_fb_create_with_dirty, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, + DRM_SYSFB_MODE_CONFIG_FUNCS, }; /* * Init / Cleanup */ -static struct drm_display_mode simpledrm_mode(unsigned int width, - unsigned int height, - unsigned int width_mm, - unsigned int height_mm) -{ - const struct drm_display_mode mode = { - DRM_MODE_INIT(60, width, height, width_mm, height_mm) - }; - - return mode; -} - static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, struct platform_device *pdev) { const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev); struct device_node *of_node = pdev->dev.of_node; struct simpledrm_device *sdev; + struct drm_sysfb_device *sysfb; struct drm_device *dev; int width, height, stride; int width_mm = 0, height_mm = 0; @@ -793,10 +615,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, size_t nformats; int ret; - sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev); + sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev); if (IS_ERR(sdev)) return ERR_CAST(sdev); - dev = &sdev->dev; + sysfb = &sdev->sysfb; + dev = &sysfb->dev; platform_set_drvdata(pdev, sdev); /* @@ -858,20 +681,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, return ERR_PTR(-EINVAL); } - /* - * Assume a monitor resolution of 96 dpi if physical dimensions - * are not specified to get a somewhat reasonable screen size. 
- */ - if (!width_mm) - width_mm = DRM_MODE_RES_MM(width, 96ul); - if (!height_mm) - height_mm = DRM_MODE_RES_MM(height, 96ul); - - sdev->mode = simpledrm_mode(width, height, width_mm, height_mm); - sdev->format = format; - sdev->pitch = stride; + sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm); + sysfb->fb_format = format; + sysfb->fb_pitch = stride; - drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode)); + drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode)); drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n", &format->format, width, height, stride); @@ -895,7 +709,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (IS_ERR(screen_base)) return screen_base; - iosys_map_set_vaddr(&sdev->screen_base, screen_base); + iosys_map_set_vaddr(&sysfb->fb_addr, screen_base); } else { void __iomem *screen_base; @@ -928,7 +742,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, if (!screen_base) return ERR_PTR(-ENOMEM); - iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base); + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); } /* @@ -1027,19 +841,21 @@ static struct drm_driver simpledrm_driver = { static int simpledrm_probe(struct platform_device *pdev) { struct simpledrm_device *sdev; + struct drm_sysfb_device *sysfb; struct drm_device *dev; int ret; sdev = simpledrm_device_create(&simpledrm_driver, pdev); if (IS_ERR(sdev)) return PTR_ERR(sdev); - dev = &sdev->dev; + sysfb = &sdev->sysfb; + dev = &sysfb->dev; ret = drm_dev_register(dev, 0); if (ret) return ret; - drm_client_setup(dev, sdev->format); + drm_client_setup(dev, sdev->sysfb.fb_format); return 0; } @@ -1047,7 +863,7 @@ static int simpledrm_probe(struct platform_device *pdev) static void simpledrm_remove(struct platform_device *pdev) { struct simpledrm_device *sdev = platform_get_drvdata(pdev); - struct drm_device *dev = &sdev->dev; + struct drm_device *dev = &sdev->sysfb.dev; drm_dev_unplug(dev); } diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c new file mode 100644 index 000000000000..4d62c78e7d1e --- /dev/null +++ b/drivers/gpu/drm/sysfb/vesadrm.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/aperture.h> +#include <linux/ioport.h> +#include <linux/limits.h> +#include <linux/platform_device.h> +#include <linux/screen_info.h> + +#include <drm/clients/drm_client_setup.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_connector.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_edid.h> +#include <drm/drm_fbdev_shmem.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_probe_helper.h> + +#include <video/edid.h> +#include <video/pixel_format.h> +#include <video/vga.h> + +#include "drm_sysfb_helper.h" + +#define DRIVER_NAME "vesadrm" +#define DRIVER_DESC "DRM driver for VESA platform devices" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +#define VESADRM_GAMMA_LUT_SIZE 256 + +static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev, + const struct screen_info *si) +{ + static const struct drm_sysfb_format formats[] = { + { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, }, + { 
PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, }, + { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, }, + { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, }, + { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, }, + }; + + return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si); +} + +/* + * VESA device + */ + +struct vesadrm_device { + struct drm_sysfb_device sysfb; + +#if defined(CONFIG_X86_32) + /* VESA Protected Mode interface */ + struct { + const u8 *PrimaryPalette; + } pmi; +#endif + + void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index, + u16 red, u16 green, u16 blue); + + /* modesetting */ + u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)]; + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; +}; + +static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev) +{ + return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb); +} + +/* + * Palette + */ + +static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index, + u16 red, u16 green, u16 blue) +{ + u8 i8, r8, g8, b8; + + if (index > 255) + return; + + i8 = index; + r8 = red >> 8; + g8 = green >> 8; + b8 = blue >> 8; + + outb_p(i8, VGA_PEL_IW); + outb_p(r8, VGA_PEL_D); + outb_p(g8, VGA_PEL_D); + outb_p(b8, VGA_PEL_D); +} + +#if defined(CONFIG_X86_32) +static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index, + u16 red, u16 green, u16 blue) +{ + u32 i32 = index; + struct { + u8 b8; + u8 g8; + u8 r8; + u8 x8; + } PaletteEntry = { + blue >> 8, + green >> 8, + red >> 8, + 0x00, + }; + + if (index > 255) + return; + + __asm__ __volatile__ ( + "call *(%%esi)" + : /* no return value */ + : "a" (0x4f09), + "b" (0), + "c" (1), + "d" (i32), + "D" (&PaletteEntry), + "S" (&vesa->pmi.PrimaryPalette)); +} +#endif + +static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa, + const struct drm_format_info *format) +{ + struct drm_device *dev = &vesa->sysfb.dev; + size_t i; + u16 r16, g16, b16; + + switch (format->format) { + case DRM_FORMAT_XRGB1555: + for (i = 0; i < 32; ++i) { + r16 = i * 8 + i / 4; + r16 |= (r16 << 8) | r16; + vesa->cmap_write(vesa, i, r16, r16, r16); + } + break; + case DRM_FORMAT_RGB565: + for (i = 0; i < 32; ++i) { + r16 = i * 8 + i / 4; + r16 |= (r16 << 8) | r16; + g16 = i * 4 + i / 16; + g16 |= (g16 << 8) | g16; + b16 = r16; + vesa->cmap_write(vesa, i, r16, g16, b16); + } + for (i = 32; i < 64; ++i) { + g16 = i * 4 + i / 16; + g16 |= (g16 << 8) | g16; + vesa->cmap_write(vesa, i, 0, g16, 0); + } + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_BGRX8888: + for (i = 0; i < 256; ++i) { + r16 = (i << 8) | i; + vesa->cmap_write(vesa, i, r16, r16, r16); + } + break; + default: + drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa, + const struct drm_format_info *format, + struct drm_color_lut *lut) +{ + struct drm_device *dev = &vesa->sysfb.dev; + size_t i; + u16 r16, g16, b16; + + switch (format->format) { + case DRM_FORMAT_XRGB1555: + for (i = 0; i < 32; ++i) { + r16 = lut[i * 8 + i / 4].red; + g16 = lut[i * 8 + i / 4].green; + b16 = lut[i * 8 + i / 4].blue; + vesa->cmap_write(vesa, i, r16, g16, b16); + } + break; + case DRM_FORMAT_RGB565: + for (i = 0; i < 32; ++i) { + r16 = lut[i * 8 + i / 4].red; + g16 = lut[i * 4 + i / 16].green; + b16 = lut[i * 8 + i / 4].blue; + vesa->cmap_write(vesa, i, r16, g16, b16); + } + for (i = 32; i 
< 64; ++i) + vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0); + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_BGRX8888: + for (i = 0; i < 256; ++i) + vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue); + break; + default: + drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +/* + * Modesetting + */ + +static const u64 vesadrm_primary_plane_format_modifiers[] = { + DRM_SYSFB_PLANE_FORMAT_MODIFIERS, +}; + +static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = { + DRM_SYSFB_PLANE_HELPER_FUNCS, +}; + +static const struct drm_plane_funcs vesadrm_primary_plane_funcs = { + DRM_SYSFB_PLANE_FUNCS, + .destroy = drm_plane_cleanup, +}; + +static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); + struct vesadrm_device *vesa = to_vesadrm_device(dev); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); + + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. + */ + if (crtc_state->enable && crtc_state->color_mgmt_changed) { + if (sysfb_crtc_state->format == sysfb->fb_format) { + if (crtc_state->gamma_lut) + vesadrm_set_gamma_lut(vesa, + sysfb_crtc_state->format, + crtc_state->gamma_lut->data); + else + vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format); + } else { + vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format); + } + } +} + +static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = { + DRM_SYSFB_CRTC_HELPER_FUNCS, + .atomic_flush = vesadrm_crtc_helper_atomic_flush, +}; + +static const struct drm_crtc_funcs vesadrm_crtc_funcs = { + DRM_SYSFB_CRTC_FUNCS, + .destroy = drm_crtc_cleanup, +}; + +static const struct drm_encoder_funcs vesadrm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = { + DRM_SYSFB_CONNECTOR_HELPER_FUNCS, +}; + +static const struct drm_connector_funcs vesadrm_connector_funcs = { + DRM_SYSFB_CONNECTOR_FUNCS, + .destroy = drm_connector_cleanup, +}; + +static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = { + DRM_SYSFB_MODE_CONFIG_FUNCS, +}; + +/* + * Init / Cleanup + */ + +static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv, + struct platform_device *pdev) +{ + const struct screen_info *si; + const struct drm_format_info *format; + int width, height, stride; + u64 vsize; + struct resource resbuf; + struct resource *res; + struct vesadrm_device *vesa; + struct drm_sysfb_device *sysfb; + struct drm_device *dev; + struct resource *mem = NULL; + void __iomem *screen_base; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + unsigned long max_width, max_height; + size_t nformats; + int ret; + + si = dev_get_platdata(&pdev->dev); + if (!si) + return ERR_PTR(-ENODEV); + if (screen_info_video_type(si) != VIDEO_TYPE_VLFB) + return ERR_PTR(-ENODEV); + + /* + * VESA DRM driver + */ + + vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev); + if (IS_ERR(vesa)) + return ERR_CAST(vesa); + sysfb = &vesa->sysfb; + dev = &sysfb->dev; + platform_set_drvdata(pdev, dev); + + /* + * Hardware settings + */ + + format = 
vesadrm_get_format_si(dev, si); + if (!format) + return ERR_PTR(-EINVAL); + width = drm_sysfb_get_width_si(dev, si); + if (width < 0) + return ERR_PTR(width); + height = drm_sysfb_get_height_si(dev, si); + if (height < 0) + return ERR_PTR(height); + res = drm_sysfb_get_memory_si(dev, si, &resbuf); + if (!res) + return ERR_PTR(-EINVAL); + stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res)); + if (stride < 0) + return ERR_PTR(stride); + vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res)); + if (!vsize) + return ERR_PTR(-EINVAL); + + drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n", + &format->format, width, height, stride); + + if (!__screen_info_vbe_mode_nonvga(si)) { + vesa->cmap_write = vesadrm_vga_cmap_write; +#if defined(CONFIG_X86_32) + } else { + phys_addr_t pmi_base = __screen_info_vesapm_info_base(si); + const u16 *pmi_addr = phys_to_virt(pmi_base); + + vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2]; + vesa->cmap_write = vesadrm_pmi_cmap_write; +#endif + } + +#ifdef CONFIG_X86 + if (drm_edid_header_is_valid(edid_info.dummy) == 8) + sysfb->edid = edid_info.dummy; +#endif + sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0); + sysfb->fb_format = format; + sysfb->fb_pitch = stride; + if (vesa->cmap_write) + sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE; + + /* + * Memory management + */ + + ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize); + if (ret) { + drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret); + return ERR_PTR(ret); + } + + drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res); + + mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name); + if (!mem) { + /* + * We cannot make this fatal. Sometimes this comes from magic + * spaces our resource handlers simply don't know about. Use + * the I/O-memory resource as-is and try to map that instead. 
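+ * (simpledrm applies the same non-fatal fallback when it cannot claim a firmware-provided framebuffer range.)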
+ */ + drm_warn(dev, "could not acquire memory region %pr\n", res); + mem = res; + } + + screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem)); + if (!screen_base) + return ERR_PTR(-ENOMEM); + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); + + /* + * Modesetting + */ + + ret = drmm_mode_config_init(dev); + if (ret) + return ERR_PTR(ret); + + max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH); + max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT); + + dev->mode_config.min_width = width; + dev->mode_config.max_width = max_width; + dev->mode_config.min_height = height; + dev->mode_config.max_height = max_height; + dev->mode_config.preferred_depth = format->depth; + dev->mode_config.funcs = &vesadrm_mode_config_funcs; + + /* Primary plane */ + + nformats = drm_fb_build_fourcc_list(dev, &format->format, 1, + vesa->formats, ARRAY_SIZE(vesa->formats)); + + primary_plane = &vesa->primary_plane; + ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs, + vesa->formats, nformats, + vesadrm_primary_plane_format_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ERR_PTR(ret); + drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); + + /* CRTC */ + + crtc = &vesa->crtc; + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &vesadrm_crtc_funcs, NULL); + if (ret) + return ERR_PTR(ret); + drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs); + + if (sysfb->fb_gamma_lut_size) { + ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size); + if (!ret) + drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size); + } + + /* Encoder */ + + encoder = &vesa->encoder; + ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ERR_PTR(ret); + encoder->possible_crtcs = drm_crtc_mask(crtc); + + /* Connector */ + + connector = &vesa->connector; + ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + if (ret) + return ERR_PTR(ret); + drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs); + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + width, height); + if (sysfb->edid) + drm_connector_attach_edid_property(connector); + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ERR_PTR(ret); + + drm_mode_config_reset(dev); + + return vesa; +} + +/* + * DRM driver + */ + +DEFINE_DRM_GEM_FOPS(vesadrm_fops); + +static struct drm_driver vesadrm_driver = { + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, + .fops = &vesadrm_fops, +}; + +/* + * Platform driver + */ + +static int vesadrm_probe(struct platform_device *pdev) +{ + struct vesadrm_device *vesa; + struct drm_sysfb_device *sysfb; + struct drm_device *dev; + int ret; + + vesa = vesadrm_device_create(&vesadrm_driver, pdev); + if (IS_ERR(vesa)) + return PTR_ERR(vesa); + sysfb = &vesa->sysfb; + dev = &sysfb->dev; + + ret = drm_dev_register(dev, 0); + if (ret) + return ret; + + drm_client_setup(dev, sysfb->fb_format); + + return 0; +} + +static void vesadrm_remove(struct platform_device *pdev) +{ + struct drm_device *dev = platform_get_drvdata(pdev); + + drm_dev_unplug(dev); +} + +static struct platform_driver 
vesadrm_platform_driver = { + .driver = { + .name = "vesa-framebuffer", + }, + .probe = vesadrm_probe, + .remove = vesadrm_remove, +}; + +module_platform_driver(vesadrm_platform_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c index 08fbd8f151a1..990e744b0923 100644 --- a/drivers/gpu/drm/tegra/dp.c +++ b/drivers/gpu/drm/tegra/dp.c @@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) } /** - * drm_dp_link_power_up() - power up a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - /* - * According to the DP 1.1 specification, a "Sink Device must exit the - * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink - * Control Field" (register 0x600). - */ - usleep_range(1000, 2000); - - return 0; -} - -/** - * drm_dp_link_power_down() - power down a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D3; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - return 0; -} - -/** * drm_dp_link_configure() - configure a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h index cb12ed0c54e7..695060cafac0 100644 --- a/drivers/gpu/drm/tegra/dp.h +++ b/drivers/gpu/drm/tegra/dp.h @@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate); void drm_dp_link_update_rates(struct drm_dp_link *link); int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); -int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); -int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_choose(struct drm_dp_link *link, const struct drm_display_mode *mode, diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 9bb077558167..b5089b772267 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) static int tegra_dsi_probe(struct platform_device *pdev) { struct tegra_dsi *dsi; - struct resource *regs; int err; dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); @@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto remove; } - 
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->regs = devm_ioremap_resource(&pdev->dev, regs); + dsi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->regs)) { err = PTR_ERR(dsi->regs); goto remove; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index f98f70eda906..21f3dfdcc5c9 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder) * the AUX transactions would just be timing out. */ if (output->connector.status != connector_status_disconnected) { - err = drm_dp_link_power_down(sor->aux, &sor->link); + err = drm_dp_link_power_down(sor->aux, sor->link.revision); if (err < 0) dev_err(sor->dev, "failed to power down link: %d\n", err); @@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder) else dev_dbg(sor->dev, "link training succeeded\n"); - err = drm_dp_link_power_up(sor->aux, &sor->link); + err = drm_dp_link_power_up(sor->aux, sor->link.revision); if (err < 0) dev_err(sor->dev, "failed to power up DP link: %d\n", err); diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 0109bcf7faa5..3afd6587df08 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \ drm_kunit_helpers.o obj-$(CONFIG_DRM_KUNIT_TEST) += \ + drm_atomic_test.o \ drm_atomic_state_test.o \ + drm_bridge_test.o \ drm_buddy_test.o \ drm_cmdline_parser_test.o \ drm_connector_test.o \ diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c new file mode 100644 index 000000000000..ea91bec6569e --- /dev/null +++ b/drivers/gpu/drm/tests/drm_atomic_test.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kunit test for drm_atomic functions + */ +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_encoder.h> +#include <drm/drm_kunit_helpers.h> +#include <drm/drm_modeset_helper_vtables.h> + +#include <kunit/test.h> + +struct drm_atomic_test_priv { + struct drm_device drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_encoder encoder; + struct drm_connector connector; +}; + +static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = { +}; + +static const struct drm_connector_funcs drm_atomic_init_connector_funcs = { + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .reset = drm_atomic_helper_connector_reset, +}; + +static struct drm_atomic_test_priv *create_device(struct kunit *test) +{ + struct drm_atomic_test_priv *priv; + struct drm_connector *connector; + struct drm_encoder *enc; + struct drm_device *drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + priv = drm_kunit_helper_alloc_drm_device(test, dev, + struct drm_atomic_test_priv, drm, + DRIVER_MODESET | DRIVER_ATOMIC); + if (IS_ERR(priv)) + return ERR_CAST(priv); + + drm = &priv->drm; + plane = drm_kunit_helper_create_primary_plane(test, drm, + NULL, + NULL, + NULL, 0, + NULL); + if (IS_ERR(plane)) + return ERR_CAST(plane); + priv->plane = plane; + + crtc = drm_kunit_helper_create_crtc(test, drm, + plane, NULL, + NULL, + NULL); + if (IS_ERR(crtc)) + return ERR_CAST(crtc); + priv->crtc = crtc; + + enc 
= &priv->encoder; + ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ERR_PTR(ret); + + enc->possible_crtcs = drm_crtc_mask(crtc); + + connector = &priv->connector; + ret = drmm_connector_init(drm, connector, + &drm_atomic_init_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + NULL); + if (ret) + return ERR_PTR(ret); + + drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs); + + drm_connector_attach_encoder(connector, enc); + + drm_mode_config_reset(drm); + + return priv; +} + +static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_test_priv *priv; + struct drm_display_mode *mode; + struct drm_connector *curr_connector; + int ret; + + priv = create_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_enable: + ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm, + priv->crtc, &priv->connector, + mode, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_enable; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + drm_modeset_acquire_init(&ctx, 0); + +retry_conn: + curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder, + &ctx); + if (PTR_ERR(curr_connector) == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_conn; + } + KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = { + KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder), + { } +}; + + +static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = { + .name = "drm_test_atomic_get_connector_for_encoder", + .test_cases = drm_atomic_get_connector_for_encoder_tests, +}; + +kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite); + +MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>"); +MODULE_DESCRIPTION("Kunit test for drm_atomic functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c new file mode 100644 index 000000000000..ff88ec2e911c --- /dev/null +++ b/drivers/gpu/drm/tests/drm_bridge_test.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kunit test for drm_bridge functions + */ +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_bridge_helper.h> +#include <drm/drm_kunit_helpers.h> + +#include <kunit/test.h> + +struct drm_bridge_init_priv { + struct drm_device drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_connector *connector; + unsigned int enable_count; + unsigned int disable_count; +}; + +static void drm_test_bridge_enable(struct drm_bridge *bridge) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->enable_count++; +} + +static void drm_test_bridge_disable(struct drm_bridge *bridge) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->disable_count++; +} + +static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = { + .enable = drm_test_bridge_enable, + .disable = 
drm_test_bridge_disable, +}; + +static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->enable_count++; +} + +static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->disable_count++; +} + +static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = { + .atomic_enable = drm_test_bridge_atomic_enable, + .atomic_disable = drm_test_bridge_atomic_disable, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper, + drm_bridge_remove, + struct drm_bridge *); + +static int drm_kunit_bridge_add(struct kunit *test, + struct drm_bridge *bridge) +{ + drm_bridge_add(bridge); + + return kunit_add_action_or_reset(test, + drm_bridge_remove_wrapper, + bridge); +} + +static struct drm_bridge_init_priv * +drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs) +{ + struct drm_bridge_init_priv *priv; + struct drm_encoder *enc; + struct drm_bridge *bridge; + struct drm_device *drm; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + priv = drm_kunit_helper_alloc_drm_device(test, dev, + struct drm_bridge_init_priv, drm, + DRIVER_MODESET | DRIVER_ATOMIC); + if (IS_ERR(priv)) + return ERR_CAST(priv); + + drm = &priv->drm; + priv->plane = drm_kunit_helper_create_primary_plane(test, drm, + NULL, + NULL, + NULL, 0, + NULL); + if (IS_ERR(priv->plane)) + return ERR_CAST(priv->plane); + + priv->crtc = drm_kunit_helper_create_crtc(test, drm, + priv->plane, NULL, + NULL, + NULL); + if (IS_ERR(priv->crtc)) + return ERR_CAST(priv->crtc); + + enc = &priv->encoder; + ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ERR_PTR(ret); + + enc->possible_crtcs = drm_crtc_mask(priv->crtc); + + bridge = &priv->bridge; + bridge->type = DRM_MODE_CONNECTOR_VIRTUAL; + bridge->funcs = funcs; + + ret = drm_kunit_bridge_add(test, bridge); + if (ret) + return ERR_PTR(ret); + + ret = drm_bridge_attach(enc, bridge, NULL, 0); + if (ret) + return ERR_PTR(ret); + + priv->connector = drm_bridge_connector_init(drm, enc); + if (IS_ERR(priv->connector)) + return ERR_CAST(priv->connector); + + drm_connector_attach_encoder(priv->connector, enc); + + drm_mode_config_reset(drm); + + return priv; +} + +/* + * Test that drm_bridge_get_current_state() returns the last committed + * state for an atomic bridge. 
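+ * A bridge state is pulled into a commit first; the pointer returned under the bridge's base lock afterwards is expected to match it.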
+ */ +static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_bridge_state *curr_bridge_state; + struct drm_bridge_state *bridge_state; + struct drm_atomic_state *state; + struct drm_bridge *bridge; + struct drm_device *drm; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + drm_modeset_acquire_init(&ctx, 0); + + drm = &priv->drm; + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); + +retry_commit: + bridge = &priv->bridge; + bridge_state = drm_atomic_get_bridge_state(state, bridge); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state); + + ret = drm_atomic_commit(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + drm_modeset_acquire_init(&ctx, 0); + +retry_state: + ret = drm_modeset_lock(&bridge->base.lock, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_state; + } + + curr_bridge_state = drm_bridge_get_current_state(bridge); + KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state); + + drm_modeset_unlock(&bridge->base.lock); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +/* + * Test that drm_bridge_get_current_state() returns NULL for a + * non-atomic bridge. + */ +static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test) +{ + struct drm_bridge_init_priv *priv; + struct drm_bridge *bridge; + + priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + /* + * NOTE: Strictly speaking, we should take the bridge->base.lock + * before calling that function. However, bridge->base is only + * initialized if the bridge is atomic, while we explicitly + * initialize one that isn't there. + * + * In order to avoid unnecessary warnings, let's skip the + * locking. The function would return NULL in all cases anyway, + * so we don't really have any concurrency to worry about. + */ + bridge = &priv->bridge; + KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge)); +} + +static struct kunit_case drm_bridge_get_current_state_tests[] = { + KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic), + KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy), + { } +}; + + +static struct kunit_suite drm_bridge_get_current_state_test_suite = { + .name = "drm_test_bridge_get_current_state", + .test_cases = drm_bridge_get_current_state_tests, +}; + +/* + * Test that an atomic bridge is properly power-cycled when calling + * drm_bridge_helper_reset_crtc(). 
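+ * The enable/disable counters are expected to read 1/0 after the initial modeset and 2/1 once the helper has cycled the bridge.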
+ */ +static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_commit: + ret = drm_kunit_helper_enable_crtc_connector(test, + &priv->drm, priv->crtc, + priv->connector, + mode, + &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 1); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 2); + KUNIT_EXPECT_EQ(test, priv->disable_count, 1); +} + +/* + * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic + * bridge will fail and not call the enable / disable callbacks + */ +static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 0); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_EXPECT_LT(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 0); + KUNIT_EXPECT_EQ(test, priv->disable_count, 0); +} + +/* + * Test that a non-atomic bridge is properly power-cycled when calling + * drm_bridge_helper_reset_crtc(). 
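+ * As in the atomic variant, the counters are expected to go from 1/0 to 2/1, this time through the legacy enable/disable callbacks.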
+ */ +static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_commit: + ret = drm_kunit_helper_enable_crtc_connector(test, + &priv->drm, priv->crtc, + priv->connector, + mode, + &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 1); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 2); + KUNIT_EXPECT_EQ(test, priv->disable_count, 1); +} + +static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = { + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic), + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled), + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy), + { } +}; + +static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = { + .name = "drm_test_bridge_helper_reset_crtc", + .test_cases = drm_bridge_helper_reset_crtc_tests, +}; + +kunit_test_suites( + &drm_bridge_get_current_state_test_suite, + &drm_bridge_helper_reset_crtc_test_suite, +); + +MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>"); +MODULE_DESCRIPTION("Kunit test for drm_bridge functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c index b2fdb1a774fe..3f44fe5e92e4 100644 --- a/drivers/gpu/drm/tests/drm_client_modeset_test.c +++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c @@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test) struct drm_device *drm = priv->drm; struct drm_connector *connector = &priv->connector; struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode; - struct drm_display_mode *expected_mode, *mode; + struct drm_display_mode *expected_mode; + const struct drm_display_mode *mode; const char *cmdline = "1920x1080@60"; int ret; diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index 925fbc2cda70..68f2c3162354 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test) shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); KUNIT_EXPECT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0); ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); @@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test) ret = drm_gem_shmem_pin(shmem); KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_ASSERT_NOT_NULL(test, 
shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1); for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++) KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]); drm_gem_shmem_unpin(shmem); KUNIT_EXPECT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0); } /* @@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test) shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); KUNIT_EXPECT_NULL(test, shmem->vaddr); - KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0); ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); - ret = drm_gem_shmem_vmap(shmem, &map); + ret = drm_gem_shmem_vmap_locked(shmem, &map); KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr); KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map)); - KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1); iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE); for (i = 0; i < TEST_SIZE; i++) KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE); - drm_gem_shmem_vunmap(shmem, &map); + drm_gem_shmem_vunmap_locked(shmem, &map); KUNIT_EXPECT_NULL(test, shmem->vaddr); - KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0); } /* @@ -254,7 +254,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test) sgt = drm_gem_shmem_get_pages_sgt(shmem); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); KUNIT_ASSERT_NOT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1); KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt); for_each_sgtable_sg(sgt, sg, si) { @@ -284,17 +284,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test) ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); - ret = drm_gem_shmem_madvise(shmem, 1); + ret = drm_gem_shmem_madvise_locked(shmem, 1); KUNIT_EXPECT_TRUE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, 1); /* Set madv to a negative value */ - ret = drm_gem_shmem_madvise(shmem, -1); + ret = drm_gem_shmem_madvise_locked(shmem, -1); KUNIT_EXPECT_FALSE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, -1); /* Check that madv cannot be set back to a positive value */ - ret = drm_gem_shmem_madvise(shmem, 0); + ret = drm_gem_shmem_madvise_locked(shmem, 0); KUNIT_EXPECT_FALSE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, -1); } @@ -322,7 +322,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test) ret = drm_gem_shmem_is_purgeable(shmem); KUNIT_EXPECT_FALSE(test, ret); - ret = drm_gem_shmem_madvise(shmem, 1); + ret = drm_gem_shmem_madvise_locked(shmem, 1); KUNIT_EXPECT_TRUE(test, ret); /* The scatter/gather table will be freed by drm_gem_shmem_free */ @@ -332,7 +332,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test) ret = drm_gem_shmem_is_purgeable(shmem); KUNIT_EXPECT_TRUE(test, ret); - drm_gem_shmem_purge(shmem); + drm_gem_shmem_purge_locked(shmem); KUNIT_EXPECT_NULL(test, shmem->pages); KUNIT_EXPECT_NULL(test, shmem->sgt); KUNIT_EXPECT_EQ(test, shmem->madv, -1); diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index 
e97efd3af9ed..7ffd666753b1 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec return preferred; } -static int light_up_connector(struct kunit *test, - struct drm_device *drm, - struct drm_crtc *crtc, - struct drm_connector *connector, - struct drm_display_mode *mode, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_atomic_state *state; - struct drm_connector_state *conn_state; - struct drm_crtc_state *crtc_state; - int ret; - - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); - -retry: - conn_state = drm_atomic_get_connector_state(state, connector); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); - - ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); - if (ret == -EDEADLK) { - drm_atomic_state_clear(state); - ret = drm_modeset_backoff(ctx); - if (!ret) - goto retry; - } - KUNIT_EXPECT_EQ(test, ret, 0); - - crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); - - ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); - KUNIT_EXPECT_EQ(test, ret, 0); - - crtc_state->enable = true; - crtc_state->active = true; - - ret = drm_atomic_commit(state); - KUNIT_ASSERT_EQ(test, ret, 0); - - return 0; -} - static int set_connector_edid(struct kunit *test, struct drm_connector *connector, const char *edid, size_t edid_len) { @@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -606,7 +578,10 @@ static void 
drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1086,7 +1082,10 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = 
drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); /* You shouldn't be doing that at home. */ @@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test) drm = &priv->drm; crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index 6f6616cf4966..5f7257840d8e 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -2,6 +2,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_uapi.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> @@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test, } 
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); +/** + * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output + * @test: The test context object + * @drm: The DRM device the CRTC and connector belong to + * @crtc: The CRTC to enable + * @connector: The Connector to enable + * @mode: The display mode to configure the CRTC with + * @ctx: Locking context + * + * This function creates an atomic update to enable the route from @crtc + * to @connector, with the given @mode. + * + * Returns: + * + * 0 on success, or a negative error code otherwise. If the error + * returned is EDEADLK, the entire atomic sequence must be restarted. + */ +int drm_kunit_helper_enable_crtc_connector(struct kunit *test, + struct drm_device *drm, + struct drm_crtc *crtc, + struct drm_connector *connector, + const struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + int ret; + + state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + if (IS_ERR(state)) + return PTR_ERR(state); + + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + + ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); + if (ret) + return ret; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); + if (ret) + return ret; + + crtc_state->enable = true; + crtc_state->active = true; + + ret = drm_atomic_commit(state); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector); + static void kunit_action_drm_mode_destroy(void *ptr) { struct drm_display_mode *mode = ptr; diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c index 17a86bed8054..95b4aeff2775 100644 --- a/drivers/gpu/drm/tidss/tidss_encoder.c +++ b/drivers/gpu/drm/tidss/tidss_encoder.c @@ -34,11 +34,12 @@ static inline struct tidss_encoder } static int tidss_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge); - return drm_bridge_attach(bridge->encoder, t_enc->next_bridge, + return drm_bridge_attach(encoder, t_enc->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 54c84c9801c1..ad4dab525f93 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -65,20 +65,6 @@ config DRM_GM12U320 This is a KMS driver for projectors which use the GM12U320 chipset for video transfer over USB2/3, such as the Acer C120 mini projector. -config DRM_OFDRM - tristate "Open Firmware display driver" - depends on DRM && MMU && OF && (PPC || COMPILE_TEST) - select APERTURE_HELPERS - select DRM_CLIENT_SELECTION - select DRM_GEM_SHMEM_HELPER - select DRM_KMS_HELPER - help - DRM driver for Open Firmware framebuffers. - - This driver assumes that the display hardware has been initialized - by the Open Firmware before the kernel boots. Scanout buffer, size, - and display format must be provided via device tree. - config DRM_PANEL_MIPI_DBI tristate "DRM support for MIPI DBI compatible panels" depends on DRM && SPI @@ -95,24 +81,6 @@ config DRM_PANEL_MIPI_DBI https://github.com/notro/panel-mipi-dbi/wiki. To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM - tristate "Simple framebuffer driver" - depends on DRM && MMU - select APERTURE_HELPERS - select DRM_CLIENT_SELECTION - select DRM_GEM_SHMEM_HELPER - select DRM_KMS_HELPER - help - DRM driver for simple platform-provided framebuffers. - - This driver assumes that the display hardware has been initialized - by the firmware or bootloader before the kernel boots. Scanout - buffer, size, and display format must be provided via device tree, - UEFI, VESA, etc. - - On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB - to use UEFI and VESA framebuffers. - config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI @@ -244,6 +212,18 @@ config TINYDRM_ST7586 If M is selected the module will be called st7586. +config DRM_ST7571_I2C + tristate "DRM support for Sitronix ST7571 display panels (I2C)" + depends on DRM && I2C && MMU + select DRM_CLIENT_SELECTION + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + select REGMAP_I2C + help + DRM driver for Sitronix ST7571 panels controlled over I2C. + + If M is selected the module will be called st7571-i2c. + config TINYDRM_ST7735R tristate "DRM support for Sitronix ST7715R/ST7735R display panels" depends on DRM && SPI diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 0a3a7837a58b..0151590db5cb 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -5,9 +5,8 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o obj-$(CONFIG_DRM_BOCHS) += bochs.o obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o obj-$(CONFIG_DRM_GM12U320) += gm12u320.o -obj-$(CONFIG_DRM_OFDRM) += ofdrm.o obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o -obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o +obj-$(CONFIG_DRM_ST7571_I2C) += st7571-i2c.o obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c index 4370ba22dd88..751b05753c94 100644 --- a/drivers/gpu/drm/tiny/appletbdrm.c +++ b/drivers/gpu/drm/tiny/appletbdrm.c @@ -45,7 +45,7 @@ #define APPLETBDRM_BULK_MSG_TIMEOUT 1000 #define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm) -#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev)) +#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev)) struct appletbdrm_msg_request_header { __le16 unk_00; @@ -123,8 +123,6 @@ struct appletbdrm_device { - struct device *dmadev; - unsigned int in_ep; unsigned int out_ep; @@ -214,7 +212,7 @@ retry: } if (response->msg != expected_response) { - drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n", + drm_err(drm, "Unexpected response from device (expected %p4cl found %p4cl)\n", &expected_response, &response->msg); return -EIO; } @@ -288,7 +286,7 @@ static int appletbdrm_get_information(struct appletbdrm_device *adev) } if (pixel_format != APPLETBDRM_PIXEL_FORMAT) { - drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format); + drm_err(drm, "Encountered unknown pixel format (%p4cl)\n", &pixel_format); ret = -EINVAL; goto free_info; } @@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = { .destroy = drm_encoder_cleanup, }; -static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct appletbdrm_device *adev = drm_to_adev(dev); - - if
(!adev->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev); -} - DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops); static const struct drm_driver appletbdrm_drm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = appletbdrm_driver_gem_prime_import, .name = "appletbdrm", .desc = "Apple Touch Bar DRM Driver", .major = 1, @@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf, struct device *dev = &intf->dev; struct appletbdrm_device *adev; struct drm_device *drm = NULL; + struct device *dma_dev; int ret; ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); @@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf, adev->in_ep = bulk_in->bEndpointAddress; adev->out_ep = bulk_out->bEndpointAddress; - adev->dmadev = dev; drm = &adev->drm; usb_set_intfdata(intf, adev); + dma_dev = usb_intf_get_dma_device(intf); + if (dma_dev) { + drm_dev_set_dma_dev(drm, dma_dev); + put_device(dma_dev); + } else { + drm_warn(drm, "buffer sharing not supported"); /* not an error */ + } + ret = appletbdrm_get_information(adev); if (ret) { drm_err(drm, "Failed to get display information\n"); diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c index 52ec1e4ea9e5..ccf3f6551344 100644 --- a/drivers/gpu/drm/tiny/cirrus-qemu.c +++ b/drivers/gpu/drm/tiny/cirrus-qemu.c @@ -70,20 +70,6 @@ struct cirrus_device { #define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev) -struct cirrus_primary_plane_state { - struct drm_shadow_plane_state base; - - /* HW scanout buffer */ - const struct drm_format_info *format; - unsigned int pitch; -}; - -static inline struct cirrus_primary_plane_state * -to_cirrus_primary_plane_state(struct drm_plane_state *plane_state) -{ - return container_of(plane_state, struct cirrus_primary_plane_state, base.base); -}; - /* ------------------------------------------------------------------ */ /* * The meat of this driver. 
The core passes us a mode and we have to program @@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val) iowrite8(val, cirrus->mmio + VGA_DAC_MASK); } -static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb) -{ - if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) { - if (fb->width * 3 <= CIRRUS_MAX_PITCH) - /* convert from XR24 to RG24 */ - return drm_format_info(DRM_FORMAT_RGB888); - else - /* convert from XR24 to RG16 */ - return drm_format_info(DRM_FORMAT_RGB565); - } - return NULL; -} - -static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb) -{ - const struct drm_format_info *format = cirrus_convert_to(fb); - - if (format) - return format; - return fb->format; -} - -static int cirrus_pitch(struct drm_framebuffer *fb) -{ - const struct drm_format_info *format = cirrus_convert_to(fb); - - if (format) - return drm_format_info_min_pitch(format, 0, fb->width); - return fb->pitches[0]; -} - static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset) { u32 addr; @@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch) /* Enable extended blanking and pitch bits, and enable full memory */ cr1b = 0x22; cr1b |= (pitch >> 7) & 0x10; - cr1b |= (pitch >> 6) & 0x40; wreg_crt(cirrus, 0x1b, cr1b); cirrus_set_start_address(cirrus, 0); @@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); - struct cirrus_primary_plane_state *new_primary_plane_state = - to_cirrus_primary_plane_state(new_plane_state); struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc *new_crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state = NULL; int ret; - unsigned int pitch; if (new_crtc) new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); @@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane, else if (!new_plane_state->visible) return 0; - pitch = cirrus_pitch(fb); - /* validate size constraints */ - if (pitch > CIRRUS_MAX_PITCH) + if (fb->pitches[0] > CIRRUS_MAX_PITCH) return -EINVAL; - else if (pitch * fb->height > CIRRUS_VRAM_SIZE) + else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height) return -EINVAL; - new_primary_plane_state->format = cirrus_format(fb); - new_primary_plane_state->pitch = pitch; - return 0; } @@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane, { struct cirrus_device *cirrus = to_cirrus(plane->dev); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); - struct cirrus_primary_plane_state *primary_plane_state = - to_cirrus_primary_plane_state(plane_state); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; - const struct drm_format_info *format = primary_plane_state->format; - unsigned int pitch = primary_plane_state->pitch; struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct cirrus_primary_plane_state *old_primary_plane_state = - to_cirrus_primary_plane_state(old_plane_state); + struct drm_framebuffer *old_fb = old_plane_state->fb; struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram); struct drm_atomic_helper_damage_iter iter; struct drm_rect damage; @@ -401,18 +342,17 @@ 
static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane, if (!drm_dev_enter(&cirrus->dev, &idx)) return; - if (old_primary_plane_state->format != format) - cirrus_format_set(cirrus, format); - if (old_primary_plane_state->pitch != pitch) - cirrus_pitch_set(cirrus, pitch); + if (!old_fb || old_fb->format != fb->format) + cirrus_format_set(cirrus, fb->format); + if (!old_fb || old_fb->pitches[0] != fb->pitches[0]) + cirrus_pitch_set(cirrus, fb->pitches[0]); drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { - unsigned int offset = drm_fb_clip_offset(pitch, format, &damage); + unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage); struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset); - drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, - &damage, &shadow_plane_state->fmtcnv_state); + drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage); } drm_dev_exit(idx); @@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = { .atomic_update = cirrus_primary_plane_helper_atomic_update, }; -static struct drm_plane_state * -cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane) -{ - struct drm_plane_state *plane_state = plane->state; - struct cirrus_primary_plane_state *primary_plane_state = - to_cirrus_primary_plane_state(plane_state); - struct cirrus_primary_plane_state *new_primary_plane_state; - struct drm_shadow_plane_state *new_shadow_plane_state; - - if (!plane_state) - return NULL; - - new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL); - if (!new_primary_plane_state) - return NULL; - new_shadow_plane_state = &new_primary_plane_state->base; - - __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); - new_primary_plane_state->format = primary_plane_state->format; - new_primary_plane_state->pitch = primary_plane_state->pitch; - - return &new_shadow_plane_state->base; -} - -static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane, - struct drm_plane_state *plane_state) -{ - struct cirrus_primary_plane_state *primary_plane_state = - to_cirrus_primary_plane_state(plane_state); - - __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base); - kfree(primary_plane_state); -} - -static void cirrus_reset_primary_plane(struct drm_plane *plane) -{ - struct cirrus_primary_plane_state *primary_plane_state; - - if (plane->state) { - cirrus_primary_plane_atomic_destroy_state(plane, plane->state); - plane->state = NULL; /* must be set to NULL here */ - } - - primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL); - if (!primary_plane_state) - return; - __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base); -} - static const struct drm_plane_funcs cirrus_primary_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, - .reset = cirrus_reset_primary_plane, - .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state, - .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state, + DRM_GEM_SHADOW_PLANE_FUNCS, }; static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev const struct drm_display_mode *mode) { const struct drm_format_info *format = 
drm_format_info(DRM_FORMAT_XRGB8888); - uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay); + u64 pitch; - if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE) + if (drm_WARN_ON_ONCE(dev, !format)) + return MODE_ERROR; /* driver bug */ + + pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay); + if (!pitch) + return MODE_BAD_WIDTH; + if (pitch > CIRRUS_MAX_PITCH) + return MODE_BAD_WIDTH; /* maximum programmable pitch */ + if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay) return MODE_MEM; return MODE_OK; diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 41e9bfb2e2ff..fb0004166f4a 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); struct gm12u320_device { struct drm_device dev; - struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; @@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. - */ -static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct gm12u320_device *gm12u320 = to_gm12u320(dev); - - if (!gm12u320->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); -} - DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { @@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = gm12u320_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, }; @@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface, { struct gm12u320_device *gm12u320; struct drm_device *dev; + struct device *dma_dev; int ret; /* @@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface, return PTR_ERR(gm12u320); dev = &gm12u320->dev; - gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); - if (!gm12u320->dmadev) + dma_dev = usb_intf_get_dma_device(interface); + if (dma_dev) { + drm_dev_set_dma_dev(dev, dma_dev); + put_device(dma_dev); + } else { drm_warn(dev, "buffer sharing not supported"); /* not an error */ + } INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); ret = drmm_mode_config_init(dev); if (ret) - goto err_put_device; + return ret; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; @@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - goto err_put_device; + return ret; ret = gm12u320_set_ecomode(gm12u320); if (ret) - goto err_put_device; + return ret; ret = gm12u320_conn_init(gm12u320); if (ret) - goto err_put_device; + return ret; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - goto err_put_device; + return ret; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - goto err_put_device; + return ret; drm_client_setup(dev, NULL); return 0; - 
-err_put_device: - put_device(gm12u320->dmadev); - return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); - struct gm12u320_device *gm12u320 = to_gm12u320(dev); - put_device(gm12u320->dmadev); - gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } diff --git a/drivers/gpu/drm/tiny/st7571-i2c.c b/drivers/gpu/drm/tiny/st7571-i2c.c new file mode 100644 index 000000000000..dc410ec41baf --- /dev/null +++ b/drivers/gpu/drm/tiny/st7571-i2c.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Sitronix ST7571, a 4 level gray scale dot matrix LCD controller + * + * Copyright (C) 2025 Marcus Folkesson <marcus.folkesson@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include <drm/clients/drm_client_setup.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_encoder.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_shmem.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_module.h> +#include <drm/drm_plane.h> +#include <drm/drm_probe_helper.h> + +#include <video/display_timing.h> +#include <video/of_display_timing.h> + +#define ST7571_COMMAND_MODE (0x00) +#define ST7571_DATA_MODE (0x40) + +/* Normal mode command set */ +#define ST7571_DISPLAY_OFF (0xae) +#define ST7571_DISPLAY_ON (0xaf) +#define ST7571_OSC_ON (0xab) +#define ST7571_SET_COLUMN_LSB(c) (0x00 | FIELD_PREP(GENMASK(3, 0), (c))) +#define ST7571_SET_COLUMN_MSB(c) (0x10 | FIELD_PREP(GENMASK(2, 0), (c) >> 4)) +#define ST7571_SET_COM0_LSB(x) (FIELD_PREP(GENMASK(6, 0), (x))) +#define ST7571_SET_COM0_MSB (0x44) +#define ST7571_SET_COM_SCAN_DIR(d) (0xc0 | FIELD_PREP(GENMASK(3, 3), (d))) +#define ST7571_SET_CONTRAST_LSB(c) (FIELD_PREP(GENMASK(5, 0), (c))) +#define ST7571_SET_CONTRAST_MSB (0x81) +#define ST7571_SET_DISPLAY_DUTY_LSB(d) (FIELD_PREP(GENMASK(7, 0), (d))) +#define ST7571_SET_DISPLAY_DUTY_MSB (0x48) +#define ST7571_SET_ENTIRE_DISPLAY_ON(p) (0xa4 | FIELD_PREP(GENMASK(0, 0), (p))) +#define ST7571_SET_LCD_BIAS(b) (0x50 | FIELD_PREP(GENMASK(2, 0), (b))) +#define ST7571_SET_MODE_LSB(m) (FIELD_PREP(GENMASK(7, 2), (m))) +#define ST7571_SET_MODE_MSB (0x38) +#define ST7571_SET_PAGE(p) (0xb0 | FIELD_PREP(GENMASK(3, 0), (p))) +#define ST7571_SET_POWER(p) (0x28 | FIELD_PREP(GENMASK(2, 0), (p))) +#define ST7571_SET_REGULATOR_REG(r) (0x20 | FIELD_PREP(GENMASK(2, 0), (r))) +#define ST7571_SET_REVERSE(r) (0xa6 | FIELD_PREP(GENMASK(0, 0), (r))) +#define ST7571_SET_SEG_SCAN_DIR(d) (0xa0 | FIELD_PREP(GENMASK(0, 0), (d))) +#define ST7571_SET_START_LINE_LSB(l) (FIELD_PREP(GENMASK(6, 0), (l))) +#define ST7571_SET_START_LINE_MSB (0x40) + +/* Extension command set 3 */ +#define ST7571_COMMAND_SET_3 (0x7b) +#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c))) +#define ST7571_COMMAND_SET_NORMAL (0x00) + +#define ST7571_PAGE_HEIGHT 8 + +#define DRIVER_NAME "st7571" +#define DRIVER_DESC "ST7571 DRM driver" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +enum st7571_color_mode { + 
ST7571_COLOR_MODE_GRAY = 0, + ST7571_COLOR_MODE_BLACKWHITE = 1, +}; + +struct st7571_device; + +struct st7571_panel_constraints { + u32 min_nlines; + u32 max_nlines; + u32 min_ncols; + u32 max_ncols; + bool support_grayscale; +}; + +struct st7571_panel_data { + int (*init)(struct st7571_device *st7571); + struct st7571_panel_constraints constraints; +}; + +struct st7571_panel_format { + void (*prepare_buffer)(struct st7571_device *st7571, + const struct iosys_map *vmap, + struct drm_framebuffer *fb, + struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state); + int (*update_rect)(struct drm_framebuffer *fb, struct drm_rect *rect); + enum st7571_color_mode mode; + const u8 nformats; + const u32 formats[]; +}; + +struct st7571_device { + struct drm_device dev; + + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + + struct drm_display_mode mode; + + const struct st7571_panel_format *pformat; + const struct st7571_panel_data *pdata; + struct i2c_client *client; + struct gpio_desc *reset; + struct regmap *regmap; + + /* + * Depending on the hardware design, the acknowledge signal may be hard to + * recognize as a valid logic "0" level. + * Therefore, ignore NAK if possible to stay compatible with most hardware designs + * and off-the-shelf panels out there. + * + * From section 6.4, MICROPROCESSOR INTERFACE, of the datasheet: + * + * "By connecting SDA_OUT to SDA_IN externally, the SDA line becomes fully + * I2C interface compatible. + * Separating acknowledge-output from serial data + * input is advantageous for chip-on-glass (COG) applications. In COG + * applications, the ITO resistance and the pull-up resistor will form a + * voltage divider, which affects acknowledge-signal level. Larger ITO + * resistance will raise the acknowledged-signal level and system cannot + * recognize this level as a valid logic “0” level. By separating SDA_IN from + * SDA_OUT, the IC can be used in a mode that ignores the acknowledge-bit. + * For applications which check acknowledge-bit, it is necessary to minimize + * the ITO resistance of the SDA_OUT trace to guarantee a valid low level." + * + */ + bool ignore_nak; + + bool grayscale; + u32 height_mm; + u32 width_mm; + u32 startline; + u32 nlines; + u32 ncols; + u32 bpp; + + /* Intermediate buffer in LCD friendly format */ + u8 *hwbuf; + + /* Row of (transformed) pixels ready to be written to the display */ + u8 *row; +}; + +static inline struct st7571_device *drm_to_st7571(struct drm_device *dev) +{ + return container_of(dev, struct st7571_device, dev); +} + +static int st7571_regmap_write(void *context, const void *data, size_t count) +{ + struct i2c_client *client = context; + struct st7571_device *st7571 = i2c_get_clientdata(client); + int ret; + + struct i2c_msg msg = { + .addr = st7571->client->addr, + .flags = st7571->ignore_nak ? I2C_M_IGNORE_NAK : 0, + .len = count, + .buf = (u8 *)data + }; + + ret = i2c_transfer(st7571->client->adapter, &msg, 1); + + /* + * Unfortunately, there is no way to check if the transfer failed because of + * a NAK or something else as I2C bus drivers use different return values for NAK. + * + * However, if the transfer fails and ignore_nak is set, we know it is an error.
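+ * + * In other words: with I2C_M_IGNORE_NAK set, the adapter never reports a NAK at all, so a negative return here can only be a genuine bus failure and is propagated. Without the flag, a failure may just be the panel's weak acknowledge level being read back as a NAK, so it is deliberately treated as success.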
+ */ + if (ret < 0 && st7571->ignore_nak) + return ret; + + return 0; +} + +/* The st7571 driver does not read registers but regmap expects a .read */ +static int st7571_regmap_read(void *context, const void *reg_buf, + size_t reg_size, void *val_buf, size_t val_size) +{ + return -EOPNOTSUPP; +} + +static int st7571_send_command_list(struct st7571_device *st7571, + const u8 *cmd_list, size_t len) +{ + int ret = 0; + + for (int i = 0; i < len; i++) { + ret = regmap_write(st7571->regmap, ST7571_COMMAND_MODE, cmd_list[i]); + if (ret < 0) + return ret; + } + + return ret; +} + +static inline u8 st7571_transform_xy(const char *p, int x, int y) +{ + int xrest = x % 8; + u8 result = 0; + + /* + * Transforms an (x, y) pixel coordinate into a vertical 8-bit + * column from the framebuffer. It calculates the corresponding byte in the + * framebuffer, extracts the bit at the given x position across 8 consecutive + * rows, and packs those bits into a single byte. + * + * Return an 8-bit value representing a vertical column of pixels. + */ + x = x / 8; + y = (y / 8) * 8; + + for (int i = 0; i < 8; i++) { + int row_idx = y + i; + u8 byte = p[row_idx * 16 + x]; + u8 bit = (byte >> xrest) & 1; + + result |= (bit << i); + } + + return result; +} + +static int st7571_set_position(struct st7571_device *st7571, int x, int y) +{ + u8 cmd_list[] = { + ST7571_SET_COLUMN_LSB(x), + ST7571_SET_COLUMN_MSB(x), + ST7571_SET_PAGE(y / ST7571_PAGE_HEIGHT), + }; + + return st7571_send_command_list(st7571, cmd_list, ARRAY_SIZE(cmd_list)); +} + +static int st7571_fb_clear_screen(struct st7571_device *st7571) +{ + u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp; + char pixelvalue = 0x00; + + for (int i = 0; i < npixels; i++) + regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1); + + return 0; +} + +static void st7571_prepare_buffer_monochrome(struct st7571_device *st7571, + const struct iosys_map *vmap, + struct drm_framebuffer *fb, + struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state) +{ + unsigned int dst_pitch; + struct iosys_map dst; + u32 size; + + switch (fb->format->format) { + case DRM_FORMAT_XRGB8888: + dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); + iosys_map_set_vaddr(&dst, st7571->hwbuf); + + drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); + break; + + case DRM_FORMAT_R1: + size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8; + memcpy(st7571->hwbuf, vmap->vaddr, size); + break; + } +} + +static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571, + const struct iosys_map *vmap, + struct drm_framebuffer *fb, + struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state) +{ + u32 size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8; + unsigned int dst_pitch; + struct iosys_map dst; + + switch (fb->format->format) { + case DRM_FORMAT_XRGB8888: /* XRGB8888 is converted to monochrome (R1) */ + dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); + iosys_map_set_vaddr(&dst, st7571->hwbuf); + + drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); + break; + + case DRM_FORMAT_R1: + size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8; + memcpy(st7571->hwbuf, vmap->vaddr, size); + break; + + case DRM_FORMAT_R2: + size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4; + memcpy(st7571->hwbuf, vmap->vaddr, size); + break; + } +} + +static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect) +{ + struct st7571_device *st7571 = 
drm_to_st7571(fb->dev); + char *row = st7571->row; + + /* Align y to display page boundaries */ + rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT); + rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines); + + for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { + for (int x = rect->x1; x < rect->x2; x++) + row[x] = st7571_transform_xy(st7571->hwbuf, x, y); + + st7571_set_position(st7571, rect->x1, y); + + /* TODO: Investigate why we can't write multiple bytes at once */ + for (int x = rect->x1; x < rect->x2; x++) + regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1); + } + + return 0; +} + +static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct drm_rect *rect) +{ + struct st7571_device *st7571 = drm_to_st7571(fb->dev); + u32 format = fb->format->format; + char *row = st7571->row; + int x1; + int x2; + + /* Align y to display page boundaries */ + rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT); + rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines); + + switch (format) { + case DRM_FORMAT_XRGB8888: + /* Treated as monochrome (R1) */ + fallthrough; + case DRM_FORMAT_R1: + x1 = rect->x1; + x2 = rect->x2; + break; + case DRM_FORMAT_R2: + x1 = rect->x1 * 2; + x2 = rect->x2 * 2; + break; + } + + for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { + for (int x = x1; x < x2; x++) + row[x] = st7571_transform_xy(st7571->hwbuf, x, y); + + st7571_set_position(st7571, rect->x1, y); + + /* TODO: Investigate why we can't write multiple bytes at once */ + for (int x = x1; x < x2; x++) { + regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1); + + /* + * As the display supports grayscale, all pixels must be written as two bits + * even if the format is monochrome. + * + * The bit values map to the following grayscale levels: + * 0 0 = White + * 0 1 = Light gray + * 1 0 = Dark gray + * 1 1 = Black + * + * For monochrome formats, write the same value twice to get + * either a black or white pixel.
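+ * + * Worked example (illustrative values): a transformed column byte of 0x01 sent twice yields the bit pair (1, 1) for the pixel in the page's top row (black) and (0, 0) for the remaining seven rows (white).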
+ */ + if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888) + regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1); + } + } + + return 0; +} + +static int st7571_connector_get_modes(struct drm_connector *conn) +{ + struct st7571_device *st7571 = drm_to_st7571(conn->dev); + + return drm_connector_helper_get_modes_fixed(conn, &st7571->mode); +} + +static const struct drm_connector_helper_funcs st7571_connector_helper_funcs = { + .get_modes = st7571_connector_get_modes, +}; + +static const struct st7571_panel_format st7571_monochrome = { + .prepare_buffer = st7571_prepare_buffer_monochrome, + .update_rect = st7571_fb_update_rect_monochrome, + .mode = ST7571_COLOR_MODE_BLACKWHITE, + .formats = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_R1, + }, + .nformats = 2, +}; + +static const struct st7571_panel_format st7571_grayscale = { + .prepare_buffer = st7571_prepare_buffer_grayscale, + .update_rect = st7571_fb_update_rect_grayscale, + .mode = ST7571_COLOR_MODE_GRAY, + .formats = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_R1, + DRM_FORMAT_R2, + }, + .nformats = 3, +}; + +static const u64 st7571_primary_plane_fmtmods[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static int st7571_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + +static void st7571_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_atomic_helper_damage_iter iter; + struct drm_device *dev = plane->dev; + struct drm_rect damage; + struct st7571_device *st7571 = drm_to_st7571(plane->dev); + int ret, idx; + + if (!fb) + return; /* no framebuffer; plane is disabled */ + + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + return; + + if (!drm_dev_enter(dev, &idx)) + goto out_drm_gem_fb_end_cpu_access; + + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + st7571->pformat->prepare_buffer(st7571, + &shadow_plane_state->data[0], + fb, &damage, + &shadow_plane_state->fmtcnv_state); + + st7571->pformat->update_rect(fb, &damage); + } + + drm_dev_exit(idx); + +out_drm_gem_fb_end_cpu_access: + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); +} + +static void st7571_primary_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct st7571_device *st7571 = drm_to_st7571(plane->dev); + int idx; + + if (!drm_dev_enter(dev, &idx)) + return; + + st7571_fb_clear_screen(st7571); + drm_dev_exit(idx); +} + +static const struct drm_plane_helper_funcs st7571_primary_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = st7571_primary_plane_helper_atomic_check, + .atomic_update = 
st7571_primary_plane_helper_atomic_update, + .atomic_disable = st7571_primary_plane_helper_atomic_disable, +}; + +static const struct drm_plane_funcs st7571_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS, +}; + +/* + * CRTC + */ + +static enum drm_mode_status st7571_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct st7571_device *st7571 = drm_to_st7571(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &st7571->mode); +} + +static const struct drm_crtc_helper_funcs st7571_crtc_helper_funcs = { + .atomic_check = drm_crtc_helper_atomic_check, + .mode_valid = st7571_crtc_mode_valid, +}; + +static const struct drm_crtc_funcs st7571_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +/* + * Encoder + */ + +static void st7571_encoder_atomic_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *drm = encoder->dev; + struct st7571_device *st7571 = drm_to_st7571(drm); + u8 command = ST7571_DISPLAY_ON; + int ret; + + ret = st7571->pdata->init(st7571); + if (ret) + return; + + st7571_send_command_list(st7571, &command, 1); +} + +static void st7571_encoder_atomic_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_device *drm = encoder->dev; + struct st7571_device *st7571 = drm_to_st7571(drm); + u8 command = ST7571_DISPLAY_OFF; + + st7571_send_command_list(st7571, &command, 1); +} + +static const struct drm_encoder_funcs st7571_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = { + .atomic_enable = st7571_encoder_atomic_enable, + .atomic_disable = st7571_encoder_atomic_disable, +}; + +/* + * Connector + */ + +static const struct drm_connector_funcs st7571_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_mode_config_funcs st7571_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static struct drm_display_mode st7571_mode(struct st7571_device *st7571) +{ + struct drm_display_mode mode = { + DRM_SIMPLE_MODE(st7571->ncols, st7571->nlines, + st7571->width_mm, st7571->height_mm), + }; + + return mode; +} + +static int st7571_mode_config_init(struct st7571_device *st7571) +{ + struct drm_device *dev = &st7571->dev; + const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints; + int ret; + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; + + dev->mode_config.min_width = constraints->min_ncols; + dev->mode_config.min_height = constraints->min_nlines; + dev->mode_config.max_width = constraints->max_ncols; + dev->mode_config.max_height = constraints->max_nlines; + dev->mode_config.preferred_depth = 24; + dev->mode_config.funcs = &st7571_mode_config_funcs; + + return 0; +} + 
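+/* + * st7571_probe() wires the init helpers below together in the usual order for a simple, fixed-mode pipeline. A condensed sketch of that sequence (error handling omitted; see st7571_probe() further down for the full version): + * + * st7571_mode_config_init(st7571); + * st7571_plane_init(st7571, st7571->pformat); + * st7571_crtc_init(st7571); + * st7571_encoder_init(st7571); + * st7571_connector_init(st7571); + * drm_mode_config_reset(&st7571->dev); + * drm_dev_register(&st7571->dev, 0); + * drm_client_setup(&st7571->dev, NULL); + */ +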
+static int st7571_plane_init(struct st7571_device *st7571, + const struct st7571_panel_format *pformat) +{ + struct drm_plane *primary_plane = &st7571->primary_plane; + struct drm_device *dev = &st7571->dev; + int ret; + + ret = drm_universal_plane_init(dev, primary_plane, 0, + &st7571_primary_plane_funcs, + pformat->formats, + pformat->nformats, + st7571_primary_plane_fmtmods, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_plane_helper_add(primary_plane, &st7571_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); + + return 0; +} + +static int st7571_crtc_init(struct st7571_device *st7571) +{ + struct drm_plane *primary_plane = &st7571->primary_plane; + struct drm_crtc *crtc = &st7571->crtc; + struct drm_device *dev = &st7571->dev; + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &st7571_crtc_funcs, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &st7571_crtc_helper_funcs); + + return 0; +} + +static int st7571_encoder_init(struct st7571_device *st7571) +{ + struct drm_encoder *encoder = &st7571->encoder; + struct drm_crtc *crtc = &st7571->crtc; + struct drm_device *dev = &st7571->dev; + int ret; + + ret = drm_encoder_init(dev, encoder, &st7571_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + drm_encoder_helper_add(encoder, &st7571_encoder_helper_funcs); + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + return 0; +} + +static int st7571_connector_init(struct st7571_device *st7571) +{ + struct drm_connector *connector = &st7571->connector; + struct drm_encoder *encoder = &st7571->encoder; + struct drm_device *dev = &st7571->dev; + int ret; + + ret = drm_connector_init(dev, connector, &st7571_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + if (ret) + return ret; + + drm_connector_helper_add(connector, &st7571_connector_helper_funcs); + + return drm_connector_attach_encoder(connector, encoder); +} + +DEFINE_DRM_GEM_FOPS(st7571_fops); + +static const struct drm_driver st7571_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + + .fops = &st7571_fops, + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, +}; + +static const struct regmap_bus st7571_regmap_bus = { + .read = st7571_regmap_read, + .write = st7571_regmap_write, +}; + +static const struct regmap_config st7571_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .use_single_write = true, +}; + +static int st7571_validate_parameters(struct st7571_device *st7571) +{ + struct device *dev = st7571->dev.dev; + const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints; + + if (st7571->width_mm == 0) { + dev_err(dev, "Invalid panel width\n"); + return -EINVAL; + } + + if (st7571->height_mm == 0) { + dev_err(dev, "Invalid panel height\n"); + return -EINVAL; + } + + if (st7571->nlines < constraints->min_nlines || + st7571->nlines > constraints->max_nlines) { + dev_err(dev, "Invalid number of display lines\n"); + return -EINVAL; + } + + if (st7571->startline + st7571->nlines > constraints->max_nlines) { + dev_err(dev, "Invalid start line or line count\n"); + return -EINVAL; + } + + if (st7571->ncols < constraints->min_ncols || + st7571->ncols > constraints->max_ncols) { + dev_err(dev, "Invalid number of display columns\n"); + return -EINVAL; + } + + if (st7571->grayscale && !constraints->support_grayscale) { + dev_err(dev, "Grayscale not supported\n"); + return -EINVAL; + } + + 
return 0; +} + +static int st7571_parse_dt(struct st7571_device *st7571) +{ + struct device *dev = &st7571->client->dev; + struct device_node *np = dev->of_node; + struct display_timing dt; + int ret; + + ret = of_get_display_timing(np, "panel-timing", &dt); + if (ret) { + dev_err(dev, "Failed to get display timing from DT\n"); + return ret; + } + + of_property_read_u32(np, "width-mm", &st7571->width_mm); + of_property_read_u32(np, "height-mm", &st7571->height_mm); + st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale"); + + if (st7571->grayscale) { + st7571->pformat = &st7571_grayscale; + st7571->bpp = 2; + } else { + st7571->pformat = &st7571_monochrome; + st7571->bpp = 1; + } + + st7571->startline = dt.vfront_porch.typ; + st7571->nlines = dt.vactive.typ; + st7571->ncols = dt.hactive.typ; + + st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(st7571->reset)) + return PTR_ERR(st7571->reset); + + return 0; +} + +static void st7571_reset(struct st7571_device *st7571) +{ + gpiod_set_value_cansleep(st7571->reset, 1); + fsleep(20); + gpiod_set_value_cansleep(st7571->reset, 0); +} + +static int st7571_lcd_init(struct st7571_device *st7571) +{ + /* + * Most of the initialization sequence is taken directly from the + * reference initialization code in the ST7571 datasheet. + */ + u8 commands[] = { + ST7571_DISPLAY_OFF, + + ST7571_SET_MODE_MSB, + ST7571_SET_MODE_LSB(0x2e), + + ST7571_SET_SEG_SCAN_DIR(0), + ST7571_SET_COM_SCAN_DIR(1), + + ST7571_SET_COM0_MSB, + ST7571_SET_COM0_LSB(0x00), + + ST7571_SET_START_LINE_MSB, + ST7571_SET_START_LINE_LSB(st7571->startline), + + ST7571_OSC_ON, + ST7571_SET_REGULATOR_REG(5), + ST7571_SET_CONTRAST_MSB, + ST7571_SET_CONTRAST_LSB(0x33), + ST7571_SET_LCD_BIAS(0x04), + ST7571_SET_DISPLAY_DUTY_MSB, + ST7571_SET_DISPLAY_DUTY_LSB(st7571->nlines), + + ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */ + ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */ + ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */ + + ST7571_COMMAND_SET_3, + ST7571_SET_COLOR_MODE(st7571->pformat->mode), + ST7571_COMMAND_SET_NORMAL, + + ST7571_SET_REVERSE(0), + ST7571_SET_ENTIRE_DISPLAY_ON(0), + }; + + /* Perform a reset before initializing the controller */ + st7571_reset(st7571); + + return st7571_send_command_list(st7571, commands, ARRAY_SIZE(commands)); +} + +static int st7571_probe(struct i2c_client *client) +{ + struct st7571_device *st7571; + struct drm_device *dev; + int ret; + + st7571 = devm_drm_dev_alloc(&client->dev, &st7571_driver, + struct st7571_device, dev); + if (IS_ERR(st7571)) + return PTR_ERR(st7571); + + dev = &st7571->dev; + st7571->client = client; + i2c_set_clientdata(client, st7571); + st7571->pdata = device_get_match_data(&client->dev); + + ret = st7571_parse_dt(st7571); + if (ret) + return ret; + + ret = st7571_validate_parameters(st7571); + if (ret) + return ret; + + st7571->mode = st7571_mode(st7571); + + /* + * The hardware design could make it hard to detect a NAK on the I2C bus. + * If the adapter does not support protocol mangling, do + * not set the I2C_M_IGNORE_NAK flag, at the expense of possible + * cruft in the logs.
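+ * + * With mangling available, every transfer issued through st7571_regmap_write() carries I2C_M_IGNORE_NAK, so chip-on-glass panels whose ITO routing degrades the acknowledge level still probe and update reliably.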
+ */ + if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING)) + st7571->ignore_nak = true; + + st7571->regmap = devm_regmap_init(&client->dev, &st7571_regmap_bus, + client, &st7571_regmap_config); + if (IS_ERR(st7571->regmap)) { + return dev_err_probe(&client->dev, PTR_ERR(st7571->regmap), + "Failed to initialize regmap\n"); + } + + st7571->hwbuf = devm_kzalloc(&client->dev, + (st7571->nlines * st7571->ncols * st7571->bpp) / 8, + GFP_KERNEL); + if (!st7571->hwbuf) + return -ENOMEM; + + st7571->row = devm_kzalloc(&client->dev, + (st7571->ncols * st7571->bpp), + GFP_KERNEL); + if (!st7571->row) + return -ENOMEM; + + ret = st7571_mode_config_init(st7571); + if (ret) + return dev_err_probe(&client->dev, ret, + "Failed to initialize mode config\n"); + + ret = st7571_plane_init(st7571, st7571->pformat); + if (ret) + return dev_err_probe(&client->dev, ret, + "Failed to initialize primary plane\n"); + + ret = st7571_crtc_init(st7571); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to initialize CRTC\n"); + + ret = st7571_encoder_init(st7571); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to initialize encoder\n"); + + ret = st7571_connector_init(st7571); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to initialize connector\n"); + + drm_mode_config_reset(dev); + + ret = drm_dev_register(dev, 0); + if (ret) + return dev_err_probe(&client->dev, ret, + "Failed to register DRM device\n"); + + drm_client_setup(dev, NULL); + return 0; +} + +static void st7571_remove(struct i2c_client *client) +{ + struct st7571_device *st7571 = i2c_get_clientdata(client); + + drm_dev_unplug(&st7571->dev); +} + +static const struct st7571_panel_data st7571_config = { + .init = st7571_lcd_init, + .constraints = { + .min_nlines = 1, + .max_nlines = 128, + .min_ncols = 128, + .max_ncols = 128, + .support_grayscale = true, + }, +}; + +static const struct of_device_id st7571_of_match[] = { + { .compatible = "sitronix,st7571", .data = &st7571_config }, + {}, +}; +MODULE_DEVICE_TABLE(of, st7571_of_match); + +static const struct i2c_device_id st7571_id[] = { + { "st7571", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, st7571_id); + +static struct i2c_driver st7571_i2c_driver = { + .driver = { + .name = "st7571", + .of_match_table = st7571_of_match, + }, + .probe = st7571_probe, + .remove = st7571_remove, + .id_table = st7571_id, +}; + +module_i2c_driver(st7571_i2c_driver); + +MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>"); +MODULE_DESCRIPTION("DRM Driver for Sitronix ST7571 LCD controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index f8f20d2f6174..e08e5a138420 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, ttm_dev); resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL); - KUNIT_ASSERT_NOT_NULL(test, ttm_dev); + KUNIT_ASSERT_NOT_NULL(test, resv); err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 5bf3c969907c..08a23ab037cb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -235,7 +235,7 @@ static
void ttm_bo_delayed_delete(struct work_struct *work) bo = container_of(work, typeof(*bo), delayed_delete); - dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, + dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); dma_resv_lock(bo->base.resv, NULL); ttm_bo_cleanup_memtype_use(bo); @@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref) drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); ttm_mem_io_free(bdev, bo->resource); - if (!dma_resv_test_signaled(bo->base.resv, + if (!dma_resv_test_signaled(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP) || (want_init_on_free() && (bo->ttm != NULL)) || bo->type == ttm_bo_type_sg || diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a194db83421d..bdfa6ecfef05 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -220,7 +220,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, - .force_alloc = true }; ttm = bo->ttm; diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 7e5a60c55813..769b0ca9be47 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -548,7 +548,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false, - .force_alloc = true }; struct dma_fence *fence; int ret; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 05b3a152cc33..1922988625eb 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -22,13 +22,14 @@ static int udl_usb_suspend(struct usb_interface *interface, pm_message_t message) { struct drm_device *dev = usb_get_intfdata(interface); + struct udl_device *udl = to_udl(dev); int ret; ret = drm_mode_config_helper_suspend(dev); if (ret) return ret; - udl_sync_pending_urbs(dev); + udl_sync_pending_urbs(udl); return 0; } @@ -49,22 +50,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface) return drm_mode_config_helper_resume(dev); } -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. 
- */ -static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct udl_device *udl = to_udl(dev); - - if (!udl->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev); -} - DEFINE_DRM_GEM_FOPS(udl_driver_fops); static const struct drm_driver driver = { @@ -73,7 +58,6 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = udl_driver_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, @@ -126,10 +110,10 @@ static int udl_usb_probe(struct usb_interface *interface, static void udl_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + struct udl_device *udl = to_udl(dev); - drm_kms_helper_poll_fini(dev); - udl_drop_usb(dev); drm_dev_unplug(dev); + udl_drop_usb(udl); } /* diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index be00dc1d87a1..145bb95ccc48 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,18 +50,14 @@ struct urb_list { struct udl_device { struct drm_device drm; - struct device *dev; - struct device *dmadev; + + unsigned long sku_pixel_limit; struct drm_plane primary_plane; struct drm_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; - struct mutex gem_lock; - - int sku_pixel_limit; - struct urb_list urbs; }; @@ -73,22 +69,22 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl) } /* modeset */ -int udl_modeset_init(struct drm_device *dev); +int udl_modeset_init(struct udl_device *udl); struct drm_connector *udl_connector_init(struct drm_device *dev); -struct urb *udl_get_urb(struct drm_device *dev); +struct urb *udl_get_urb(struct udl_device *udl); -int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); -void udl_sync_pending_urbs(struct drm_device *dev); +int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len); +void udl_sync_pending_urbs(struct udl_device *udl); void udl_urb_completion(struct urb *urb); int udl_init(struct udl_device *udl); -int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, +int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, u32 byte_offset, u32 device_byte_offset, u32 byte_width); -int udl_drop_usb(struct drm_device *dev); +int udl_drop_usb(struct udl_device *udl); int udl_select_std_channel(struct udl_device *udl); #endif diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 3ebe2ce55dfd..bc58991a6f14 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -8,6 +8,8 @@ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> */ +#include <linux/unaligned.h> + #include <drm/drm.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> @@ -23,72 +25,99 @@ #define WRITES_IN_FLIGHT (20) #define MAX_VENDOR_DESCRIPTOR_SIZE 256 +#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000 + static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout); +/* + * Try to make sense of whatever we parse. Therefore return @end on + * errors, but don't fail hard. 
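+ * + * Each pair is laid out as a le16 key, a u8 length, then <length> value bytes. For example (illustrative bytes only), 00 02 04 e0 bd 1f 00 encodes key 0x0200 with the 4-byte little-endian value 0x001fbde0, i.e. a limit of 2080224 pixels.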
+ */ +static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end) +{ + u16 key; + u8 len; + + /* read key */ + if (pos >= end - 2) + return end; + key = get_unaligned_le16(pos); + pos += 2; + + /* read value length */ + if (pos >= end - 1) + return end; + len = *pos++; + + /* read value */ + if (pos >= end - len) + return end; + switch (key) { + case 0x0200: { /* maximum number of pixels */ + unsigned int sku_pixel_limit; + + if (len < sizeof(__le32)) + break; + sku_pixel_limit = get_unaligned_le32(pos); + if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT) + break; /* almost 100 MiB, so probably bogus */ + udl->sku_pixel_limit = sku_pixel_limit; + break; + } + default: + break; + } + pos += len; + + return pos; +} + static int udl_parse_vendor_descriptor(struct udl_device *udl) { + struct drm_device *dev = &udl->drm; struct usb_device *udev = udl_to_usb_device(udl); - char *desc; - char *buf; - char *desc_end; - - u8 total_len = 0; + bool detected = false; + void *buf; + int ret; + unsigned int len; + const u8 *desc; + const u8 *desc_end; buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL); if (!buf) - return false; + return -ENOMEM; + + ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */ + 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE); + if (ret < 0) + goto out; + len = ret; + + if (len < 5) + goto out; + desc = buf; + desc_end = desc + len; - total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */ - 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); - if (total_len > 5) { - DRM_INFO("vendor descriptor length:%x data:%11ph\n", - total_len, desc); - - if ((desc[0] != total_len) || /* descriptor length */ - (desc[1] != 0x5f) || /* vendor descriptor type */ - (desc[2] != 0x01) || /* version (2 bytes) */ - (desc[3] != 0x00) || - (desc[4] != total_len - 2)) /* length after type */ - goto unrecognized; - - desc_end = desc + total_len; - desc += 5; /* the fixed header we've already parsed */ - - while (desc < desc_end) { - u8 length; - u16 key; - - key = le16_to_cpu(*((u16 *) desc)); - desc += sizeof(u16); - length = *desc; - desc++; - - switch (key) { - case 0x0200: { /* max_area */ - u32 max_area; - max_area = le32_to_cpu(*((u32 *)desc)); - DRM_DEBUG("DL chip limited to %d pixel modes\n", - max_area); - udl->sku_pixel_limit = max_area; - break; - } - default: - break; - } - desc += length; - } - } + if ((desc[0] != len) || /* descriptor length */ + (desc[1] != 0x5f) || /* vendor descriptor type */ + (desc[2] != 0x01) || /* version (2 bytes) */ + (desc[3] != 0x00) || + (desc[4] != len - 2)) /* length after type */ + goto out; + desc += 5; - goto success; + detected = true; -unrecognized: - /* allow udlfb to load for now even if firmware unrecognized */ - DRM_ERROR("Unrecognized vendor firmware descriptor\n"); + while (desc < desc_end) + desc = udl_parse_key_value_pair(udl, desc, desc_end); -success: +out: + if (!detected) + drm_warn(dev, "Unrecognized vendor firmware descriptor\n"); kfree(buf); - return true; + + return 0; } /* @@ -145,9 +174,8 @@ void udl_urb_completion(struct urb *urb) wake_up(&udl->urbs.sleep); } -static void udl_free_urb_list(struct drm_device *dev) +static void udl_free_urb_list(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); struct urb_node *unode; struct urb *urb; @@ -172,9 +200,8 @@ static void udl_free_urb_list(struct drm_device *dev) wake_up_all(&udl->urbs.sleep); } -static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) +static int udl_alloc_urb_list(struct udl_device *udl, int count, 
size_t size) { - struct udl_device *udl = to_udl(dev); struct urb *urb; struct urb_node *unode; char *buf; @@ -210,7 +237,7 @@ retry: usb_free_urb(urb); if (size > PAGE_SIZE) { size /= 2; - udl_free_urb_list(dev); + udl_free_urb_list(udl); goto retry; } break; @@ -259,9 +286,8 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout) } #define GET_URB_TIMEOUT HZ -struct urb *udl_get_urb(struct drm_device *dev) +struct urb *udl_get_urb(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); struct urb *urb; spin_lock_irq(&udl->urbs.lock); @@ -270,9 +296,8 @@ struct urb *udl_get_urb(struct drm_device *dev) return urb; } -int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) +int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len) { - struct udl_device *udl = to_udl(dev); int ret; if (WARN_ON(len > udl->urbs.size)) { @@ -290,9 +315,9 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) } /* wait until all pending URBs have been processed */ -void udl_sync_pending_urbs(struct drm_device *dev) +void udl_sync_pending_urbs(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); + struct drm_device *dev = &udl->drm; spin_lock_irq(&udl->urbs.lock); /* 2 seconds as a sane timeout */ @@ -308,53 +333,55 @@ int udl_init(struct udl_device *udl) { struct drm_device *dev = &udl->drm; int ret = -ENOMEM; + struct device *dma_dev; DRM_DEBUG("\n"); - udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); - if (!udl->dmadev) + dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (dma_dev) { + drm_dev_set_dma_dev(dev, dma_dev); + put_device(dma_dev); + } else { drm_warn(dev, "buffer sharing not supported"); /* not an error */ + } - mutex_init(&udl->gem_lock); + /* + * Not all devices provide vendor descriptors with device + * information. Initialize to default values of real-world + * devices. It is just enough memory for FullHD. + */ + udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT; - if (!udl_parse_vendor_descriptor(udl)) { - ret = -ENODEV; - DRM_ERROR("firmware not recognized. 
Assume incompatible device\n"); + ret = udl_parse_vendor_descriptor(udl); + if (ret) goto err; - } if (udl_select_std_channel(udl)) DRM_ERROR("Selecting channel failed\n"); - if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { + if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) { DRM_ERROR("udl_alloc_urb_list failed\n"); + ret = -ENOMEM; goto err; } DRM_DEBUG("\n"); - ret = udl_modeset_init(dev); + ret = udl_modeset_init(udl); if (ret) goto err; - drm_kms_helper_poll_init(dev); - return 0; err: if (udl->urbs.count) - udl_free_urb_list(dev); - put_device(udl->dmadev); + udl_free_urb_list(udl); DRM_ERROR("%d\n", ret); return ret; } -int udl_drop_usb(struct drm_device *dev) +int udl_drop_usb(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); - - udl_free_urb_list(dev); - put_device(udl->dmadev); - udl->dmadev = NULL; + udl_free_urb_list(udl); return 0; } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index bbb04f98886a..231e829bd709 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -205,6 +205,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, const struct drm_rect *clip) { struct drm_device *dev = fb->dev; + struct udl_device *udl = to_udl(dev); void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */ int i, ret; char *cmd; @@ -216,7 +217,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, return ret; log_bpp = ret; - urb = udl_get_urb(dev); + urb = udl_get_urb(udl); if (!urb) return -ENOMEM; cmd = urb->transfer_buffer; @@ -226,7 +227,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, const int byte_offset = line_offset + (clip->x1 << log_bpp); const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp; const int byte_width = drm_rect_width(clip) << log_bpp; - ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr, + ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr, &cmd, byte_offset, dev_byte_offset, byte_width); if (ret) @@ -239,7 +240,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length) *cmd++ = UDL_MSG_BULK; len = cmd - (char *)urb->transfer_buffer; - ret = udl_submit_urb(dev, urb, len); + ret = udl_submit_urb(udl, urb, len); } else { udl_urb_completion(urb); } @@ -330,6 +331,7 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = { static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; + struct udl_device *udl = to_udl(dev); struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct drm_display_mode *mode = &crtc_state->mode; struct urb *urb; @@ -339,7 +341,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom if (!drm_dev_enter(dev, &idx)) return; - urb = udl_get_urb(dev); + urb = udl_get_urb(udl); if (!urb) goto out; @@ -355,7 +357,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom buf = udl_vidreg_unlock(buf); buf = udl_dummy_render(buf); - udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer); + udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer); out: drm_dev_exit(idx); @@ -364,6 +366,7 @@ out: static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; + struct udl_device *udl = to_udl(dev); struct urb *urb; char *buf; int idx; @@ -371,7 +374,7 @@ static 
void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato if (!drm_dev_enter(dev, &idx)) return; - urb = udl_get_urb(dev); + urb = udl_get_urb(udl); if (!urb) goto out; @@ -381,7 +384,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato buf = udl_vidreg_unlock(buf); buf = udl_dummy_render(buf); - udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer); + udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer); out: drm_dev_exit(idx); @@ -476,9 +479,9 @@ static const struct drm_mode_config_funcs udl_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -int udl_modeset_init(struct drm_device *dev) +int udl_modeset_init(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); + struct drm_device *dev = &udl->drm; struct drm_plane *primary_plane; struct drm_crtc *crtc; struct drm_encoder *encoder; @@ -535,6 +538,7 @@ int udl_modeset_init(struct drm_device *dev) return ret; drm_mode_config_reset(dev); + drmm_kms_helper_poll_init(dev); return 0; } diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 62224992988f..7d670b3a5293 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -170,7 +170,7 @@ static void udl_compress_hline16( * (that we can only write to, slowly, and can never read), and (optionally) * our shadow copy that tracks what's been sent to that hardware buffer. */ -int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, +int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr, const char *front, char **urb_buf_ptr, u32 byte_offset, u32 device_byte_offset, u32 byte_width) @@ -199,10 +199,10 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, if (cmd >= cmd_end) { int len = cmd - (u8 *) urb->transfer_buffer; - int ret = udl_submit_urb(dev, urb, len); + int ret = udl_submit_urb(udl, urb, len); if (ret) return ret; - urb = udl_get_urb(dev); + urb = udl_get_urb(udl); if (!urb) return -EAGAIN; *urb_ptr = urb; diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 76816f2551c1..7e789e181af0 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -21,74 +21,74 @@ struct v3d_reg_def { }; static const struct v3d_reg_def v3d_hub_reg_defs[] = { - REGDEF(33, 42, V3D_HUB_AXICFG), - REGDEF(33, 71, V3D_HUB_UIFCFG), - REGDEF(33, 71, V3D_HUB_IDENT0), - REGDEF(33, 71, V3D_HUB_IDENT1), - REGDEF(33, 71, V3D_HUB_IDENT2), - REGDEF(33, 71, V3D_HUB_IDENT3), - REGDEF(33, 71, V3D_HUB_INT_STS), - REGDEF(33, 71, V3D_HUB_INT_MSK_STS), - - REGDEF(33, 71, V3D_MMU_CTL), - REGDEF(33, 71, V3D_MMU_VIO_ADDR), - REGDEF(33, 71, V3D_MMU_VIO_ID), - REGDEF(33, 71, V3D_MMU_DEBUG_INFO), - - REGDEF(71, 71, V3D_GMP_STATUS(71)), - REGDEF(71, 71, V3D_GMP_CFG(71)), - REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO), + + REGDEF(V3D_GEN_71, 
V3D_GEN_71, V3D_GMP_STATUS(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)), }; static const struct v3d_reg_def v3d_gca_reg_defs[] = { - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN), - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK), }; static const struct v3d_reg_def v3d_core_reg_defs[] = { - REGDEF(33, 71, V3D_CTL_IDENT0), - REGDEF(33, 71, V3D_CTL_IDENT1), - REGDEF(33, 71, V3D_CTL_IDENT2), - REGDEF(33, 71, V3D_CTL_MISCCFG), - REGDEF(33, 71, V3D_CTL_INT_STS), - REGDEF(33, 71, V3D_CTL_INT_MSK_STS), - REGDEF(33, 71, V3D_CLE_CT0CS), - REGDEF(33, 71, V3D_CLE_CT0CA), - REGDEF(33, 71, V3D_CLE_CT0EA), - REGDEF(33, 71, V3D_CLE_CT1CS), - REGDEF(33, 71, V3D_CLE_CT1CA), - REGDEF(33, 71, V3D_CLE_CT1EA), - - REGDEF(33, 71, V3D_PTB_BPCA), - REGDEF(33, 71, V3D_PTB_BPCS), - - REGDEF(33, 42, V3D_GMP_STATUS(33)), - REGDEF(33, 42, V3D_GMP_CFG(33)), - REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)), - - REGDEF(33, 71, V3D_ERR_FDBGO), - REGDEF(33, 71, V3D_ERR_FDBGB), - REGDEF(33, 71, V3D_ERR_FDBGS), - REGDEF(33, 71, V3D_ERR_STAT), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS), + + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT), }; static const struct v3d_reg_def v3d_csd_reg_defs[] = { - REGDEF(41, 71, V3D_CSD_STATUS), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)), - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7), + REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)), + REGDEF(V3D_GEN_71, 
V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7), }; static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) @@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); seq_printf(m, "TFU: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, "TSY: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); } @@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) seq_printf(m, " QPUs: %d\n", nslc * qups); seq_printf(m, " Semaphores: %d\n", V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, " BCG int: %d\n", (ident2 & V3D_IDENT2_BCG_INT) != 0); } - if (v3d->ver < 40) { + if (v3d->ver < V3D_GEN_41) { seq_printf(m, " Override TMU: %d\n", (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); } @@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) int core = 0; int measure_ms = 1000; - if (v3d->ver >= 40) { + if (v3d->ver >= V3D_GEN_41) { int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, V3D_SET_FIELD_VER(cycle_count_reg, diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 852015214e97..5e997ae8bc9c 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -17,6 +17,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/sched/clock.h> @@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, args->value = 1; return 0; case DRM_V3D_PARAM_SUPPORTS_PERFMON: - args->value = (v3d->ver >= 40); + args->value = (v3d->ver >= V3D_GEN_41); return 0; case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: args->value = 1; @@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = { }; static const struct of_device_id v3d_of_match[] = { - { .compatible = "brcm,2711-v3d" }, - { .compatible = "brcm,2712-v3d" }, - { .compatible = "brcm,7268-v3d" }, - { .compatible = "brcm,7278-v3d" }, + { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 }, + { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 }, + { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 }, + { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 }, {}, }; MODULE_DEVICE_TABLE(of, v3d_of_match); +static void +v3d_idle_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) { + DRM_ERROR("Failed to power up SMS\n"); + } + + v3d_reset_sms(v3d); +} + +static void +v3d_power_off_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) { + DRM_ERROR("Failed to power off SMS\n"); + } +} + static int map_regs(struct v3d_dev *v3d, 
void __iomem **regs, const char *name) { @@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct drm_device *drm; struct v3d_dev *v3d; + enum v3d_gen gen; int ret; u32 mmu_debug; u32 ident1, ident3; @@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drm); + gen = (uintptr_t)of_device_get_match_data(dev); + v3d->ver = gen; + ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) return ret; @@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) return ret; + if (v3d->ver >= V3D_GEN_71) { + ret = map_regs(v3d, &v3d->sms_regs, "sms"); + if (ret) + return ret; + } + v3d->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(v3d->clk)) return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n"); @@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) return ret; } + v3d_idle_sms(v3d); + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); ret = dma_set_mask_and_coherent(dev, mask); @@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) ident1 = V3D_READ(V3D_HUB_IDENT1); v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + /* Make sure that the V3D tech version retrieved from the HW is equal + * to the one advertised by the device tree. + */ + WARN_ON(v3d->ver != gen); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ @@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) } } - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) goto clk_disable; @@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + v3d_power_off_sms(v3d); + clk_disable_unprepare(v3d->clk); } diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 9deaefa0f95b..b51f0b648a08 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -94,11 +94,18 @@ struct v3d_perfmon { u64 values[] __counted_by(ncounters); }; +enum v3d_gen { + V3D_GEN_33 = 33, + V3D_GEN_41 = 41, + V3D_GEN_42 = 42, + V3D_GEN_71 = 71, +}; + struct v3d_dev { struct drm_device drm; /* Short representation (e.g. 33, 41) of the V3D tech version */ - int ver; + enum v3d_gen ver; /* Short representation (e.g. 
5, 6) of the V3D tech revision */ int rev; @@ -111,6 +118,7 @@ struct v3d_dev { void __iomem *core_regs[3]; void __iomem *bridge_regs; void __iomem *gca_regs; + void __iomem *sms_regs; struct clk *clk; struct reset_control *reset; @@ -199,7 +207,7 @@ to_v3d_dev(struct drm_device *dev) static inline bool v3d_has_csd(struct v3d_dev *v3d) { - return v3d->ver >= 41; + return v3d->ver >= V3D_GEN_41; } #define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev) @@ -261,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence) #define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) #define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) +#define V3D_SMS_IDLE 0x0 +#define V3D_SMS_ISOLATING_FOR_RESET 0xa +#define V3D_SMS_RESETTING 0xb +#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc +#define V3D_SMS_POWER_OFF_STATE 0xd + +#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset)) +#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset)) + #define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) #define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) @@ -539,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); void v3d_gem_destroy(struct drm_device *dev); +void v3d_reset_sms(struct v3d_dev *v3d); void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index b1e681630ded..d7d16da78db3 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core) * type. If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. 
*/ - if (v3d->ver < 40) + if (v3d->ver < V3D_GEN_41) V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush @@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core) static void v3d_idle_gca(struct v3d_dev *v3d) { - if (v3d->ver >= 41) + if (v3d->ver >= V3D_GEN_41) return; V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); @@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d) } void +v3d_reset_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE)); + + if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) && + !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) { + DRM_ERROR("Failed to wait for SMS reset\n"); + } +} + +void v3d_reset(struct v3d_dev *v3d) { struct drm_device *dev = &v3d->drm; @@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d) v3d_idle_axi(v3d, 0); v3d_idle_gca(v3d); + v3d_reset_sms(v3d); v3d_reset_v3d(v3d); v3d_mmu_set_page_table(v3d); @@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d) static void v3d_flush_l3(struct v3d_dev *v3d) { - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); - if (v3d->ver < 33) { + if (v3d->ver < V3D_GEN_33) { V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); } @@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d) static void v3d_invalidate_l2c(struct v3d_dev *v3d, int core) { - if (v3d->ver > 32) + if (v3d->ver >= V3D_GEN_33) return; V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 72b6a119412f..2cca5d3a26a2 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg) /* We shouldn't be triggering these if we have GMP in * always-allowed mode. 
*/ - if (v3d->ver < 71 && (intsts & V3D_INT_GMPV)) + if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV)) dev_err(v3d->drm.dev, "GMP violation\n"); /* V3D 4.2 wires the hub and core IRQs together, so if we & @@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg) u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) << (v3d->va_width - 32)); - static const char *const v3d41_axi_ids[] = { - "L2T", - "PTB", - "PSE", - "TLB", - "CLE", - "TFU", - "MMU", - "GMP", + static const struct { + u32 begin; + u32 end; + const char *client; + } v3d41_axi_ids[] = { + {0x00, 0x20, "L2T"}, + {0x20, 0x21, "PTB"}, + {0x40, 0x41, "PSE"}, + {0x60, 0x80, "TLB"}, + {0x80, 0x88, "CLE"}, + {0xA0, 0xA1, "TFU"}, + {0xC0, 0xE0, "MMU"}, + {0xE0, 0xE1, "GMP"}, + }, v3d71_axi_ids[] = { + {0x00, 0x30, "L2T"}, + {0x30, 0x38, "CLE"}, + {0x38, 0x39, "PTB"}, + {0x39, 0x3A, "PSE"}, + {0x3A, 0x3B, "CSD"}, + {0x40, 0x60, "TLB"}, + {0x60, 0x70, "MMU"}, + {0x7C, 0x7E, "TFU"}, + {0x7F, 0x80, "GMP"}, }; const char *client = "?"; V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL)); - if (v3d->ver >= 41) { - axi_id = axi_id >> 5; - if (axi_id < ARRAY_SIZE(v3d41_axi_ids)) - client = v3d41_axi_ids[axi_id]; + if (v3d->ver >= V3D_GEN_71) { + size_t i; + + axi_id = axi_id & 0x7F; + for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) { + if (axi_id >= v3d71_axi_ids[i].begin && + axi_id < v3d71_axi_ids[i].end) { + client = v3d71_axi_ids[i].client; + break; + } + } + } else if (v3d->ver >= V3D_GEN_41) { + size_t i; + + axi_id = axi_id & 0xFF; + for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) { + if (axi_id >= v3d41_axi_ids[i].begin && + axi_id < v3d41_axi_ids[i].end) { + client = v3d41_axi_ids[i].client; + break; + } + } } - dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", + dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n", client, axi_id, (long long)vio_addr, ((intsts & V3D_HUB_INT_MMU_WRV) ? 
", write violation" : ""), @@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg) status = IRQ_HANDLED; } - if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) { + if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) { dev_err(v3d->drm.dev, "GMP Violation\n"); status = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 3ebda2fa46fc..9a3fe5255874 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d) const struct v3d_perf_counter_desc *counters = NULL; unsigned int max = 0; - if (v3d->ver >= 71) { + if (v3d->ver >= V3D_GEN_71) { counters = v3d_v71_performance_counters; max = ARRAY_SIZE(v3d_v71_performance_counters); - } else if (v3d->ver >= 42) { + } else if (v3d->ver >= V3D_GEN_42) { counters = v3d_v42_performance_counters; max = ARRAY_SIZE(v3d_v42_performance_counters); } diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 6da3c69082bd..c1870265eaee 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -515,4 +515,30 @@ # define V3D_ERR_VPAERGS BIT(1) # define V3D_ERR_VPAEABB BIT(0) +#define V3D_SMS_REE_CS 0x00000 +#define V3D_SMS_TEE_CS 0x00400 +# define V3D_SMS_INTERRUPT BIT(31) +# define V3D_SMS_POWER_OFF BIT(30) +# define V3D_SMS_CLEAR_POWER_OFF BIT(29) +# define V3D_SMS_LOCK BIT(28) +# define V3D_SMS_CLEAR_LOCK BIT(27) +# define V3D_SMS_SVP_MODE_EXIT BIT(26) +# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25) +# define V3D_SMS_SVP_MODE_ENTER BIT(24) +# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23) +# define V3D_SMS_THEIR_MODE_EXIT BIT(22) +# define V3D_SMS_THEIR_MODE_ENTER BIT(21) +# define V3D_SMS_OUR_MODE_EXIT BIT(20) +# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19) +# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10) +# define V3D_SMS_SEQ_PC_SHIFT 10 +# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8) +# define V3D_SMS_HUBCORE_STATUS_SHIFT 8 +# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6) +# define V3D_SMS_NEW_MODE_SHIFT 6 +# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4) +# define V3D_SMS_OLD_MODE_SHIFT 4 +# define V3D_SMS_STATE_MASK V3D_MASK(3, 0) +# define V3D_SMS_STATE_SHIFT 0 + #endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 4a7701a33cf8..466d28ceee28 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -357,11 +357,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica); V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua); V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa); - if (v3d->ver >= 71) + if (v3d->ver >= V3D_GEN_71) V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc); V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios); V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]); - if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { + if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]); V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]); V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]); @@ -412,7 +412,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) * * XXX: Set the CFG7 register */ - if (v3d->ver >= 71) + if (v3d->ver >= V3D_GEN_71) V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0); /* CFG0 write kicks off the job. 
*/ diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c index e70d7c3076ac..577d9a956369 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c @@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = { DRM_SIMPLE_MODE(640, 480, 64, 48) }; +/** + * vc4_mock_atomic_add_output() - Enables an output in a state + * @test: The test context object + * @state: Atomic state to enable the output in. + * @type: Type of the output encoder + * + * Adds an output CRTC and connector to a state, and enables them. + * + * Returns: + * 0 on success, a negative error code on failure. If the error is + * EDEADLK, the entire atomic sequence must be restarted. All other + * errors are fatal. + */ int vc4_mock_atomic_add_output(struct kunit *test, struct drm_atomic_state *state, enum vc4_encoder_type type) @@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test, int ret; encoder = vc4_find_encoder_by_type(drm, type); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder); + if (!encoder) + return -ENODEV; crtc = vc4_find_crtc_for_encoder(test, drm, encoder); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc); + if (!crtc) + return -ENODEV; output = encoder_to_vc4_dummy_output(encoder); conn = &output->connector; conn_state = drm_atomic_get_connector_state(state, conn); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); - KUNIT_EXPECT_EQ(test, ret, 0); + if (ret) + return ret; crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode); - KUNIT_EXPECT_EQ(test, ret, 0); + if (ret) + return ret; crtc_state->active = true; return 0; } +/** + * vc4_mock_atomic_del_output() - Disables an output in a state + * @test: The test context object + * @state: Atomic state to disable the output in. + * @type: Type of the output encoder + * + * Adds an output CRTC and connector to a state, and disables them. + * + * Returns: + * 0 on success, a negative error code on failure. If the error is + * EDEADLK, the entire atomic sequence must be restarted. All other + * errors are fatal. 
+ */ int vc4_mock_atomic_del_output(struct kunit *test, struct drm_atomic_state *state, enum vc4_encoder_type type) @@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test, int ret; encoder = vc4_find_encoder_by_type(drm, type); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder); + if (!encoder) + return -ENODEV; crtc = vc4_find_crtc_for_encoder(test, drm, encoder); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc); + if (!crtc) + return -ENODEV; crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); crtc_state->active = false; ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); - KUNIT_ASSERT_EQ(test, ret, 0); + if (ret) + return ret; output = encoder_to_vc4_dummy_output(encoder); conn = &output->connector; conn_state = drm_atomic_get_connector_state(state, conn); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); - KUNIT_ASSERT_EQ(test, ret, 0); + if (ret) + return ret; return 0; } diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c index 992e8f5c5c6e..d1f694029169 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c +++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c @@ -20,7 +20,6 @@ struct pv_muxing_priv { struct vc4_dev *vc4; - struct drm_atomic_state *state; }; static bool check_fifo_conflict(struct kunit *test, @@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test) { const struct pv_muxing_param *params = test->param_value; const struct pv_muxing_priv *priv = test->priv; - struct drm_atomic_state *state = priv->state; + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + struct drm_device *drm; + struct vc4_dev *vc4; unsigned int i; int ret; + drm_modeset_acquire_init(&ctx, 0); + + vc4 = priv->vc4; + drm = &vc4->base; + +retry: + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); for (i = 0; i < params->nencoders; i++) { enum vc4_encoder_type enc_type = params->encoders[i]; ret = vc4_mock_atomic_add_output(test, state, enc_type); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } KUNIT_ASSERT_EQ(test, ret, 0); } ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } KUNIT_EXPECT_EQ(test, ret, 0); KUNIT_EXPECT_TRUE(test, @@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test) KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type, params->check_fn)); } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } static void drm_vc4_test_pv_muxing_invalid(struct kunit *test) { const struct pv_muxing_param *params = test->param_value; const struct pv_muxing_priv *priv = test->priv; - struct drm_atomic_state *state = priv->state; + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + struct drm_device *drm; + struct vc4_dev *vc4; unsigned int i; int ret; + drm_modeset_acquire_init(&ctx, 0); + + vc4 = priv->vc4; + drm = &vc4->base; + +retry: + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); + for (i = 0; i < params->nencoders; i++) { enum vc4_encoder_type enc_type = params->encoders[i]; ret = 
vc4_mock_atomic_add_output(test, state, enc_type); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } KUNIT_ASSERT_EQ(test, ret, 0); } ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } KUNIT_EXPECT_LT(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } static int vc4_pv_muxing_test_init(struct kunit *test) { const struct pv_muxing_param *params = test->param_value; - struct drm_modeset_acquire_ctx ctx; struct pv_muxing_priv *priv; - struct drm_device *drm; struct vc4_dev *vc4; priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); @@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); priv->vc4 = vc4; - drm_modeset_acquire_init(&ctx, 0); - - drm = &vc4->base; - priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state); - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - return 0; } @@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; +retry_first: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); new_hvs_state = vc4_hvs_get_new_global_state(state); @@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); +retry_second: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); new_hvs_state = vc4_hvs_get_new_global_state(state); @@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; +retry_first: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if 
(!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); new_hvs_state = vc4_hvs_get_new_global_state(state); @@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test) ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); +retry_second: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); new_hvs_state = vc4_hvs_get_new_global_state(state); @@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku drm_modeset_acquire_init(&ctx, 0); drm = &vc4->base; +retry_first: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_first; + } KUNIT_ASSERT_EQ(test, ret, 0); - ret = drm_atomic_helper_swap_state(state, false); KUNIT_ASSERT_EQ(test, ret, 0); +retry_second: state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); ret = drm_atomic_check_only(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry_second; + } KUNIT_ASSERT_EQ(test, ret, 0); new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 779b22efe27b..458e5d987964 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -552,8 +552,6 @@ struct vc4_dsi { struct vc4_encoder encoder; struct mipi_dsi_host dsi_host; - struct kref kref; - struct platform_device *pdev; struct drm_bridge *out_bridge; @@ -1160,12 +1158,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge, } static int vc4_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); /* Attach the panel or bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->out_bridge, + return drm_bridge_attach(encoder, dsi->out_bridge, &dsi->bridge, flags); } @@ -1621,29 +1620,11 @@ static void vc4_dsi_dma_chan_release(void *ptr) dsi->reg_dma_chan = NULL; } -static void vc4_dsi_release(struct kref *kref) -{ - struct vc4_dsi *dsi = - container_of(kref, struct vc4_dsi, kref); - - kfree(dsi); -} - -static void vc4_dsi_get(struct vc4_dsi *dsi) -{ - kref_get(&dsi->kref); -} - -static void vc4_dsi_put(struct vc4_dsi *dsi) -{ - kref_put(&dsi->kref, &vc4_dsi_release); -} - static void 
vc4_dsi_release_action(struct drm_device *drm, void *ptr) { struct vc4_dsi *dsi = ptr; - vc4_dsi_put(dsi); + drm_bridge_put(&dsi->bridge); } static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) @@ -1654,7 +1635,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) struct drm_encoder *encoder = &dsi->encoder.base; int ret; - vc4_dsi_get(dsi); + drm_bridge_get(&dsi->bridge); ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi); if (ret) @@ -1809,15 +1790,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct vc4_dsi *dsi; - dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); - if (!dsi) - return -ENOMEM; + dsi = devm_drm_bridge_alloc(&pdev->dev, struct vc4_dsi, bridge, &vc4_dsi_bridge_funcs); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); dev_set_drvdata(dev, dsi); - kref_init(&dsi->kref); - dsi->pdev = pdev; - dsi->bridge.funcs = &vc4_dsi_bridge_funcs; #ifdef CONFIG_OF dsi->bridge.of_node = dev->of_node; #endif @@ -1835,7 +1813,6 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev) struct vc4_dsi *dsi = dev_get_drvdata(dev); mipi_dsi_host_unregister(&dsi->dsi_host); - vc4_dsi_put(dsi); } struct platform_driver vc4_dsi_driver = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 37238a12baa5..a29a6ef266f9 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -51,6 +51,7 @@ #include <linux/reset.h> #include <sound/dmaengine_pcm.h> #include <sound/hdmi-codec.h> +#include <sound/jack.h> #include <sound/pcm_drm_eld.h> #include <sound/pcm_params.h> #include <sound/soc.h> @@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, * the lock for now. */ + drm_atomic_helper_connector_hdmi_hotplug(connector, status); + if (status == connector_status_disconnected) { cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return; } - drm_atomic_helper_connector_hdmi_hotplug(connector, status); - cec_s_phys_addr(vc4_hdmi->cec_adap, connector->display_info.source_physical_address, false); @@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = { .shutdown = vc4_hdmi_audio_shutdown, }; +static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd) +{ + struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + int ret; + + ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT, + &vc4_hdmi->hdmi_jack); + if (ret) { + dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret); + return ret; + } + + return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL); +} + static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) { const struct vc4_hdmi_register *mai_data = @@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dai_link->cpus->dai_name = dev_name(dev); dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev); dai_link->platforms->name = dev_name(dev); + dai_link->init = vc4_hdmi_codec_init; card->dai_link = dai_link; card->num_links = 1; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index e3d989ca302b..a31157c99bee 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -4,6 +4,7 @@ #include <drm/drm_connector.h> #include <media/cec.h> #include <sound/dmaengine_pcm.h> +#include <sound/hdmi-codec.h> #include <sound/soc.h> #include "vc4_drv.h" @@ 
-211,6 +212,12 @@ struct vc4_hdmi { * KMS hooks. Protected by @mutex. */ enum hdmi_colorspace output_format; + + /** + * @hdmi_jack: Represents the connection state of the HDMI plug, for + * ALSA jack detection. + */ + struct snd_soc_jack hdmi_jack; }; #define connector_to_vc4_hdmi(_connector) \ diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index c5e84d3494d2..056d344c5411 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane, /* HPPF plane 1 */ vc4_dlist_write(vc4_state, kernel); /* VPPF plane 1 */ - vc4_dlist_write(vc4_state, kernel); + vc4_dlist_write(vc4_state, kernel); } } diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 37bb1fb58cf9..b611c7c8ca2d 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -53,25 +53,10 @@ static void vgem_fence_release(struct dma_fence *base) dma_fence_free(&fence->base); } -static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) -{ - snprintf(str, size, "%llu", fence->seqno); -} - -static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, - int size) -{ - snprintf(str, size, "%llu", - dma_fence_is_signaled(fence) ? fence->seqno : 0); -} - static const struct dma_fence_ops vgem_fence_ops = { .get_driver_name = vgem_fence_get_driver_name, .get_timeline_name = vgem_fence_get_timeline_name, .release = vgem_fence_release, - - .fence_value_str = vgem_fence_value_str, - .timeline_value_str = vgem_fence_timeline_value_str, }; static void vgem_fence_timeout(struct timer_list *t) diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f28357dbde35..44c1d8ef3c4d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f) return false; } -static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size) -{ - snprintf(str, size, "[%llu, %llu]", f->context, f->seqno); -} - -static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str, - int size) -{ - struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f); - - snprintf(str, size, "%llu", - (u64)atomic64_read(&fence->drv->last_fence_id)); -} - static const struct dma_fence_ops virtio_gpu_fence_ops = { .get_driver_name = virtio_gpu_get_driver_name, .get_timeline_name = virtio_gpu_get_timeline_name, .signaled = virtio_gpu_fence_signaled, - .fence_value_str = virtio_gpu_fence_value_str, - .timeline_value_str = virtio_gpu_timeline_value_str, }; struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 87e584add042..698ea7adb951 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -366,7 +366,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, return 0; obj = new_state->fb->obj[0]; - if (bo->dumb || obj->import_attach) { + if (bo->dumb || drm_gem_is_imported(obj)) { vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0); @@ -374,7 +374,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, return -ENOMEM; } - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj); if (ret) goto err_fence; @@ -417,7 +417,7 
@@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, } obj = state->fb->obj[0]; - if (obj->import_attach) + if (drm_gem_is_imported(obj)) virtio_gpu_cleanup_imported_obj(obj); } @@ -508,11 +508,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane, bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]); - /* Only support mapped shmem bo */ - if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr) + if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base)) return -ENODEV; - iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr); + if (bo->base.vaddr) { + iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr); + } else { + struct drm_gem_shmem_object *shmem = &bo->base; + + if (!shmem->pages) + return -ENODEV; + /* map scanout buffer later */ + sb->pages = shmem->pages; + } sb->format = plane->state->fb->format; sb->height = plane->state->fb->height; diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 4de2a63ccd18..1118a0250279 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .ops = { - .cache_sgt_mapping = true, .attach = virtio_dma_buf_attach, .detach = drm_gem_map_detach, .map_dma_buf = virtgpu_gem_map_dma_buf, @@ -205,16 +204,15 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); struct virtio_gpu_device *vgdev = obj->dev->dev_private; - struct dma_buf_attachment *attach = obj->import_attach; - if (attach) { - struct dma_buf *dmabuf = attach->dmabuf; + if (drm_gem_is_imported(obj)) { + struct dma_buf *dmabuf = obj->dma_buf; dma_resv_lock(dmabuf->resv, NULL); virtgpu_dma_buf_unmap(bo); dma_resv_unlock(dmabuf->resv); - dma_buf_detach(dmabuf, attach); + dma_buf_detach(dmabuf, obj->import_attach); dma_buf_put(dmabuf); } diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig index 9def079f685b..3c02f928ffe6 100644 --- a/drivers/gpu/drm/vkms/Kconfig +++ b/drivers/gpu/drm/vkms/Kconfig @@ -14,3 +14,18 @@ config DRM_VKMS a VKMS. If M is selected the module will be called vkms. + +config DRM_VKMS_KUNIT_TEST + tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS + depends on DRM_VKMS && KUNIT + default KUNIT_ALL_TESTS + help + This builds unit tests for VKMS. This option is not useful for + distributions or general kernels, but only for kernel + developers working on VKMS. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + + If in doubt, say "N". 
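With DRM_VKMS_KUNIT_TEST enabled, the new suite can be run from the kernel source tree with the KUnit wrapper, pointing it at the .kunitconfig added below (a typical invocation; the exact flags depend on the kunit.py version in the tree):

	./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/vkms/tests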
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 1b28a6a32948..d657865e573f 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -6,6 +6,9 @@ vkms-y := \ vkms_formats.o \ vkms_crtc.o \ vkms_composer.o \ - vkms_writeback.o + vkms_writeback.o \ + vkms_connector.o \ + vkms_config.o obj-$(CONFIG_DRM_VKMS) += vkms.o +obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig new file mode 100644 index 000000000000..6a2d87068edc --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/.kunitconfig @@ -0,0 +1,4 @@ +CONFIG_KUNIT=y +CONFIG_DRM=y +CONFIG_DRM_VKMS=y +CONFIG_DRM_VKMS_KUNIT_TEST=y diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile new file mode 100644 index 000000000000..9ded37b67a46 --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c new file mode 100644 index 000000000000..ff4566cf9925 --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <kunit/test.h> + +#include "../vkms_config.h" + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static size_t vkms_config_get_num_planes(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + size_t count = 0; + + vkms_config_for_each_plane(config, plane_cfg) + count++; + + return count; +} + +static size_t vkms_config_get_num_encoders(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + size_t count = 0; + + vkms_config_for_each_encoder(config, encoder_cfg) + count++; + + return count; +} + +static size_t vkms_config_get_num_connectors(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + size_t count = 0; + + vkms_config_for_each_connector(config, connector_cfg) + count++; + + return count; +} + +static struct vkms_config_plane *get_first_plane(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + + vkms_config_for_each_plane(config, plane_cfg) + return plane_cfg; + + return NULL; +} + +static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config) +{ + struct vkms_config_crtc *crtc_cfg; + + vkms_config_for_each_crtc(config, crtc_cfg) + return crtc_cfg; + + return NULL; +} + +static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + + vkms_config_for_each_encoder(config, encoder_cfg) + return encoder_cfg; + + return NULL; +} + +static struct vkms_config_connector *get_first_connector(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) + return connector_cfg; + + return NULL; +} + +struct default_config_case { + bool enable_cursor; + bool enable_writeback; + bool enable_overlay; +}; + +static void vkms_config_test_empty_config(struct kunit *test) +{ + struct vkms_config *config; + const char *dev_name = "test"; + + config = vkms_config_create(dev_name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* The dev_name string and the config have different lifetimes */ + dev_name = NULL; + KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test"); + + KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0); + 
KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static struct default_config_case default_config_cases[] = { + { false, false, false }, + { true, false, false }, + { true, true, false }, + { true, false, true }, + { false, true, false }, + { false, true, true }, + { false, false, true }, + { true, true, true }, +}; + +KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL); + +static void vkms_config_test_default_config(struct kunit *test) +{ + const struct default_config_case *params = test->param_value; + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + int n_primaries = 0; + int n_cursors = 0; + int n_overlays = 0; + + config = vkms_config_default_create(params->enable_cursor, + params->enable_writeback, + params->enable_overlay); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Planes */ + vkms_config_for_each_plane(config, plane_cfg) { + switch (vkms_config_plane_get_type(plane_cfg)) { + case DRM_PLANE_TYPE_PRIMARY: + n_primaries++; + break; + case DRM_PLANE_TYPE_CURSOR: + n_cursors++; + break; + case DRM_PLANE_TYPE_OVERLAY: + n_overlays++; + break; + default: + KUNIT_FAIL_AND_ABORT(test, "Unknown plane type"); + } + } + KUNIT_EXPECT_EQ(test, n_primaries, 1); + KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0); + KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0); + + /* CRTCs */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1); + + crtc_cfg = get_first_crtc(config); + KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg), + params->enable_writeback); + + vkms_config_for_each_plane(config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + int n_possible_crtcs = 0; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc); + n_possible_crtcs++; + } + KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1); + } + + /* Encoders */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1); + + /* Connectors */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1); + + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_get_planes(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + int n_planes = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_plane(config, plane_cfg) + n_planes++; + KUNIT_ASSERT_EQ(test, n_planes, 0); + + plane_cfg1 = vkms_config_create_plane(config); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg1) + KUNIT_FAIL(test, "Unexpected plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 1); + n_planes = 0; + + plane_cfg2 = vkms_config_create_plane(config); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2) + KUNIT_FAIL(test, "Unexpected plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 2); + n_planes = 0; + + vkms_config_destroy_plane(plane_cfg1); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg2) + KUNIT_FAIL(test, "Unexpected 
plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 1); + + vkms_config_destroy(config); +} + +static void vkms_config_test_get_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0); + vkms_config_for_each_crtc(config, crtc_cfg) + KUNIT_FAIL(test, "Unexpected CRTC"); + + crtc_cfg1 = vkms_config_create_crtc(config); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + crtc_cfg2 = vkms_config_create_crtc(config); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + vkms_config_destroy_crtc(config, crtc_cfg2); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + vkms_config_destroy(config); +} + +static void vkms_config_test_get_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + int n_encoders = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_encoder(config, encoder_cfg) + n_encoders++; + KUNIT_ASSERT_EQ(test, n_encoders, 0); + + encoder_cfg1 = vkms_config_create_encoder(config); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + encoder_cfg2 = vkms_config_create_encoder(config); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 2); + n_encoders = 0; + + vkms_config_destroy_encoder(config, encoder_cfg2); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + vkms_config_destroy(config); +} + +static void vkms_config_test_get_connectors(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; + int n_connectors = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_connector(config, connector_cfg) + n_connectors++; + KUNIT_ASSERT_EQ(test, n_connectors, 0); + + connector_cfg1 = vkms_config_create_connector(config); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, n_connectors, 1); + n_connectors = 0; + + connector_cfg2 = vkms_config_create_connector(config); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1 && + connector_cfg != connector_cfg2) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, 
n_connectors, 2); + n_connectors = 0; + + vkms_config_destroy_connector(connector_cfg2); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, n_connectors, 1); + n_connectors = 0; + + vkms_config_destroy(config); +} + +static void vkms_config_test_invalid_plane_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No planes */ + plane_cfg = get_first_plane(config); + vkms_config_destroy_plane(plane_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many planes */ + for (n = 0; n <= 32; n++) + vkms_config_create_plane(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_valid_plane_type(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + int err; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + plane_cfg = get_first_plane(config); + vkms_config_destroy_plane(plane_cfg); + + crtc_cfg = get_first_crtc(config); + + /* Invalid: No primary plane */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Multiple primary planes */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: One primary plane */ + vkms_config_destroy_plane(plane_cfg); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + /* Invalid: Multiple cursor planes */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: One primary and one cursor plane */ + vkms_config_destroy_plane(plane_cfg); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + /* Invalid: Second CRTC without primary plane */ + crtc_cfg = vkms_config_create_crtc(config); + encoder_cfg = vkms_config_create_encoder(config); + err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: Second CRTC with a primary plane */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = 
vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+	KUNIT_EXPECT_EQ(test, err, 0);
+	KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+	vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+	struct vkms_config *config;
+	struct vkms_config_plane *plane_cfg;
+	struct vkms_config_crtc *crtc_cfg;
+
+	config = vkms_config_default_create(false, false, false);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+	plane_cfg = get_first_plane(config);
+	crtc_cfg = get_first_crtc(config);
+
+	/* Invalid: Primary plane without a possible CRTC */
+	vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+	struct vkms_config *config;
+	struct vkms_config_crtc *crtc_cfg;
+	int n;
+
+	config = vkms_config_default_create(false, false, false);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+	/* Invalid: No CRTCs */
+	crtc_cfg = get_first_crtc(config);
+	vkms_config_destroy_crtc(config, crtc_cfg);
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	/* Invalid: Too many CRTCs */
+	for (n = 0; n <= 32; n++)
+		vkms_config_create_crtc(config);
+
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+	struct vkms_config *config;
+	struct vkms_config_encoder *encoder_cfg;
+	int n;
+
+	config = vkms_config_default_create(false, false, false);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+	/* Invalid: No encoders */
+	encoder_cfg = get_first_encoder(config);
+	vkms_config_destroy_encoder(config, encoder_cfg);
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	/* Invalid: Too many encoders */
+	for (n = 0; n <= 32; n++)
+		vkms_config_create_encoder(config);
+
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+	struct vkms_config *config;
+	struct vkms_config_plane *plane_cfg;
+	struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+	struct vkms_config_encoder *encoder_cfg;
+	int err;
+
+	config = vkms_config_default_create(false, false, false);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+	crtc_cfg1 = get_first_crtc(config);
+
+	/* Invalid: Encoder without a possible CRTC */
+	encoder_cfg = vkms_config_create_encoder(config);
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	/* Valid: Second CRTC with shared encoder */
+	crtc_cfg2 = vkms_config_create_crtc(config);
+
+	plane_cfg = vkms_config_create_plane(config);
+	vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+	err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+	KUNIT_EXPECT_EQ(test, err, 0);
+
+	err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+	KUNIT_EXPECT_EQ(test, err, 0);
+
+	err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+	KUNIT_EXPECT_EQ(test, err, 0);
+
+	KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+	/* Invalid: Second CRTC without encoders */
+	vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+	KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+	/* Valid: First CRTC with two possible encoders */
+	vkms_config_destroy_plane(plane_cfg);
+	vkms_config_destroy_crtc(config, crtc_cfg2);
+	KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+	vkms_config_destroy(config);
+} + +static void vkms_config_test_invalid_connector_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No connectors */ + connector_cfg = get_first_connector(config); + vkms_config_destroy_connector(connector_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many connectors */ + for (n = 0; n <= 32; n++) + connector_cfg = vkms_config_create_connector(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + encoder_cfg = get_first_encoder(config); + connector_cfg = get_first_connector(config); + + /* Invalid: Connector without a possible encoder */ + vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_attach_different_configs(struct kunit *test) +{ + struct vkms_config *config1, *config2; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; + int err; + + config1 = vkms_config_create("test1"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1); + + config2 = vkms_config_create("test2"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2); + + plane_cfg1 = vkms_config_create_plane(config1); + crtc_cfg1 = vkms_config_create_crtc(config1); + encoder_cfg1 = vkms_config_create_encoder(config1); + connector_cfg1 = vkms_config_create_connector(config1); + + plane_cfg2 = vkms_config_create_plane(config2); + crtc_cfg2 = vkms_config_create_crtc(config2); + encoder_cfg2 = vkms_config_create_encoder(config2); + connector_cfg2 = vkms_config_create_connector(config2); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2); + + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + + vkms_config_destroy(config1); + vkms_config_destroy(config2); +} + +static void vkms_config_test_plane_attach_crtc(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *overlay_cfg; + struct vkms_config_plane *primary_cfg; + 
struct vkms_config_plane *cursor_cfg; + struct vkms_config_crtc *crtc_cfg; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + overlay_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY); + primary_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY); + cursor_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR); + + crtc_cfg = vkms_config_create_crtc(config); + + /* No primary or cursor planes */ + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Overlay plane, but no primary or cursor planes */ + err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Primary plane, attaching it twice must fail */ + err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg); + KUNIT_EXPECT_NE(test, err, 0); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_primary_plane(config, crtc_cfg), + primary_cfg); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Primary and cursor planes */ + err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_primary_plane(config, crtc_cfg), + primary_cfg); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_cursor_plane(config, crtc_cfg), + cursor_cfg); + + /* Detach the overlay and primary planes and destroy the cursor plane */ + vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg); + vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg); + vkms_config_destroy_plane(cursor_cfg); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + int n_crtcs = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + plane_cfg1 = vkms_config_create_plane(config); + plane_cfg2 = vkms_config_create_plane(config); + crtc_cfg1 = vkms_config_create_crtc(config); + crtc_cfg2 = vkms_config_create_crtc(config); + + /* No possible CRTCs */ + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Plane 1 attached to CRTC 1 and 2 */ + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 2); + n_crtcs = 0;
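+ + /* + * vkms_config_plane_for_each_possible_crtc() expands to xa_for_each(), + * which re-initializes @idx on entry, so the same index variable can be + * reused across these consecutive loops without being reset. + */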
+ + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */ + vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2); + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + n_crtcs = 0; + + err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + + vkms_config_destroy(config); +} + +static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + int n_crtcs = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + encoder_cfg1 = vkms_config_create_encoder(config); + encoder_cfg2 = vkms_config_create_encoder(config); + crtc_cfg1 = vkms_config_create_crtc(config); + crtc_cfg2 = vkms_config_create_crtc(config); + + /* No possible CRTCs */ + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Encoder 1 attached to CRTC 1 and 2 */ + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 2); + n_crtcs = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */ + vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2); + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + n_crtcs = 0; + + err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + + vkms_config_destroy(config); +} + +static void vkms_config_test_connector_get_possible_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + int n_encoders = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + connector_cfg1 = vkms_config_create_connector(config); + 
connector_cfg2 = vkms_config_create_connector(config); + encoder_cfg1 = vkms_config_create_encoder(config); + encoder_cfg2 = vkms_config_create_encoder(config); + + /* No possible encoders */ + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + /* Connector 1 attached to encoders 1 and 2 */ + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg1 && + possible_encoder != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 2); + n_encoders = 0; + + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */ + vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2); + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + + vkms_config_destroy(config); +} + +static struct kunit_case vkms_config_test_cases[] = { + KUNIT_CASE(vkms_config_test_empty_config), + KUNIT_CASE_PARAM(vkms_config_test_default_config, + default_config_gen_params), + KUNIT_CASE(vkms_config_test_get_planes), + KUNIT_CASE(vkms_config_test_get_crtcs), + KUNIT_CASE(vkms_config_test_get_encoders), + KUNIT_CASE(vkms_config_test_get_connectors), + KUNIT_CASE(vkms_config_test_invalid_plane_number), + KUNIT_CASE(vkms_config_test_valid_plane_type), + KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs), + KUNIT_CASE(vkms_config_test_invalid_crtc_number), + KUNIT_CASE(vkms_config_test_invalid_encoder_number), + KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs), + KUNIT_CASE(vkms_config_test_invalid_connector_number), + KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders), + KUNIT_CASE(vkms_config_test_attach_different_configs), + KUNIT_CASE(vkms_config_test_plane_attach_crtc), + KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), + KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs), + KUNIT_CASE(vkms_config_test_connector_get_possible_encoders), + {} +}; + +static struct kunit_suite vkms_config_test_suite = { + .name = "vkms-config", + .test_cases = vkms_config_test_cases, +}; + +kunit_test_suite(vkms_config_test_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit test for vkms config utility"); diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c new file mode 100644 index 000000000000..a1df5659b0fb --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -0,0
+1,640 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/slab.h> + +#include <drm/drm_print.h> +#include <drm/drm_debugfs.h> +#include <kunit/visibility.h> + +#include "vkms_config.h" + +struct vkms_config *vkms_config_create(const char *dev_name) +{ + struct vkms_config *config; + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) + return ERR_PTR(-ENOMEM); + + config->dev_name = kstrdup_const(dev_name, GFP_KERNEL); + if (!config->dev_name) { + kfree(config); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&config->planes); + INIT_LIST_HEAD(&config->crtcs); + INIT_LIST_HEAD(&config->encoders); + INIT_LIST_HEAD(&config->connectors); + + return config; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); + +struct vkms_config *vkms_config_default_create(bool enable_cursor, + bool enable_writeback, + bool enable_overlay) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; + int n; + + config = vkms_config_create(DEFAULT_DEVICE_NAME); + if (IS_ERR(config)) + return config; + + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + + crtc_cfg = vkms_config_create_crtc(config); + if (IS_ERR(crtc_cfg)) + goto err_alloc; + vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback); + + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; + + if (enable_overlay) { + for (n = 0; n < NUM_OVERLAY_PLANES; n++) { + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + + vkms_config_plane_set_type(plane_cfg, + DRM_PLANE_TYPE_OVERLAY); + + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; + } + } + + if (enable_cursor) { + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; + } + + encoder_cfg = vkms_config_create_encoder(config); + if (IS_ERR(encoder_cfg)) + goto err_alloc; + + if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg)) + goto err_alloc; + + connector_cfg = vkms_config_create_connector(config); + if (IS_ERR(connector_cfg)) + goto err_alloc; + + if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg)) + goto err_alloc; + + return config; + +err_alloc: + vkms_config_destroy(config); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create); + +void vkms_config_destroy(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg, *plane_tmp; + struct vkms_config_crtc *crtc_cfg, *crtc_tmp; + struct vkms_config_encoder *encoder_cfg, *encoder_tmp; + struct vkms_config_connector *connector_cfg, *connector_tmp; + + list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link) + vkms_config_destroy_plane(plane_cfg); + + list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link) + vkms_config_destroy_crtc(config, crtc_cfg); + + list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link) + vkms_config_destroy_encoder(config, encoder_cfg); + + list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link) + vkms_config_destroy_connector(connector_cfg); + + kfree_const(config->dev_name); + kfree(config); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy); + +static bool 
valid_plane_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_planes; + + n_planes = list_count_nodes((struct list_head *)&config->planes); + if (n_planes <= 0 || n_planes >= 32) { + drm_info(dev, "The number of planes must be between 1 and 31\n"); + return false; + } + + return true; +} + +static bool valid_planes_for_crtc(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_plane *plane_cfg; + bool has_primary_plane = false; + bool has_cursor_plane = false; + + vkms_config_for_each_plane(config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + enum drm_plane_type type; + + type = vkms_config_plane_get_type(plane_cfg); + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc != crtc_cfg) + continue; + + if (type == DRM_PLANE_TYPE_PRIMARY) { + if (has_primary_plane) { + drm_info(dev, "Multiple primary planes\n"); + return false; + } + + has_primary_plane = true; + } else if (type == DRM_PLANE_TYPE_CURSOR) { + if (has_cursor_plane) { + drm_info(dev, "Multiple cursor planes\n"); + return false; + } + + has_cursor_plane = true; + } + } + } + + if (!has_primary_plane) { + drm_info(dev, "Primary plane not found\n"); + return false; + } + + return true; +} + +static bool valid_plane_possible_crtcs(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_plane *plane_cfg; + + vkms_config_for_each_plane(config, plane_cfg) { + if (xa_empty(&plane_cfg->possible_crtcs)) { + drm_info(dev, "All planes must have at least one possible CRTC\n"); + return false; + } + } + + return true; +} + +static bool valid_crtc_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_crtcs; + + n_crtcs = list_count_nodes((struct list_head *)&config->crtcs); + if (n_crtcs <= 0 || n_crtcs >= 32) { + drm_info(dev, "The number of CRTCs must be between 1 and 31\n"); + return false; + } + + return true; +} + +static bool valid_encoder_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_encoders; + + n_encoders = list_count_nodes((struct list_head *)&config->encoders); + if (n_encoders <= 0 || n_encoders >= 32) { + drm_info(dev, "The number of encoders must be between 1 and 31\n"); + return false; + } + + return true; +} + +static bool valid_encoder_possible_crtcs(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? 
&config->dev->drm : NULL; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + + vkms_config_for_each_encoder(config, encoder_cfg) { + if (xa_empty(&encoder_cfg->possible_crtcs)) { + drm_info(dev, "All encoders must have at least one possible CRTC\n"); + return false; + } + } + + vkms_config_for_each_crtc(config, crtc_cfg) { + bool crtc_has_encoder = false; + + vkms_config_for_each_encoder(config, encoder_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, + idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + crtc_has_encoder = true; + } + } + + if (!crtc_has_encoder) { + drm_info(dev, "All CRTCs must have at least one possible encoder\n"); + return false; + } + } + + return true; +} + +static bool valid_connector_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_connectors; + + n_connectors = list_count_nodes((struct list_head *)&config->connectors); + if (n_connectors <= 0 || n_connectors >= 32) { + drm_info(dev, "The number of connectors must be between 1 and 31\n"); + return false; + } + + return true; +} + +static bool valid_connector_possible_encoders(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) { + if (xa_empty(&connector_cfg->possible_encoders)) { + drm_info(dev, + "All connectors must have at least one possible encoder\n"); + return false; + } + } + + return true; +} + +bool vkms_config_is_valid(const struct vkms_config *config) +{ + struct vkms_config_crtc *crtc_cfg; + + if (!valid_plane_number(config)) + return false; + + if (!valid_crtc_number(config)) + return false; + + if (!valid_encoder_number(config)) + return false; + + if (!valid_connector_number(config)) + return false; + + if (!valid_plane_possible_crtcs(config)) + return false; + + vkms_config_for_each_crtc(config, crtc_cfg) { + if (!valid_planes_for_crtc(config, crtc_cfg)) + return false; + } + + if (!valid_encoder_possible_crtcs(config)) + return false; + + if (!valid_connector_possible_encoders(config)) + return false; + + return true; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); + +static int vkms_config_show(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + const char *dev_name; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; + + dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); + seq_printf(m, "dev_name=%s\n", dev_name); + + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + seq_puts(m, "plane:\n"); + seq_printf(m, "\ttype=%d\n", + vkms_config_plane_get_type(plane_cfg)); + } + + vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) { + seq_puts(m, "crtc:\n"); + seq_printf(m, "\twriteback=%d\n", + vkms_config_crtc_get_writeback(crtc_cfg)); + } + + vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) + seq_puts(m, "encoder\n"); + + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) + seq_puts(m, "connector\n"); + + return 0; +} + +static const struct drm_debugfs_info vkms_config_debugfs_list[] = { + { "vkms_config", vkms_config_show, 0 }, +}; + +void 
vkms_config_register_debugfs(struct vkms_device *vkms_device) +{ + drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list, + ARRAY_SIZE(vkms_config_debugfs_list)); +} + +struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + + plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL); + if (!plane_cfg) + return ERR_PTR(-ENOMEM); + + plane_cfg->config = config; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC); + + list_add_tail(&plane_cfg->link, &config->planes); + + return plane_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane); + +void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg) +{ + xa_destroy(&plane_cfg->possible_crtcs); + list_del(&plane_cfg->link); + kfree(plane_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane); + +int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + u32 crtc_idx = 0; + + if (plane_cfg->config != crtc_cfg->config) + return -EINVAL; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + return -EEXIST; + } + + return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg, + xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc); + +void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + xa_erase(&plane_cfg->possible_crtcs, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc); + +struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config) +{ + struct vkms_config_crtc *crtc_cfg; + + crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL); + if (!crtc_cfg) + return ERR_PTR(-ENOMEM); + + crtc_cfg->config = config; + vkms_config_crtc_set_writeback(crtc_cfg, false); + + list_add_tail(&crtc_cfg->link, &config->crtcs); + + return crtc_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc); + +void vkms_config_destroy_crtc(struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_plane *plane_cfg; + struct vkms_config_encoder *encoder_cfg; + + vkms_config_for_each_plane(config, plane_cfg) + vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg); + + vkms_config_for_each_encoder(config, encoder_cfg) + vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg); + + list_del(&crtc_cfg->link); + kfree(crtc_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc); + +/** + * vkms_config_crtc_get_plane() - Return the first plane of a given type + * attached to a CRTC + * @config: Configuration containing the CRTC and the plane + * @crtc_cfg: Only find planes attached to this CRTC + * @type: Plane type to search + * + * Returns: + * The first plane found attached to @crtc_cfg with the type @type, or NULL + * if none is found.
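+ * + * If several planes of type @type are attached to @crtc_cfg, the first one + * in the plane list is returned; for primary and cursor planes, + * vkms_config_is_valid() reports such a configuration as invalid.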
+ */ +static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg, + enum drm_plane_type type) +{ + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *possible_crtc; + enum drm_plane_type current_type; + unsigned long idx = 0; + + vkms_config_for_each_plane(config, plane_cfg) { + current_type = vkms_config_plane_get_type(plane_cfg); + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg && current_type == type) + return plane_cfg; + } + } + + return NULL; +} + +struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane); + +struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane); + +struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + + encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL); + if (!encoder_cfg) + return ERR_PTR(-ENOMEM); + + encoder_cfg->config = config; + xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC); + + list_add_tail(&encoder_cfg->link, &config->encoders); + + return encoder_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder); + +void vkms_config_destroy_encoder(struct vkms_config *config, + struct vkms_config_encoder *encoder_cfg) +{ + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) + vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg); + + xa_destroy(&encoder_cfg->possible_crtcs); + list_del(&encoder_cfg->link); + kfree(encoder_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder); + +int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + u32 crtc_idx = 0; + + if (encoder_cfg->config != crtc_cfg->config) + return -EINVAL; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + return -EEXIST; + } + + return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg, + xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc); + +void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + xa_erase(&encoder_cfg->possible_crtcs, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc); + +struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + + connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL); + if (!connector_cfg) + return ERR_PTR(-ENOMEM); + + connector_cfg->config = config; + xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC); + + list_add_tail(&connector_cfg->link, &config->connectors); + + return connector_cfg; +} 
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector); + +void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg) +{ + xa_destroy(&connector_cfg->possible_encoders); + list_del(&connector_cfg->link); + kfree(connector_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector); + +int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg) +{ + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + u32 encoder_idx = 0; + + if (connector_cfg->config != encoder_cfg->config) + return -EINVAL; + + vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, + possible_encoder) { + if (possible_encoder == encoder_cfg) + return -EEXIST; + } + + return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx, + encoder_cfg, xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder); + +void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg) +{ + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + + vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, + possible_encoder) { + if (possible_encoder == encoder_cfg) + xa_erase(&connector_cfg->possible_encoders, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h new file mode 100644 index 000000000000..0118e3f99706 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -0,0 +1,437 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_CONFIG_H_ +#define _VKMS_CONFIG_H_ + +#include <linux/list.h> +#include <linux/types.h> +#include <linux/xarray.h> + +#include "vkms_drv.h" + +/** + * struct vkms_config - General configuration for VKMS driver + * + * @dev_name: Name of the device + * @planes: List of planes configured for the device + * @crtcs: List of CRTCs configured for the device + * @encoders: List of encoders configured for the device + * @connectors: List of connectors configured for the device + * @dev: Used to store the current VKMS device. Only set when the device is instantiated. + */ +struct vkms_config { + const char *dev_name; + struct list_head planes; + struct list_head crtcs; + struct list_head encoders; + struct list_head connectors; + struct vkms_device *dev; +}; + +/** + * struct vkms_config_plane + * + * @link: Link to the other planes in vkms_config + * @config: The vkms_config this plane belongs to + * @type: Type of the plane. The creator of the configuration needs to ensure + * that at least one primary plane is present. + * @possible_crtcs: Array of CRTCs that can be used with this plane + * @plane: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS plane during + * device creation. This pointer is not managed by the configuration and + * must be managed by other means. + */ +struct vkms_config_plane { + struct list_head link; + struct vkms_config *config; + + enum drm_plane_type type; + struct xarray possible_crtcs; + + /* Internal usage */ + struct vkms_plane *plane; +}; + +/** + * struct vkms_config_crtc + * + * @link: Link to the other CRTCs in vkms_config + * @config: The vkms_config this CRTC belongs to + * @writeback: If true, a writeback buffer can be attached to the CRTC + * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during + * device creation. This pointer is not managed by the configuration and + * must be managed by other means. + */ +struct vkms_config_crtc { + struct list_head link; + struct vkms_config *config; + + bool writeback; + + /* Internal usage */ + struct vkms_output *crtc; +}; + +/** + * struct vkms_config_encoder + * + * @link: Link to the other encoders in vkms_config + * @config: The vkms_config this encoder belongs to + * @possible_crtcs: Array of CRTCs that can be used with this encoder + * @encoder: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS encoder + * during device creation. This pointer is not managed by the + * configuration and must be managed by other means. + */ +struct vkms_config_encoder { + struct list_head link; + struct vkms_config *config; + + struct xarray possible_crtcs; + + /* Internal usage */ + struct drm_encoder *encoder; +}; + +/** + * struct vkms_config_connector + * + * @link: Link to the other connectors in vkms_config + * @config: The vkms_config this connector belongs to + * @possible_encoders: Array of encoders that can be used with this connector + * @connector: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS connector + * during device creation. This pointer is not managed by the + * configuration and must be managed by other means. + */ +struct vkms_config_connector { + struct list_head link; + struct vkms_config *config; + + struct xarray possible_encoders; + + /* Internal usage */ + struct vkms_connector *connector; +}; + +/** + * vkms_config_for_each_plane - Iterate over the vkms_config planes + * @config: &struct vkms_config pointer + * @plane_cfg: &struct vkms_config_plane pointer used as cursor + */ +#define vkms_config_for_each_plane(config, plane_cfg) \ + list_for_each_entry((plane_cfg), &(config)->planes, link) + +/** + * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs + * @config: &struct vkms_config pointer + * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_for_each_crtc(config, crtc_cfg) \ + list_for_each_entry((crtc_cfg), &(config)->crtcs, link) + +/** + * vkms_config_for_each_encoder - Iterate over the vkms_config encoders + * @config: &struct vkms_config pointer + * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor + */ +#define vkms_config_for_each_encoder(config, encoder_cfg) \ + list_for_each_entry((encoder_cfg), &(config)->encoders, link) + +/** + * vkms_config_for_each_connector - Iterate over the vkms_config connectors + * @config: &struct vkms_config pointer + * @connector_cfg: &struct vkms_config_connector pointer used as cursor + */ +#define vkms_config_for_each_connector(config, connector_cfg) \ + list_for_each_entry((connector_cfg), &(config)->connectors, link) + +/** + * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane + * possible CRTCs + * @plane_cfg: &struct vkms_config_plane pointer + * @idx: Index of the cursor + * @possible_crtc: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \ + xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc)) + +/** + * vkms_config_encoder_for_each_possible_crtc - Iterate over the + * vkms_config_encoder possible CRTCs + * @encoder_cfg: &struct vkms_config_encoder pointer + * @idx:
Index of the cursor + * @possible_crtc: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \ + xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc)) + +/** + * vkms_config_connector_for_each_possible_encoder - Iterate over the + * vkms_config_connector possible encoders + * @connector_cfg: &struct vkms_config_connector pointer + * @idx: Index of the cursor + * @possible_encoder: &struct vkms_config_encoder pointer used as cursor + */ +#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \ + xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder)) + +/** + * vkms_config_create() - Create a new VKMS configuration + * @dev_name: Name of the device + * + * Returns: + * The new vkms_config or an error. Call vkms_config_destroy() to free the + * returned configuration. + */ +struct vkms_config *vkms_config_create(const char *dev_name); + +/** + * vkms_config_default_create() - Create the configuration for the default device + * @enable_cursor: Whether to create a cursor plane + * @enable_writeback: Whether to create a writeback connector + * @enable_overlay: Whether to create overlay planes + * + * Returns: + * The default vkms_config or an error. Call vkms_config_destroy() to free the + * returned configuration. + */ +struct vkms_config *vkms_config_default_create(bool enable_cursor, + bool enable_writeback, + bool enable_overlay); + +/** + * vkms_config_destroy() - Free a VKMS configuration + * @config: vkms_config to free + */ +void vkms_config_destroy(struct vkms_config *config); + +/** + * vkms_config_get_device_name() - Return the name of the device + * @config: Configuration to get the device name from + * + * Returns: + * The device name. Only valid while @config is valid. + */ +static inline const char * +vkms_config_get_device_name(struct vkms_config *config) +{ + return config->dev_name; +} + +/** + * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration + * @config: Configuration to get the number of CRTCs from + */ +static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config) +{ + return list_count_nodes(&config->crtcs); +} + +/** + * vkms_config_is_valid() - Validate a configuration + * @config: Configuration to validate + * + * Returns: + * Whether the configuration is valid or not. + * For example, a configuration without primary planes is not valid. + */ +bool vkms_config_is_valid(const struct vkms_config *config); + +/** + * vkms_config_register_debugfs() - Register a debugfs file to show the device's + * configuration + * @vkms_device: Device to register + */ +void vkms_config_register_debugfs(struct vkms_device *vkms_device); + +/** + * vkms_config_create_plane() - Add a new plane configuration + * @config: Configuration to add the plane to + * + * Returns: + * The new plane configuration or an error. Call vkms_config_destroy_plane() to + * free the returned plane configuration.
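+ * + * A minimal usage sketch (illustrative only; it assumes a valid @config, an + * existing CRTC configuration ``crtc_cfg``, and omits IS_ERR() checking):: + * + * struct vkms_config_plane *plane_cfg; + * + * plane_cfg = vkms_config_create_plane(config); + * vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + * if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + * vkms_config_destroy_plane(plane_cfg);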
+ */ +struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config); + +/** + * vkms_config_destroy_plane() - Remove and free a plane configuration + * @plane_cfg: Plane configuration to destroy + */ +void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg); + +/** + * vkms_config_plane_get_type() - Return the plane type + * @plane_cfg: Plane to get the type from + */ +static inline enum drm_plane_type +vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg) +{ + return plane_cfg->type; +} + +/** + * vkms_config_plane_set_type() - Set the plane type + * @plane_cfg: Plane to set the type to + * @type: New plane type + */ +static inline void +vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, + enum drm_plane_type type) +{ + plane_cfg->type = type; +} + +/** + * vkms_config_plane_attach_crtc - Attach a plane to a CRTC + * @plane_cfg: Plane to attach + * @crtc_cfg: CRTC to attach @plane_cfg to + */ +int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_plane_detach_crtc - Detach a plane from a CRTC + * @plane_cfg: Plane to detach + * @crtc_cfg: CRTC to detach @plane_cfg from + */ +void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_create_crtc() - Add a new CRTC configuration + * @config: Configuration to add the CRTC to + * + * Returns: + * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to + * free the returned CRTC configuration. + */ +struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config); + +/** + * vkms_config_destroy_crtc() - Remove and free a CRTC configuration + * @config: Configuration to remove the CRTC from + * @crtc_cfg: CRTC configuration to destroy + */ +void vkms_config_destroy_crtc(struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_crtc_get_writeback() - Return whether a writeback connector will + * be created + * @crtc_cfg: CRTC with or without a writeback connector + */ +static inline bool +vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg) +{ + return crtc_cfg->writeback; +} + +/** + * vkms_config_crtc_set_writeback() - Set whether a writeback connector will be + * created + * @crtc_cfg: Target CRTC + * @writeback: Enable or disable the writeback connector + */ +static inline void +vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg, + bool writeback) +{ + crtc_cfg->writeback = writeback; +} + +/** + * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC + * @config: Configuration containing the CRTC + * @crtc_cfg: Target CRTC + * + * Note that, if multiple primary planes are found, the first one is returned. + * In this case, the configuration will be invalid. See vkms_config_is_valid(). + * + * Returns: + * The primary plane or NULL if none is assigned yet. + */ +struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC + * @config: Configuration containing the CRTC + * @crtc_cfg: Target CRTC + * + * Note that, if multiple cursor planes are found, the first one is returned. + * In this case, the configuration will be invalid. See vkms_config_is_valid(). + * + * Returns: + * The cursor plane or NULL if none is assigned yet.
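+ * + * For example, device setup code can look up both special planes before + * building a CRTC (sketch; a valid @config and @crtc_cfg are assumed):: + * + * struct vkms_config_plane *primary, *cursor; + * + * primary = vkms_config_crtc_primary_plane(config, crtc_cfg); + * cursor = vkms_config_crtc_cursor_plane(config, crtc_cfg); + * if (!primary) + * return -EINVAL; /* not a valid configuration */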
+ */ +struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_create_encoder() - Add a new encoder configuration + * @config: Configuration to add the encoder to + * + * Returns: + * The new encoder configuration or an error. Call vkms_config_destroy_encoder() + * to free the returned encoder configuration. + */ +struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config); + +/** + * vkms_config_destroy_encoder() - Remove and free an encoder configuration + * @config: Configuration to remove the encoder from + * @encoder_cfg: Encoder configuration to destroy + */ +void vkms_config_destroy_encoder(struct vkms_config *config, + struct vkms_config_encoder *encoder_cfg); + +/** + * vkms_config_encoder_attach_crtc - Attach an encoder to a CRTC + * @encoder_cfg: Encoder to attach + * @crtc_cfg: CRTC to attach @encoder_cfg to + */ +int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_encoder_detach_crtc - Detach an encoder from a CRTC + * @encoder_cfg: Encoder to detach + * @crtc_cfg: CRTC to detach @encoder_cfg from + */ +void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_create_connector() - Add a new connector configuration + * @config: Configuration to add the connector to + * + * Returns: + * The new connector configuration or an error. Call + * vkms_config_destroy_connector() to free the returned connector configuration. + */ +struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config); + +/** + * vkms_config_destroy_connector() - Remove and free a connector configuration + * @connector_cfg: Connector configuration to destroy + */ +void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg); + +/** + * vkms_config_connector_attach_encoder - Attach a connector to an encoder + * @connector_cfg: Connector to attach + * @encoder_cfg: Encoder to attach @connector_cfg to + */ +int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg); + +/** + * vkms_config_connector_detach_encoder - Detach a connector from an encoder + * @connector_cfg: Connector to detach + * @encoder_cfg: Encoder to detach @connector_cfg from + */ +void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg); + +#endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c new file mode 100644 index 000000000000..48b10cba322a --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_connector.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_managed.h> +#include <drm/drm_probe_helper.h> + +#include "vkms_connector.h" + +static const struct drm_connector_funcs vkms_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_conn_get_modes(struct drm_connector *connector) +{ + int count; + + /* Use the default modes list from DRM */ + count = drm_add_modes_noedid(connector,
XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + + return count; +} + +static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; +} + +static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { + .get_modes = vkms_conn_get_modes, + .best_encoder = vkms_conn_best_encoder, +}; + +struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + struct vkms_connector *connector; + int ret; + + connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); + if (!connector) + return ERR_PTR(-ENOMEM); + + ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, NULL); + if (ret) + return ERR_PTR(ret); + + drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs); + + return connector; +} diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h new file mode 100644 index 000000000000..c9149c1b7af0 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_connector.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_CONNECTOR_H_ +#define _VKMS_CONNECTOR_H_ + +#include "vkms_drv.h" + +/** + * struct vkms_connector - VKMS custom type wrapping around the DRM connector + * + * @base: Base DRM connector + */ +struct vkms_connector { + struct drm_connector base; +}; + +/** + * vkms_connector_init() - Initialize a connector + * @vkmsdev: VKMS device containing the connector + * + * Returns: + * The connector or an error on failure. + */ +struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev); + +#endif /* _VKMS_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index b6de91134a22..a24d1655f7b8 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -27,11 +27,9 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_vblank.h> +#include "vkms_config.h" #include "vkms_drv.h" -#include <drm/drm_print.h> -#include <drm/drm_debugfs.h> - #define DRIVER_NAME "vkms" #define DRIVER_DESC "Virtual Kernel Mode Setting" #define DRIVER_MAJOR 1 @@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_cleanup_planes(dev, old_state); } -static int vkms_config_show(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); - - seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); - seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); - seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); - - return 0; -} - -static const struct drm_debugfs_info vkms_config_debugfs_list[] = { - { "vkms_config", vkms_config_show, 0 }, -}; - static const struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .fops = &vkms_driver_fops, @@ -170,8 +151,10 @@ static int vkms_create(struct vkms_config *config) int ret; struct platform_device *pdev; struct vkms_device *vkms_device; + const char *dev_name; - pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + dev_name = vkms_config_get_device_name(config); + pdev = platform_device_register_simple(dev_name, -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); @@ -198,7 +181,8 @@ static int vkms_create(struct
vkms_config *config) goto out_devres; } - ret = drm_vblank_init(&vkms_device->drm, 1); + ret = drm_vblank_init(&vkms_device->drm, + vkms_config_get_num_crtcs(config)); if (ret) { DRM_ERROR("Failed to vblank\n"); goto out_devres; @@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config) if (ret) goto out_devres; - drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list, - ARRAY_SIZE(vkms_config_debugfs_list)); + vkms_config_register_debugfs(vkms_device); ret = drm_dev_register(&vkms_device->drm, 0); if (ret) @@ -231,17 +214,13 @@ static int __init vkms_init(void) int ret; struct vkms_config *config; - config = kmalloc(sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - - config->cursor = enable_cursor; - config->writeback = enable_writeback; - config->overlay = enable_overlay; + config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay); + if (IS_ERR(config)) + return PTR_ERR(config); ret = vkms_create(config); if (ret) { - kfree(config); + vkms_config_destroy(config); return ret; } @@ -275,7 +254,7 @@ static void __exit vkms_exit(void) return; vkms_destroy(default_config); - kfree(default_config); + vkms_config_destroy(default_config); } module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index abbb652be2b5..a74a7fc3a056 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -12,6 +12,8 @@ #include <drm/drm_encoder.h> #include <drm/drm_writeback.h> +#define DEFAULT_DEVICE_NAME "vkms" + #define XRES_MIN 10 #define YRES_MIN 10 @@ -189,20 +191,7 @@ struct vkms_output { spinlock_t composer_lock; }; -/** - * struct vkms_config - General configuration for VKMS driver - * - * @writeback: If true, a writeback buffer can be attached to the CRTC - * @cursor: If true, a cursor plane is created in the VKMS device - * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device - * @dev: Used to store the current VKMS device. Only set when the device is instantiated. 
- */ -struct vkms_config { - bool writeback; - bool cursor; - bool overlay; - struct vkms_device *dev; -}; +struct vkms_config; /** * struct vkms_device - Description of a VKMS device diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 22f0d678af3a..8d7ca0cdd79f 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,121 +1,111 @@ // SPDX-License-Identifier: GPL-2.0+ +#include "vkms_config.h" +#include "vkms_connector.h" #include "vkms_drv.h" -#include <drm/drm_atomic_helper.h> -#include <drm/drm_edid.h> #include <drm/drm_managed.h> -#include <drm/drm_probe_helper.h> - -static const struct drm_connector_funcs vkms_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int vkms_conn_get_modes(struct drm_connector *connector) -{ - int count; - - /* Use the default modes list from DRM */ - count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); - drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); - - return count; -} - -static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { - .get_modes = vkms_conn_get_modes, -}; int vkms_output_init(struct vkms_device *vkmsdev) { struct drm_device *dev = &vkmsdev->drm; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct vkms_output *output; - struct vkms_plane *primary, *overlay, *cursor = NULL; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; int ret; int writeback; - unsigned int n; - - /* - * Initialize used plane. One primary plane is required to perform the composition. - * - * The overlay and cursor planes are not mandatory, but can be used to perform complex - * composition. - */ - primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY); - if (IS_ERR(primary)) - return PTR_ERR(primary); - - if (vkmsdev->config->cursor) { - cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR); - if (IS_ERR(cursor)) - return PTR_ERR(cursor); - } - output = vkms_crtc_init(dev, &primary->base, - cursor ? 
&cursor->base : NULL); - if (IS_ERR(output)) { - DRM_ERROR("Failed to allocate CRTC\n"); - return PTR_ERR(output); - } + if (!vkms_config_is_valid(vkmsdev->config)) + return -EINVAL; - if (vkmsdev->config->overlay) { - for (n = 0; n < NUM_OVERLAY_PLANES; n++) { - overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY); - if (IS_ERR(overlay)) { - DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); - return PTR_ERR(overlay); - } - overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc); + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + enum drm_plane_type type; + + type = vkms_config_plane_get_type(plane_cfg); + + plane_cfg->plane = vkms_plane_init(vkmsdev, type); + if (IS_ERR(plane_cfg->plane)) { + DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); + return PTR_ERR(plane_cfg->plane); } } - connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); - if (!connector) { - DRM_ERROR("Failed to allocate connector\n"); - return -ENOMEM; - } + vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) { + struct vkms_config_plane *primary, *cursor; - ret = drmm_connector_init(dev, connector, &vkms_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL, NULL); - if (ret) { - DRM_ERROR("Failed to init connector\n"); - return ret; - } + primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg); + cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg); - drm_connector_helper_add(connector, &vkms_conn_helper_funcs); + crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base, + cursor ? &cursor->plane->base : NULL); + if (IS_ERR(crtc_cfg->crtc)) { + DRM_ERROR("Failed to allocate CRTC\n"); + return PTR_ERR(crtc_cfg->crtc); + } - encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL); - if (!encoder) { - DRM_ERROR("Failed to allocate encoder\n"); - return -ENOMEM; + /* Initialize the writeback component */ + if (vkms_config_crtc_get_writeback(crtc_cfg)) { + writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc); + if (writeback) + DRM_ERROR("Failed to init writeback connector\n"); + } } - ret = drmm_encoder_init(dev, encoder, NULL, - DRM_MODE_ENCODER_VIRTUAL, NULL); - if (ret) { - DRM_ERROR("Failed to init encoder\n"); - return ret; + + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + plane_cfg->plane->base.possible_crtcs |= + drm_crtc_mask(&possible_crtc->crtc->crtc); + } } - encoder->possible_crtcs = drm_crtc_mask(&output->crtc); - /* Attach the encoder and the connector */ - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) { - DRM_ERROR("Failed to attach connector to encoder\n"); - return ret; + vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL); + if (!encoder_cfg->encoder) { + DRM_ERROR("Failed to allocate encoder\n"); + return -ENOMEM; + } + ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) { + DRM_ERROR("Failed to init encoder\n"); + return ret; + } + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + encoder_cfg->encoder->possible_crtcs |= + drm_crtc_mask(&possible_crtc->crtc->crtc); + } } - /* Initialize the writeback component */ - if (vkmsdev->config->writeback) { - writeback = vkms_enable_writeback_connector(vkmsdev, 
output); - if (writeback) - DRM_ERROR("Failed to init writeback connector\n"); + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) { + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + + connector_cfg->connector = vkms_connector_init(vkmsdev); + if (IS_ERR(connector_cfg->connector)) { + DRM_ERROR("Failed to init connector\n"); + return PTR_ERR(connector_cfg->connector); + } + + vkms_config_connector_for_each_possible_encoder(connector_cfg, + idx, + possible_encoder) { + ret = drm_connector_attach_encoder(&connector_cfg->connector->base, + possible_encoder->encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + return ret; + } + } } drm_mode_config_reset(dev); - return ret; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 46a4ab688a7f..b168fd7fe9b3 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ - vmwgfx_gem.o vmwgfx_vkms.o + vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 64bd7d74854e..fa5841fda659 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map) void *ptr = NULL; int ret; - if (bo->tbo.base.import_attach) { + if (drm_gem_is_imported(&bo->tbo.base)) { ret = dma_buf_vmap(bo->tbo.base.dma_buf, map); if (ret) { drm_dbg_driver(&vmw->drm, @@ -447,7 +447,7 @@ out: static void unmap_external(struct vmw_bo *bo, struct iosys_map *map) { - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_vunmap(bo->tbo.base.dma_buf, map); else vmw_bo_unmap(bo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 9b5b8c1f063b..f031a312c783 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo) { struct vmw_resource *res; - WARN_ON(vbo->tbo.base.funcs && - kref_read(&vbo->tbo.base.refcount) != 0); + WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0); vmw_bo_unmap(vbo); xa_destroy(&vbo->detached_resources); @@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo) mutex_lock(&res->dev_priv->cmdbuf_mutex); (void)vmw_resource_reserve(res, false, true); vmw_resource_mob_detach(res); + if (res->dirty) + res->func->dirty_free(res); if (res->coherent) vmw_bo_dirty_release(res->guest_memory_bo); res->guest_memory_bo = NULL; res->guest_memory_offset = 0; - vmw_resource_unreserve(res, false, false, false, NULL, + vmw_resource_unreserve(res, true, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo) { struct vmw_bo *vbo = to_vmw_bo(&bo->base); - WARN_ON(vbo->dirty); WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); vmw_bo_release(vbo); + WARN_ON(vbo->dirty); kfree(vbo); } @@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw, if (unlikely(ret != 0)) goto out_error; + (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs; return ret; out_error: *p_bo = NULL; @@ -848,9 +850,9 @@ void 
vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) vmw_bo_placement_set(bo, domain, domain); } -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) { - xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL); + return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL)); } void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) @@ -887,3 +889,9 @@ out: surf = vmw_res_to_srf(res); return surf; } + +s32 vmw_bo_mobid(struct vmw_bo *vbo) +{ + WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB); + return (s32)vbo->tbo.resource->start; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index 11e330c7c7f5..cf84a163bfcb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem); void vmw_bo_swap_notify(struct ttm_buffer_object *bo); -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo); @@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf) *buf = NULL; if (tmp_buf) - ttm_bo_put(&tmp_buf->tbo); + drm_gem_object_put(&tmp_buf->tbo.base); } static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) { - ttm_bo_get(&buf->tbo); + drm_gem_object_get(&buf->tbo.base); return buf; } @@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) return container_of((gobj), struct vmw_bo, tbo.base); } +s32 vmw_bo_mobid(struct vmw_bo *vbo); + #endif // VMWGFX_BO_H diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index a7c07692262b..98331c4c0335 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - ret = vmw_gem_object_create(dev_priv, &bo_params, &buf); + ret = vmw_bo_create(dev_priv, &bo_params, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); goto out_done; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c new file mode 100644 index 000000000000..718832b08d96 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c @@ -0,0 +1,844 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/************************************************************************** + * + * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
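A note on the vmw_bo_add_detached_resource() change above: xa_store() returns either the previous entry at that index or an error-encoded pointer, and xa_err() collapses that into 0 or a negative errno, which is what lets the helper start reporting allocation failures. A minimal sketch of the same pattern, with hypothetical names:

	#include <linux/xarray.h>

	/* Track an item keyed by its pointer value; returns 0 or -errno. */
	static int example_track(struct xarray *xa, void *item)
	{
		return xa_err(xa_store(xa, (unsigned long)item, item, GFP_KERNEL));
	}

The caller can then propagate the error instead of silently dropping it, which is exactly what the new int return type of vmw_bo_add_detached_resource() enables.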
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+ * at the risk of clogging other fifocmd users, so
+ * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
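+ /*
+ * Layout note: the cursor MOB is one contiguous buffer with the
+ * SVGAGBCursorHeader first and the raw ARGB8888 image immediately
+ * after it, so a 64x64 cursor carries 64 * 64 * 4 = 16384 payload
+ * bytes behind the header; vmw_cursor_mob_size() below accounts
+ * for both parts.
+ */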
alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x; + alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y; + alpha_header->width = vps->base.crtc_w; + alpha_header->height = vps->base.crtc_h; + + memcpy(header + 1, image, image_size); + vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob)); + + vmw_bo_unmap(bo); + vmw_bo_unmap(vps->cursor.mob); +} + +static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type, + u32 w, u32 h) +{ + switch (update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + case VMW_CURSOR_UPDATE_NONE: + return 0; + case VMW_CURSOR_UPDATE_MOB: + return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader); + } + return 0; +} + +static void vmw_cursor_mob_destroy(struct vmw_bo **vbo) +{ + if (!(*vbo)) + return; + + ttm_bo_unpin(&(*vbo)->tbo); + vmw_bo_unreference(vbo); +} + +/** + * vmw_cursor_mob_unmap - Unmaps the cursor mobs. + * + * @vps: state of the cursor plane + * + * Returns 0 on success + */ + +static int +vmw_cursor_mob_unmap(struct vmw_plane_state *vps) +{ + int ret = 0; + struct vmw_bo *vbo = vps->cursor.mob; + + if (!vbo || !vbo->map.virtual) + return 0; + + ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); + if (likely(ret == 0)) { + vmw_bo_unmap(vbo); + ttm_bo_unreserve(&vbo->tbo); + } + + return ret; +} + +static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) +{ + u32 i; + + if (!vps->cursor.mob) + return; + + vmw_cursor_mob_unmap(vps); + + /* Look for a free slot to return this mob to the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (!vcp->cursor_mobs[i]) { + vcp->cursor_mobs[i] = vps->cursor.mob; + vps->cursor.mob = NULL; + return; + } + } + + /* Cache is full: See if this mob is bigger than an existing mob. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i]->tbo.base.size < + vps->cursor.mob->tbo.base.size) { + vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]); + vcp->cursor_mobs[i] = vps->cursor.mob; + vps->cursor.mob = NULL; + return; + } + } + + /* Destroy it if it's not worth caching. */ + vmw_cursor_mob_destroy(&vps->cursor.mob); +} + +static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) +{ + struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); + u32 size = vmw_cursor_mob_size(vps->cursor.update_type, + vps->base.crtc_w, vps->base.crtc_h); + u32 i; + u32 cursor_max_dim, mob_max_size; + struct vmw_fence_obj *fence = NULL; + int ret; + + if (!dev_priv->has_mob || + (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) + return -EINVAL; + + mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); + cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); + + if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim || + vps->base.crtc_h > cursor_max_dim) + return -EINVAL; + + if (vps->cursor.mob) { + if (vps->cursor.mob->tbo.base.size >= size) + return 0; + vmw_cursor_mob_put(vcp, vps); + } + + /* Look for an unused mob in the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i] && + vcp->cursor_mobs[i]->tbo.base.size >= size) { + vps->cursor.mob = vcp->cursor_mobs[i]; + vcp->cursor_mobs[i] = NULL; + return 0; + } + } + /* Create a new mob if we can't find an existing one. 
*/
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+ /* TODO handle non-page-aligned offsets */
+ /* TODO handle more dst & src != 0 */
+ /* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset =
cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mobs.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+ * Currently unused because the check at the top returns
+ * right away. In most cases the buffer being different
+ * will mean that the contents are different. For the few
+ * percent of cases where that's not true the cost of
+ * doing the memcmp on all the others seems to outweigh
+ * the benefits.
Leave the conditional to be able to + * trivially validate it by removing the initial + * if (new_bo != old_bo) at the start. + */ + void *old_image; + void *new_image; + bool changed = false; + struct ww_acquire_ctx ctx; + const u32 size = new_vps->base.crtc_w * + new_vps->base.crtc_h * sizeof(u32); + + ww_acquire_init(&ctx, &reservation_ww_class); + + ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx); + if (ret != 0) { + ww_acquire_fini(&ctx); + return true; + } + + ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx); + if (ret != 0) { + ttm_bo_unreserve(&old_bo->tbo); + ww_acquire_fini(&ctx); + return true; + } + + old_image = vmw_bo_map_and_cache(old_bo); + new_image = vmw_bo_map_and_cache(new_bo); + + if (old_image && new_image && old_image != new_image) + changed = memcmp(old_image, new_image, size) != + 0; + + ttm_bo_unreserve(&new_bo->tbo); + ttm_bo_unreserve(&old_bo->tbo); + + ww_acquire_fini(&ctx); + + return changed; + } + return false; + } + + return false; +} + +static bool +vmw_cursor_plane_changed(struct vmw_plane_state *new_vps, + struct vmw_plane_state *old_vps) +{ + if (old_vps->base.crtc_w != new_vps->base.crtc_w || + old_vps->base.crtc_h != new_vps->base.crtc_h) + return true; + + if (old_vps->base.hotspot_x != new_vps->base.hotspot_x || + old_vps->base.hotspot_y != new_vps->base.hotspot_y) + return true; + + if (old_vps->cursor.legacy.hotspot_x != + new_vps->cursor.legacy.hotspot_x || + old_vps->cursor.legacy.hotspot_y != + new_vps->cursor.legacy.hotspot_y) + return true; + + if (old_vps->base.fb != new_vps->base.fb) + return true; + + return false; +} + +/** + * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it + * + * @plane: display plane + * @new_state: info on the new plane state, including the FB + * + * Returns 0 on success + */ +int vmw_cursor_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state); + struct vmw_private *vmw = vmw_priv(plane->dev); + struct vmw_bo *bo = NULL; + struct vmw_surface *surface; + int ret = 0; + + if (!vmw_user_object_is_null(&vps->uo)) { + vmw_user_object_unmap(&vps->uo); + vmw_user_object_unref(&vps->uo); + } + + if (fb) { + if (vmw_framebuffer_to_vfb(fb)->bo) { + vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer; + vps->uo.surface = NULL; + } else { + memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo)); + } + vmw_user_object_ref(&vps->uo); + } + + vps->cursor.update_type = vmw_cursor_update_type(vmw, vps); + switch (vps->cursor.update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + surface = vmw_user_object_surface(&vps->uo); + if (!surface || vps->cursor.legacy.id == surface->snooper.id) + vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE; + break; + case VMW_CURSOR_UPDATE_MOB: { + bo = vmw_user_object_buffer(&vps->uo); + if (bo) { + struct ttm_operation_ctx ctx = { false, false }; + + ret = ttm_bo_reserve(&bo->tbo, true, false, NULL); + if (ret != 0) + return -ENOMEM; + + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret != 0) + return -ENOMEM; + + /* + * vmw_bo_pin_reserved also validates, so to skip + * the extra validation use ttm_bo_pin directly + */ + if (!bo->tbo.pin_count) + ttm_bo_pin(&bo->tbo); + + if (vmw_framebuffer_to_vfb(fb)->bo) { + const u32 size = new_state->crtc_w * + new_state->crtc_h * + 
sizeof(u32); + + (void)vmw_bo_map_and_cache_size(bo, size); + } else { + vmw_bo_map_and_cache(bo); + } + ttm_bo_unreserve(&bo->tbo); + } + if (!vmw_user_object_is_null(&vps->uo)) { + if (!vmw_cursor_plane_changed(vps, old_vps) && + !vmw_cursor_buffer_changed(vps, old_vps)) { + vps->cursor.update_type = + VMW_CURSOR_UPDATE_NONE; + } else { + vmw_cursor_mob_get(vcp, vps); + vmw_cursor_mob_map(vps); + } + } + } + break; + case VMW_CURSOR_UPDATE_NONE: + /* do nothing */ + break; + } + + return 0; +} + +/** + * vmw_cursor_plane_atomic_check - check if the new state is okay + * + * @plane: cursor plane + * @state: info on the new plane state + * + * This is a chance to fail if the new cursor state does not fit + * our requirements. + * + * Returns 0 on success + */ +int vmw_cursor_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = + drm_atomic_get_new_plane_state(state, plane); + struct vmw_private *vmw = vmw_priv(plane->dev); + int ret = 0; + struct drm_crtc_state *crtc_state = NULL; + struct vmw_surface *surface = NULL; + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + enum vmw_cursor_update_type update_type; + struct drm_framebuffer *fb = new_state->fb; + + if (new_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, true, + true); + if (ret) + return ret; + + /* Turning off */ + if (!fb) + return 0; + + update_type = vmw_cursor_update_type(vmw, vps); + if (update_type == VMW_CURSOR_UPDATE_LEGACY) { + if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH || + new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) { + drm_warn(&vmw->drm, + "Invalid cursor dimensions (%d, %d)\n", + new_state->crtc_w, new_state->crtc_h); + return -EINVAL; + } + surface = vmw_user_object_surface(&vps->uo); + if (!surface || !surface->snooper.image) { + drm_warn(&vmw->drm, + "surface not suitable for cursor\n"); + return -EINVAL; + } + } + + return 0; +} + +void +vmw_cursor_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; + struct vmw_private *dev_priv = vmw_priv(plane->dev); + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + s32 hotspot_x, hotspot_y, cursor_x, cursor_y; + + /* + * Hide the cursor if the new bo is null + */ + if (vmw_user_object_is_null(&vps->uo)) { + vmw_cursor_update_position(dev_priv, false, 0, 0); + return; + } + + switch (vps->cursor.update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + vmw_cursor_plane_update_legacy(dev_priv, vps); + break; + case VMW_CURSOR_UPDATE_MOB: + vmw_cursor_update_mob(dev_priv, vps); + break; + case VMW_CURSOR_UPDATE_NONE: + /* do nothing */ + break; + } + + /* + * For all update types update the cursor position + */ + cursor_x = new_state->crtc_x + du->set_gui_x; + cursor_y = new_state->crtc_y + du->set_gui_y; + + hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x; + hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y; + + vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x, + cursor_y + hotspot_y); +} + +int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 
+ struct drm_file *file_priv) +{ + struct drm_vmw_cursor_bypass_arg *arg = data; + struct vmw_display_unit *du; + struct vmw_plane_state *vps; + struct drm_crtc *crtc; + int ret = 0; + + mutex_lock(&dev->mode_config.mutex); + if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + du = vmw_crtc_to_du(crtc); + vps = vmw_plane_state_to_vps(du->cursor.base.state); + vps->cursor.legacy.hotspot_x = arg->xhot; + vps->cursor.legacy.hotspot_y = arg->yhot; + } + + mutex_unlock(&dev->mode_config.mutex); + return 0; + } + + crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); + if (!crtc) { + ret = -ENOENT; + goto out; + } + + du = vmw_crtc_to_du(crtc); + vps = vmw_plane_state_to_vps(du->cursor.base.state); + vps->cursor.legacy.hotspot_x = arg->xhot; + vps->cursor.legacy.hotspot_y = arg->yhot; + +out: + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + +void *vmw_cursor_snooper_create(struct drm_file *file_priv, + struct vmw_surface_metadata *metadata) +{ + if (!file_priv->atomic && metadata->scanout && + metadata->num_sizes == 1 && + metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && + metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && + metadata->format == VMW_CURSOR_SNOOP_FORMAT) { + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); + const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH * + VMW_CURSOR_SNOOP_HEIGHT * + desc->pitchBytesPerBlock; + void *image = kzalloc(cursor_size_bytes, GFP_KERNEL); + + if (!image) { + DRM_ERROR("Failed to allocate cursor_image\n"); + return ERR_PTR(-ENOMEM); + } + return image; + } + return NULL; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h new file mode 100644 index 000000000000..40694925a70e --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
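vmw_kms_cursor_bypass_ioctl() above lets a legacy (non-atomic) client adjust the cursor hotspot for a single CRTC or, with DRM_VMW_CURSOR_BYPASS_ALL set, for every CRTC at once. A hypothetical userspace invocation could look like the following sketch; the argument fields match the handler above, but the exact UAPI header and ioctl request macro are assumptions here:

	struct drm_vmw_cursor_bypass_arg arg = {
		.flags = DRM_VMW_CURSOR_BYPASS_ALL,
		.xhot = 8,	/* new legacy hotspot, applied to all CRTCs */
		.yhot = 8,
	};

	/* assumed request macro; see include/uapi/drm/vmwgfx_drm.h */
	ioctl(drm_fd, DRM_IOCTL_VMW_CURSOR_BYPASS, &arg);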
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - derived class for cursor plane objects
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..0695a342b1ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
 *
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved.
The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * **************************************************************************/ - #include "vmwgfx_drv.h" #include "vmwgfx_bo.h" @@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev, static void vmw_master_drop(struct drm_device *dev, struct drm_file *file_priv) { - struct vmw_private *dev_priv = vmw_priv(dev); - - vmw_kms_legacy_hotspot_clear(dev_priv); } bool vmwgfx_supported(struct vmw_private *vmw) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5275ef632d4b..594af8eb04c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,29 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * **************************************************************************/ #ifndef _VMWGFX_DRV_H_ @@ -58,7 +38,7 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 20 +#define VMWGFX_DRIVER_MINOR 21 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_NUM_DISPLAY_UNITS 8 @@ -100,10 +80,6 @@ #define VMW_RES_SHADER ttm_driver_type4 #define VMW_RES_HT_ORDER 12 -#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8 -#define VMW_CURSOR_SNOOP_WIDTH 64 -#define VMW_CURSOR_SNOOP_HEIGHT 64 - #define MKSSTAT_CAPACITY_LOG2 5U #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2) @@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type { struct vmw_cmdbuf_res_manager; struct vmw_cursor_snooper { - size_t age; + size_t id; uint32_t *image; }; @@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) * GEM related functionality - vmwgfx_gem.c */ struct vmw_bo_params; -int vmw_gem_object_create(struct vmw_private *vmw, - struct vmw_bo_params *params, - struct vmw_bo **p_vbo); +extern const struct drm_gem_object_funcs vmw_gem_object_funcs; extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -1050,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv); int vmw_kms_close(struct vmw_private *dev_priv); int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); void vmw_kms_cursor_snoop(struct vmw_surface *srf, struct ttm_object_file *tfile, struct ttm_buffer_object *bo, @@ -1067,7 +1040,6 @@ int vmw_kms_present(struct vmw_private *dev_priv, uint32_t num_clips); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); int vmw_kms_suspend(struct drm_device *dev); int vmw_kms_resume(struct drm_device *dev); void vmw_kms_lost_device(struct drm_device *dev); @@ -1393,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv); DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* Resource dirtying - vmwgfx_page_dirty.c */ +bool vmw_bo_is_dirty(struct vmw_bo *vbo); void vmw_bo_dirty_scan(struct vmw_bo *vbo); int vmw_bo_dirty_add(struct vmw_bo *vbo); +void vmw_bo_dirty_clear(struct vmw_bo *vbo); void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res); void vmw_bo_dirty_clear_res(struct vmw_resource *res); void vmw_bo_dirty_release(struct vmw_bo *vbo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 2e52d73eba48..e831e324e737 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,29 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall 
be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
+
 #include "vmwgfx_binding.h"
 #include "vmwgfx_bo.h"
 #include "vmwgfx_drv.h"
@@ -4086,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
 return 0;
 }
 
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
 int vmw_execbuf_process(struct drm_file *file_priv,
 struct vmw_private *dev_priv,
 void __user *user_commands, void *kernel_commands,
@@ -4266,6 +4265,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 } else {
 /* Link the fence with the FD created earlier */
 fd_install(out_fence_fd, sync_file->file);
+ struct seqno_waiter_rm_context *ctx =
+ kmalloc(sizeof(*ctx), GFP_KERNEL);
+
+ /* Skip the waiter bookkeeping if the allocation failed */
+ if (ctx) {
+ ctx->dev_priv = dev_priv;
+ vmw_seqno_waiter_add(dev_priv);
+ if (dma_fence_add_callback(&fence->base, &ctx->base,
+ seqno_waiter_rm_cb) < 0) {
+ vmw_seqno_waiter_remove(dev_priv);
+ kfree(ctx);
+ }
+ }
 }
 }
 
@@ -4512,8 +4523,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 if (unlikely(ret != 0))
 goto out;
 
- vmw_kms_cursor_post_execbuf(dev_priv);
-
 out:
 if (in_fence)
 dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..c55382167c1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,11 +84,11 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
 int ret;
 
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
 if (!ret) {
 if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
 return -EIO;
 }
 }
@@ -101,8 +101,8 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
 
 static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
 {
- if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj))
+ dma_buf_vunmap(obj->dma_buf, map);
 else
 drm_gem_ttm_vunmap(obj, map);
 }
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
 int ret;
 
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
 /*
 * Reset both vm_ops and vm_private_data, so we don't end up with
 * vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct
vm_operations_struct vmw_vm_ops = { .close = ttm_bo_vm_close, }; -static const struct drm_gem_object_funcs vmw_gem_object_funcs = { +const struct drm_gem_object_funcs vmw_gem_object_funcs = { .free = vmw_gem_object_free, .open = vmw_gem_object_open, .close = vmw_gem_object_close, @@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .vm_ops = &vmw_vm_ops, }; -int vmw_gem_object_create(struct vmw_private *vmw, - struct vmw_bo_params *params, - struct vmw_bo **p_vbo) -{ - int ret = vmw_bo_create(vmw, params, p_vbo); - - if (ret != 0) - goto out_no_bo; - - (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; -out_no_bo: - return ret; -} - int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, .pin = false }; - ret = vmw_gem_object_create(dev_priv, ¶ms, p_vbo); + ret = vmw_bo_create(dev_priv, ¶ms, p_vbo); if (ret != 0) goto out_no_bo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1912ac1cde6d..05b1c54a070c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,33 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
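One subtlety in the vmw_execbuf_process() hunk above: dma_fence_add_callback() returns a negative errno when the fence has already signaled, in which case the callback is never installed and will never run, so the caller has to undo its setup inline. Condensed from the patch:

	vmw_seqno_waiter_add(dev_priv);
	if (dma_fence_add_callback(&fence->base, &ctx->base,
				   seqno_waiter_rm_cb) < 0) {
		/* already signaled: seqno_waiter_rm_cb() won't fire */
		vmw_seqno_waiter_remove(dev_priv);
		kfree(ctx);
	}

When the callback does run, seqno_waiter_rm_cb() performs the same remove-and-free from the fence signaling path instead.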
- * **************************************************************************/ + #include "vmwgfx_kms.h" #include "vmwgfx_bo.h" +#include "vmwgfx_resource_priv.h" #include "vmwgfx_vkms.h" #include "vmw_surface_cache.h" @@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du) drm_connector_cleanup(&du->connector); } -/* - * Display Unit Cursor functions - */ - -static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps); -static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY); - -struct vmw_svga_fifo_cmd_define_cursor { - u32 cmd; - SVGAFifoCmdDefineAlphaCursor cursor; -}; - -/** - * vmw_send_define_cursor_cmd - queue a define cursor command - * @dev_priv: the private driver struct - * @image: buffer which holds the cursor image - * @width: width of the mouse cursor image - * @height: height of the mouse cursor image - * @hotspotX: the horizontal position of mouse hotspot - * @hotspotY: the vertical position of mouse hotspot - */ -static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - struct vmw_svga_fifo_cmd_define_cursor *cmd; - const u32 image_size = width * height * sizeof(*image); - const u32 cmd_size = sizeof(*cmd) + image_size; - - /* Try to reserve fifocmd space and swallow any failures; - such reservations cannot be left unconsumed for long - under the risk of clogging other fifocmd users, so - we treat reservations separtely from the way we treat - other fallible KMS-atomic resources at prepare_fb */ - cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); - - if (unlikely(!cmd)) - return; - - memset(cmd, 0, sizeof(*cmd)); - - memcpy(&cmd[1], image, image_size); - - cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; - cmd->cursor.id = 0; - cmd->cursor.width = width; - cmd->cursor.height = height; - cmd->cursor.hotspotX = hotspotX; - cmd->cursor.hotspotY = hotspotY; - - vmw_cmd_commit_flush(dev_priv, cmd_size); -} - -/** - * vmw_cursor_update_image - update the cursor image on the provided plane - * @dev_priv: the private driver struct - * @vps: the plane state of the cursor plane - * @image: buffer which holds the cursor image - * @width: width of the mouse cursor image - * @height: height of the mouse cursor image - * @hotspotX: the horizontal position of mouse hotspot - * @hotspotY: the vertical position of mouse hotspot - */ -static void vmw_cursor_update_image(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - if (vps->cursor.bo) - vmw_cursor_update_mob(dev_priv, vps, image, - vps->base.crtc_w, vps->base.crtc_h, - hotspotX, hotspotY); - - else - vmw_send_define_cursor_cmd(dev_priv, image, width, height, - hotspotX, hotspotY); -} - - -/** - * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism - * - * Called from inside vmw_du_cursor_plane_atomic_update to actually - * make the cursor-image live. 
- * - * @dev_priv: device to work with - * @vps: the plane state of the cursor plane - * @image: cursor source data to fill the MOB with - * @width: source data width - * @height: source data height - * @hotspotX: cursor hotspot x - * @hotspotY: cursor hotspot Y - */ -static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - SVGAGBCursorHeader *header; - SVGAGBAlphaCursorHeader *alpha_header; - const u32 image_size = width * height * sizeof(*image); - - header = vmw_bo_map_and_cache(vps->cursor.bo); - alpha_header = &header->header.alphaHeader; - - memset(header, 0, sizeof(*header)); - - header->type = SVGA_ALPHA_CURSOR; - header->sizeInBytes = image_size; - - alpha_header->hotspotX = hotspotX; - alpha_header->hotspotY = hotspotY; - alpha_header->width = width; - alpha_header->height = height; - - memcpy(header + 1, image, image_size); - vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, - vps->cursor.bo->tbo.resource->start); -} - - -static u32 vmw_du_cursor_mob_size(u32 w, u32 h) -{ - return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader); -} - -/** - * vmw_du_cursor_plane_acquire_image -- Acquire the image data - * @vps: cursor plane state - */ -static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) -{ - struct vmw_surface *surf; - - if (vmw_user_object_is_null(&vps->uo)) - return NULL; - - surf = vmw_user_object_surface(&vps->uo); - if (surf && !vmw_user_object_is_mapped(&vps->uo)) - return surf->snooper.image; - - return vmw_user_object_map(&vps->uo); -} - -static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, - struct vmw_plane_state *new_vps) -{ - void *old_image; - void *new_image; - u32 size; - bool changed; - - if (old_vps->base.crtc_w != new_vps->base.crtc_w || - old_vps->base.crtc_h != new_vps->base.crtc_h) - return true; - - if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x || - old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y) - return true; - - size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32); - - old_image = vmw_du_cursor_plane_acquire_image(old_vps); - new_image = vmw_du_cursor_plane_acquire_image(new_vps); - - changed = false; - if (old_image && new_image && old_image != new_image) - changed = memcmp(old_image, new_image, size) != 0; - - return changed; -} - -static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo) -{ - if (!(*vbo)) - return; - - ttm_bo_unpin(&(*vbo)->tbo); - vmw_bo_unreference(vbo); -} - -static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, - struct vmw_plane_state *vps) -{ - u32 i; - - if (!vps->cursor.bo) - return; - - vmw_du_cursor_plane_unmap_cm(vps); - - /* Look for a free slot to return this mob to the cache. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (!vcp->cursor_mobs[i]) { - vcp->cursor_mobs[i] = vps->cursor.bo; - vps->cursor.bo = NULL; - return; - } - } - - /* Cache is full: See if this mob is bigger than an existing mob. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (vcp->cursor_mobs[i]->tbo.base.size < - vps->cursor.bo->tbo.base.size) { - vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); - vcp->cursor_mobs[i] = vps->cursor.bo; - vps->cursor.bo = NULL; - return; - } - } - - /* Destroy it if it's not worth caching. 
*/ - vmw_du_destroy_cursor_mob(&vps->cursor.bo); -} - -static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, - struct vmw_plane_state *vps) -{ - struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); - u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); - u32 i; - u32 cursor_max_dim, mob_max_size; - struct vmw_fence_obj *fence = NULL; - int ret; - - if (!dev_priv->has_mob || - (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) - return -EINVAL; - - mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); - cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); - - if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim || - vps->base.crtc_h > cursor_max_dim) - return -EINVAL; - - if (vps->cursor.bo) { - if (vps->cursor.bo->tbo.base.size >= size) - return 0; - vmw_du_put_cursor_mob(vcp, vps); - } - - /* Look for an unused mob in the cache. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (vcp->cursor_mobs[i] && - vcp->cursor_mobs[i]->tbo.base.size >= size) { - vps->cursor.bo = vcp->cursor_mobs[i]; - vcp->cursor_mobs[i] = NULL; - return 0; - } - } - /* Create a new mob if we can't find an existing one. */ - ret = vmw_bo_create_and_populate(dev_priv, size, - VMW_BO_DOMAIN_MOB, - &vps->cursor.bo); - - if (ret != 0) - return ret; - - /* Fence the mob creation so we are guarateed to have the mob */ - ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL); - if (ret != 0) - goto teardown; - - ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - if (ret != 0) { - ttm_bo_unreserve(&vps->cursor.bo->tbo); - goto teardown; - } - - dma_fence_wait(&fence->base, false); - dma_fence_put(&fence->base); - - ttm_bo_unreserve(&vps->cursor.bo->tbo); - return 0; - -teardown: - vmw_du_destroy_cursor_mob(&vps->cursor.bo); - return ret; -} - - -static void vmw_cursor_update_position(struct vmw_private *dev_priv, - bool show, int x, int y) -{ - const uint32_t svga_cursor_on = show ? 
SVGA_CURSOR_ON_SHOW - : SVGA_CURSOR_ON_HIDE; - uint32_t count; - - spin_lock(&dev_priv->cursor_lock); - if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) { - vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x); - vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y); - vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID); - vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on); - vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1); - } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) { - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); - count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); - } else { - vmw_write(dev_priv, SVGA_REG_CURSOR_X, x); - vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y); - vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on); - } - spin_unlock(&dev_priv->cursor_lock); -} - -void vmw_kms_cursor_snoop(struct vmw_surface *srf, - struct ttm_object_file *tfile, - struct ttm_buffer_object *bo, - SVGA3dCmdHeader *header) -{ - struct ttm_bo_kmap_obj map; - unsigned long kmap_offset; - unsigned long kmap_num; - SVGA3dCopyBox *box; - unsigned box_count; - void *virtual; - bool is_iomem; - struct vmw_dma_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA dma; - } *cmd; - int i, ret; - const struct SVGA3dSurfaceDesc *desc = - vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); - const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock; - - cmd = container_of(header, struct vmw_dma_cmd, header); - - /* No snooper installed, nothing to copy */ - if (!srf->snooper.image) - return; - - if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { - DRM_ERROR("face and mipmap for cursors should never != 0\n"); - return; - } - - if (cmd->header.size < 64) { - DRM_ERROR("at least one full copy box must be given\n"); - return; - } - - box = (SVGA3dCopyBox *)&cmd[1]; - box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / - sizeof(SVGA3dCopyBox); - - if (cmd->dma.guest.ptr.offset % PAGE_SIZE || - box->x != 0 || box->y != 0 || box->z != 0 || - box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1 || - box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) { - /* TODO handle none page aligned offsets */ - /* TODO handle more dst & src != 0 */ - /* TODO handle more then one copy */ - DRM_ERROR("Can't snoop dma request for cursor!\n"); - DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", - box->srcx, box->srcy, box->srcz, - box->x, box->y, box->z, - box->w, box->h, box->d, box_count, - cmd->dma.guest.ptr.offset); - return; - } - - kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; - kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT; - - ret = ttm_bo_reserve(bo, true, false, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("reserve failed\n"); - return; - } - - ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) - goto err_unreserve; - - virtual = ttm_kmap_obj_virtual(&map, &is_iomem); - - if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) { - memcpy(srf->snooper.image, virtual, - VMW_CURSOR_SNOOP_HEIGHT*image_pitch); - } else { - /* Image is unsigned pointer. 
*/ - for (i = 0; i < box->h; i++) - memcpy(srf->snooper.image + i * image_pitch, - virtual + i * cmd->dma.guest.pitch, - box->w * desc->pitchBytesPerBlock); - } - - srf->snooper.age++; - - ttm_bo_kunmap(&map); -err_unreserve: - ttm_bo_unreserve(bo); -} - -/** - * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots - * - * @dev_priv: Pointer to the device private struct. - * - * Clears all legacy hotspots. - */ -void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - drm_modeset_lock_all(dev); - drm_for_each_crtc(crtc, dev) { - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = 0; - du->hotspot_y = 0; - } - drm_modeset_unlock_all(dev); -} - -void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - mutex_lock(&dev->mode_config.mutex); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - if (!du->cursor_surface || - du->cursor_age == du->cursor_surface->snooper.age || - !du->cursor_surface->snooper.image) - continue; - - du->cursor_age = du->cursor_surface->snooper.age; - vmw_send_define_cursor_cmd(dev_priv, - du->cursor_surface->snooper.image, - VMW_CURSOR_SNOOP_WIDTH, - VMW_CURSOR_SNOOP_HEIGHT, - du->hotspot_x + du->core_hotspot_x, - du->hotspot_y + du->core_hotspot_y); - } - - mutex_unlock(&dev->mode_config.mutex); -} - - -void vmw_du_cursor_plane_destroy(struct drm_plane *plane) -{ - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - u32 i; - - vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); - - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) - vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); - - drm_plane_cleanup(plane); -} - void vmw_du_primary_plane_destroy(struct drm_plane *plane) { @@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane, /** - * vmw_du_cursor_plane_map_cm - Maps the cursor mobs. - * - * @vps: plane_state - * - * Returns 0 on success - */ - -static int -vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps) -{ - int ret; - u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); - struct ttm_buffer_object *bo; - - if (!vps->cursor.bo) - return -EINVAL; - - bo = &vps->cursor.bo->tbo; - - if (bo->base.size < size) - return -EINVAL; - - if (vps->cursor.bo->map.virtual) - return 0; - - ret = ttm_bo_reserve(bo, false, false, NULL); - if (unlikely(ret != 0)) - return -ENOMEM; - - vmw_bo_map_and_cache(vps->cursor.bo); - - ttm_bo_unreserve(bo); - - if (unlikely(ret != 0)) - return -ENOMEM; - - return 0; -} - - -/** - * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs. 
- * - * @vps: state of the cursor plane - * - * Returns 0 on success - */ - -static int -vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps) -{ - int ret = 0; - struct vmw_bo *vbo = vps->cursor.bo; - - if (!vbo || !vbo->map.virtual) - return 0; - - ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); - if (likely(ret == 0)) { - vmw_bo_unmap(vbo); - ttm_bo_unreserve(&vbo->tbo); - } - - return ret; -} - - -/** - * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface - * - * @plane: cursor plane - * @old_state: contains the state to clean up - * - * Unmaps all cursor bo mappings and unpins the cursor surface - * - * Returns 0 on success - */ -void -vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - - if (!vmw_user_object_is_null(&vps->uo)) - vmw_user_object_unmap(&vps->uo); - - vmw_du_cursor_plane_unmap_cm(vps); - vmw_du_put_cursor_mob(vcp, vps); - - vmw_du_plane_unpin_surf(vps); - vmw_user_object_unref(&vps->uo); -} - - -/** - * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it - * - * @plane: display plane - * @new_state: info on the new plane state, including the FB - * - * Returns 0 on success - */ -int -vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_framebuffer *fb = new_state->fb; - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - struct vmw_bo *bo = NULL; - int ret = 0; - - if (!vmw_user_object_is_null(&vps->uo)) { - vmw_user_object_unmap(&vps->uo); - vmw_user_object_unref(&vps->uo); - } - - if (fb) { - if (vmw_framebuffer_to_vfb(fb)->bo) { - vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer; - vps->uo.surface = NULL; - } else { - memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo)); - } - vmw_user_object_ref(&vps->uo); - } - - bo = vmw_user_object_buffer(&vps->uo); - if (bo) { - struct ttm_operation_ctx ctx = {false, false}; - - ret = ttm_bo_reserve(&bo->tbo, true, false, NULL); - if (ret != 0) - return -ENOMEM; - - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (ret != 0) - return -ENOMEM; - - vmw_bo_pin_reserved(bo, true); - if (vmw_framebuffer_to_vfb(fb)->bo) { - const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32); - - (void)vmw_bo_map_and_cache_size(bo, size); - } else { - vmw_bo_map_and_cache(bo); - } - ttm_bo_unreserve(&bo->tbo); - } - - if (!vmw_user_object_is_null(&vps->uo)) { - vmw_du_get_cursor_mob(vcp, vps); - vmw_du_cursor_plane_map_cm(vps); - } - - return 0; -} - - -void -vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, - plane); - struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; - struct vmw_private *dev_priv = vmw_priv(crtc->dev); - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); - struct vmw_bo *old_bo = NULL; - struct vmw_bo *new_bo = NULL; - struct ww_acquire_ctx ctx; - s32 hotspot_x, hotspot_y; - int ret; - - hotspot_x = du->hotspot_x + new_state->hotspot_x; - hotspot_y = du->hotspot_y + new_state->hotspot_y; - - 
du->cursor_surface = vmw_user_object_surface(&vps->uo); - - if (vmw_user_object_is_null(&vps->uo)) { - vmw_cursor_update_position(dev_priv, false, 0, 0); - return; - } - - vps->cursor.hotspot_x = hotspot_x; - vps->cursor.hotspot_y = hotspot_y; - - if (du->cursor_surface) - du->cursor_age = du->cursor_surface->snooper.age; - - ww_acquire_init(&ctx, &reservation_ww_class); - - if (!vmw_user_object_is_null(&old_vps->uo)) { - old_bo = vmw_user_object_buffer(&old_vps->uo); - ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx); - if (ret != 0) - return; - } - - if (!vmw_user_object_is_null(&vps->uo)) { - new_bo = vmw_user_object_buffer(&vps->uo); - if (old_bo != new_bo) { - ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx); - if (ret != 0) { - if (old_bo) { - ttm_bo_unreserve(&old_bo->tbo); - ww_acquire_fini(&ctx); - } - return; - } - } else { - new_bo = NULL; - } - } - if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) { - /* - * If it hasn't changed, avoid making the device do extra - * work by keeping the old cursor active. - */ - struct vmw_cursor_plane_state tmp = old_vps->cursor; - old_vps->cursor = vps->cursor; - vps->cursor = tmp; - } else { - void *image = vmw_du_cursor_plane_acquire_image(vps); - if (image) - vmw_cursor_update_image(dev_priv, vps, image, - new_state->crtc_w, - new_state->crtc_h, - hotspot_x, hotspot_y); - } - - if (new_bo) - ttm_bo_unreserve(&new_bo->tbo); - if (old_bo) - ttm_bo_unreserve(&old_bo->tbo); - - ww_acquire_fini(&ctx); - - du->cursor_x = new_state->crtc_x + du->set_gui_x; - du->cursor_y = new_state->crtc_y + du->set_gui_y; - - vmw_cursor_update_position(dev_priv, true, - du->cursor_x + hotspot_x, - du->cursor_y + hotspot_y); - - du->core_hotspot_x = hotspot_x - du->hotspot_x; - du->core_hotspot_y = hotspot_y - du->hotspot_y; -} - - -/** * vmw_du_primary_plane_atomic_check - check if the new state is okay * * @plane: display plane @@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, return ret; } - -/** - * vmw_du_cursor_plane_atomic_check - check if the new state is okay - * - * @plane: cursor plane - * @state: info on the new plane state - * - * This is a chance to fail if the new cursor state does not fit - * our requirements. 
- * - * Returns 0 on success - */ -int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0; - struct drm_crtc_state *crtc_state = NULL; - struct vmw_surface *surface = NULL; - struct drm_framebuffer *fb = new_state->fb; - - if (new_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); - - ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true, true); - if (ret) - return ret; - - /* Turning off */ - if (!fb) - return 0; - - /* A lot of the code assumes this */ - if (new_state->crtc_w != 64 || new_state->crtc_h != 64) { - DRM_ERROR("Invalid cursor dimensions (%d, %d)\n", - new_state->crtc_w, new_state->crtc_h); - return -EINVAL; - } - - if (!vmw_framebuffer_to_vfb(fb)->bo) { - surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo); - - WARN_ON(!surface); - - if (!surface || - (!surface->snooper.image && !surface->res.guest_memory_bo)) { - DRM_ERROR("surface not suitable for cursor\n"); - return -EINVAL; - } - } - - return 0; -} - - int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { @@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; vps->cpp = 0; - memset(&vps->cursor, 0, sizeof(vps->cursor)); + vps->cursor.mob = NULL; /* Each ref counted resource needs to be acquired again */ vmw_user_object_ref(&vps->uo); @@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); + struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo); + if (bo) { + vmw_bo_dirty_release(bo); + /* + * bo->dirty is reference counted so it being NULL + * means that the surface wasn't coherent to begin + * with and so we have to free the dirty tracker + * in the vmw_resource + */ + if (!bo->dirty && surf && surf->res.dirty) + surf->res.func->dirty_free(&surf->res); + } drm_framebuffer_cleanup(framebuffer); vmw_user_object_unref(&vfbs->uo); @@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer); + vmw_bo_dirty_release(vfbd->buffer); drm_framebuffer_cleanup(framebuffer); vmw_bo_unreference(&vfbd->buffer); @@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_framebuffer *vfb = NULL; struct vmw_user_object uo = {0}; + struct vmw_bo *bo; + struct vmw_surface *surface; int ret; /* returns either a bo or surface */ @@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, } err_out: + bo = vmw_user_object_buffer(&uo); + surface = vmw_user_object_surface(&uo); /* vmw_user_object_lookup takes one ref so does new_fb */ vmw_user_object_unref(&uo); @@ -1542,6 +758,14 @@ err_out: return ERR_PTR(ret); } + ttm_bo_reserve(&bo->tbo, false, false, NULL); + ret = vmw_bo_dirty_add(bo); + if (!ret && surface && surface->res.func->dirty_alloc) { + surface->res.coherent = true; + ret = surface->res.func->dirty_alloc(&surface->res); + } + ttm_bo_unreserve(&bo->tbo); + return &vfb->base; } @@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv) return ret; } -int 
vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_cursor_bypass_arg *arg = data; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - int ret = 0; - - mutex_lock(&dev->mode_config.mutex); - if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - } - - mutex_unlock(&dev->mode_config.mutex); - return 0; - } - - crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); - if (!crtc) { - ret = -ENOENT; - goto out; - } - - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - -out: - mutex_unlock(&dev->mode_config.mutex); - - return ret; -} - int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 4eab581883e2..511e29cdb987 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -1,40 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * **************************************************************************/ #ifndef VMWGFX_KMS_H_ #define VMWGFX_KMS_H_ +#include "vmwgfx_cursor_plane.h" +#include "vmwgfx_drv.h" + #include <drm/drm_encoder.h> #include <drm/drm_framebuffer.h> #include <drm/drm_probe_helper.h> -#include "vmwgfx_drv.h" - /** * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update * @plane: Plane which is being updated. 
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = { DRM_FORMAT_XRGB1555, }; -static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = { - DRM_FORMAT_ARGB8888, -}; - #define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base) #define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base) #define vmw_connector_state_to_vcs(x) \ container_of(x, struct vmw_connector_state, base) -#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base) /** * Derived class for crtc state object @@ -255,11 +231,6 @@ struct vmw_crtc_state { struct drm_crtc_state base; }; -struct vmw_cursor_plane_state { - struct vmw_bo *bo; - s32 hotspot_x; - s32 hotspot_y; -}; /** * Derived class for plane state object @@ -283,7 +254,6 @@ struct vmw_plane_state { /* For CPU Blit */ unsigned int cpp; - bool surf_mapped; struct vmw_cursor_plane_state cursor; }; @@ -317,17 +287,6 @@ struct vmw_connector_state { int gui_y; }; -/** - * Derived class for cursor plane object - * - * @base DRM plane object - * @cursor.cursor_mobs Cursor mobs available for re-use - */ -struct vmw_cursor_plane { - struct drm_plane base; - - struct vmw_bo *cursor_mobs[3]; -}; /** * Base class display unit. @@ -343,17 +302,6 @@ struct vmw_display_unit { struct drm_plane primary; struct vmw_cursor_plane cursor; - struct vmw_surface *cursor_surface; - size_t cursor_age; - - int cursor_x; - int cursor_y; - - int hotspot_x; - int hotspot_y; - s32 core_hotspot_x; - s32 core_hotspot_y; - unsigned unit; /* @@ -403,8 +351,6 @@ struct vmw_display_unit { */ void vmw_du_init(struct vmw_display_unit *du); void vmw_du_cleanup(struct vmw_display_unit *du); -void vmw_du_crtc_save(struct drm_crtc *crtc); -void vmw_du_crtc_restore(struct drm_crtc *crtc); int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size, @@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv); /* Universal Plane Helpers */ void vmw_du_primary_plane_destroy(struct drm_plane *plane); -void vmw_du_cursor_plane_destroy(struct drm_plane *plane); /* Atomic Helpers */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); -int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state); -void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state); -int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state); -void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state); void vmw_du_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); void vmw_du_plane_reset(struct drm_plane *plane); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index f0b429525467..c23c9195f0dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = { static const struct drm_plane_funcs vmw_ldu_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = { */ static 
const struct drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 7055cbefc768..d8204d4265d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -282,8 +282,7 @@ out_no_setup: } vmw_bo_unpin_unlocked(&batch->otable_bo->tbo); - ttm_bo_put(&batch->otable_bo->tbo); - batch->otable_bo = NULL; + vmw_bo_unreference(&batch->otable_bo); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 74ff2812d66a..7de20e56082c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -1,27 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
* **************************************************************************/ #include "vmwgfx_bo.h" @@ -71,6 +52,11 @@ struct vmw_bo_dirty { unsigned long bitmap[]; }; +bool vmw_bo_is_dirty(struct vmw_bo *vbo) +{ + return vbo->dirty && (vbo->dirty->start < vbo->dirty->end); +} + /** * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits * @vbo: The buffer object to scan @@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) dirty->end = res_start; } +void vmw_bo_dirty_clear(struct vmw_bo *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + pgoff_t start, cur, end; + unsigned long res_start = 0; + unsigned long res_end = vbo->tbo.base.size; + + WARN_ON_ONCE(res_start & ~PAGE_MASK); + res_start >>= PAGE_SHIFT; + res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); + + if (res_start >= dirty->end || res_end <= dirty->start) + return; + + cur = max(res_start, dirty->start); + res_end = max(res_end, dirty->end); + while (cur < res_end) { + unsigned long num; + + start = find_next_bit(&dirty->bitmap[0], res_end, cur); + if (start >= res_end) + break; + + end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); + cur = end + 1; + num = end - start; + bitmap_clear(&dirty->bitmap[0], start, num); + } + + if (res_start <= dirty->start && res_end > dirty->start) + dirty->start = res_end; + if (res_start < dirty->end && res_end >= dirty->end) + dirty->end = res_start; +} + /** * vmw_bo_dirty_clear_res - Clear a resource's dirty region from * its backing mob. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a73af8a355fb..388011696941 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, goto out_bad_resource; res = converter->base_obj_to_res(base); - kref_get(&res->kref); + vmw_resource_reference(res); *p_res = res; ret = 0; @@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, return 0; } - ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo); + ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo); if (unlikely(ret != 0)) goto out_no_bo; @@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, } INIT_LIST_HEAD(&val_list); - ttm_bo_get(&res->guest_memory_bo->tbo); val_buf->bo = &res->guest_memory_bo->tbo; val_buf->num_shared = 0; + drm_gem_object_get(&val_buf->bo->base); list_add_tail(&val_buf->head, &val_list); ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); if (unlikely(ret != 0)) @@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, out_no_validate: ttm_eu_backoff_reservation(ticket, &val_list); out_no_reserve: - ttm_bo_put(val_buf->bo); + drm_gem_object_put(&val_buf->bo->base); val_buf->bo = NULL; if (guest_memory_dirty) vmw_user_bo_unref(&res->guest_memory_bo); @@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); ttm_eu_backoff_reservation(ticket, &val_list); - ttm_bo_put(val_buf->bo); + drm_gem_object_put(&val_buf->bo->base); val_buf->bo = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 32029d80b72b..5f5f5a94301f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, * resume the overlays, this is preferred 
to failing to alloc. */ vmw_overlay_pause_all(dev_priv); - ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer); + ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer); vmw_overlay_resume_all(dev_priv); if (ret) return ret; @@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = { static const struct drm_plane_funcs vmw_sou_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = { */ static const struct drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index f5d2ed1b0a72..20aab725e53a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = { static const struct drm_plane_funcs vmw_stdu_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = { */ static const struct drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct @@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) } drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(&cursor->base); ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5721c74da3e0..7e281c3c6bc5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1,32 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * **************************************************************************/ #include "vmwgfx_bo.h" +#include "vmwgfx_cursor_plane.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" @@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - WARN_ON_ONCE(res->dirty); + WARN_ON(res->dirty); if (user_srf->master) drm_master_put(&user_srf->master); kfree(srf->offsets); @@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) * Dumb buffers own the resource and they'll unref the * resource themselves */ - if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb) - return; + WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb); vmw_resource_unreference(&res); } @@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } } res->guest_memory_size = cur_bo_offset; - if (!file_priv->atomic && - metadata->scanout && - metadata->num_sizes == 1 && - metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && - metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && - metadata->format == VMW_CURSOR_SNOOP_FORMAT) { - const struct SVGA3dSurfaceDesc *desc = - vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); - const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH * - VMW_CURSOR_SNOOP_HEIGHT * - desc->pitchBytesPerBlock; - srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL); - if (!srf->snooper.image) { - DRM_ERROR("Failed to allocate cursor_image\n"); - ret = -ENOMEM; - goto out_no_copy; - } - } else { - srf->snooper.image = NULL; + + srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata); + if (IS_ERR(srf->snooper.image)) { + ret = PTR_ERR(srf->snooper.image); + goto out_no_copy; } if (drm_is_primary_client(file_priv)) @@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, .pin = false }; - ret = vmw_gem_object_create(dev_priv, - ¶ms, - &res->guest_memory_bo); + ret = vmw_bo_create(dev_priv, ¶ms, &res->guest_memory_bo); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_add_detached_resource(res->guest_memory_bo, res); 
} tmp = vmw_resource_reference(&srf->res); @@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } + if (res->guest_memory_bo) { + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + } + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, VMW_RES_SURFACE, @@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, rep->handle = user_srf->prime.base.handle; rep->backup_size = res->guest_memory_size; if (res->guest_memory_bo) { - vmw_bo_add_detached_resource(res->guest_memory_bo, res); rep->buffer_map_handle = drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); rep->buffer_size = res->guest_memory_bo->tbo.base.size; @@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv, vbo = res->guest_memory_bo; vbo->is_dumb = true; vbo->dumb_surface = vmw_res_to_srf(res); - + drm_gem_object_put(&vbo->tbo.base); + /* + * Unset the user surface dtor since this is not actually exposed + * to userspace. The surface is owned via the dumb_buffer's GEM handle + */ + struct vmw_user_surface *usurf = container_of(vbo->dumb_surface, + struct vmw_user_surface, srf); + usurf->prime.base.refcount_release = NULL; err: if (res) vmw_resource_unreference(&res); - if (ret) - ttm_ref_object_base_unref(tfile, arg.rep.handle); + + ttm_ref_object_base_unref(tfile, arg.rep.handle); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index e7625b3f71e0..7ee93e7191c7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, bo_node->hash.key); } val_buf = &bo_node->base; - val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); - if (!val_buf->bo) - return -ESRCH; + vmw_bo_reference(vbo); + val_buf->bo = &vbo->tbo; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &ctx->bo_list); } @@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx) struct vmw_validation_res_node *val; list_for_each_entry(entry, &ctx->bo_list, base.head) { - ttm_bo_put(entry->base.bo); + drm_gem_object_put(&entry->base.bo->base); entry->base.bo = NULL; } diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 5c2f459a2925..9bce047901b2 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -39,7 +39,6 @@ config DRM_XE select DRM_TTM_HELPER select DRM_EXEC select DRM_GPUVM - select DRM_GPUSVM if !UML && DEVICE_PRIVATE select DRM_SCHED select MMU_NOTIFIER select WANT_DEV_COREDUMP @@ -74,9 +73,22 @@ config DRM_XE_DP_TUNNEL If in doubt say "Y". +config DRM_XE_GPUSVM + bool "Enable CPU to GPU address mirroring" + depends on DRM_XE + depends on !UML + depends on DEVICE_PRIVATE + default y + select DRM_GPUSVM + help + Enable this option if you want support for CPU to GPU address + mirroring. + + If in doubt say "Y". 
+ config DRM_XE_DEVMEM_MIRROR bool "Enable device memory mirror" - depends on DRM_XE + depends on DRM_XE_GPUSVM select GET_FREE_REGION default y help diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 9699b08585f7..c5d6681645ed 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -125,12 +125,13 @@ xe-y += xe_bb.o \ xe_wopcm.o xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o -xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o +xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o # graphics hardware monitoring (HWMON) support xe-$(CONFIG_HWMON) += xe_hwmon.o xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o +xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o # graphics virtualization (SR-IOV) support xe-y += \ @@ -185,6 +186,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/intel_fbdev_fb.o \ display/xe_display.o \ display/xe_display_misc.o \ + display/xe_display_rpm.o \ display/xe_display_rps.o \ display/xe_display_wa.o \ display/xe_dsb_buffer.o \ diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index ec516e838ee8..448afb86e05c 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -141,6 +141,7 @@ enum xe_guc_action { XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507, XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A, XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C, + XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D, XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000, XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002, XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003, diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index d633f1c739e4..7de8f827281f 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -367,6 +367,7 @@ enum xe_guc_klv_ids { GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008, GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009, GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a, + GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b, }; #endif diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index dfec5108d2c3..f89bd5e3520d 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -13,7 +13,6 @@ #include <drm/drm_drv.h> #include "i915_utils.h" -#include "intel_runtime_pm.h" #include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h deleted file mode 100644 index 274042bff1be..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef __INTEL_RUNTIME_PM_H__ -#define __INTEL_RUNTIME_PM_H__ - -#include "intel_wakeref.h" -#include "xe_device_types.h" -#include "xe_pm.h" - -#define intel_runtime_pm xe_runtime_pm - -static inline void disable_rpm_wakeref_asserts(void *rpm) -{ -} - -static inline void enable_rpm_wakeref_asserts(void *rpm) -{ -} - -static inline bool -intel_runtime_pm_suspended(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - return pm_runtime_suspended(xe->drm.dev); -} - -static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct 
xe_device, runtime_pm); - - return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL; -} - -static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL; -} - -static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - xe_pm_runtime_get_noresume(xe); - - return INTEL_WAKEREF_DEF; -} - -static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - xe_pm_runtime_put(xe); -} - -static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref) -{ - if (wakeref) - intel_runtime_pm_put_unchecked(pm); -} - -#define intel_runtime_pm_get_raw intel_runtime_pm_get -#define intel_runtime_pm_put_raw intel_runtime_pm_put -#define assert_rpm_wakelock_held(x) do { } while (0) -#define assert_rpm_raw_wakeref_held(x) do { } while (0) - -#define with_intel_runtime_pm(rpm, wf) \ - for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) - -#endif diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index 3a1e505ff182..267f31697343 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, NULL, size, ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED); + XE_BO_FLAG_GGTT); if (!IS_ERR(obj)) drm_info(&xe->drm, "Allocated fbdev into stolen\n"); else @@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size, ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED); + XE_BO_FLAG_GGTT); } if (IS_ERR(obj)) { diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 0b0aca7a25af..20c3bcd953b7 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -147,7 +147,7 @@ int xe_display_init_early(struct xe_device *xe) */ intel_dram_detect(xe); - intel_bw_init_hw(xe); + intel_bw_init_hw(display); intel_display_device_info_runtime_init(display); @@ -173,7 +173,7 @@ static void xe_display_fini(void *arg) struct xe_device *xe = arg; struct intel_display *display = &xe->display; - intel_hpd_poll_fini(xe); + intel_hpd_poll_fini(display); intel_hdcp_component_fini(display); intel_audio_deinit(display); intel_display_driver_remove(display); @@ -220,11 +220,13 @@ void xe_display_unregister(struct xe_device *xe) void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; if (master_ctl & DISPLAY_IRQ) - gen11_display_irq_handler(xe); + gen11_display_irq_handler(display); } void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) @@ -240,19 +242,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) void xe_display_irq_reset(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; - gen11_display_irq_reset(xe); + 
gen11_display_irq_reset(display); } void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; if (gt->info.id == XE_GT0) - gen11_de_irq_postinstall(xe); + gen11_de_irq_postinstall(display); } static bool suspend_to_idle(void) @@ -305,7 +311,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe) intel_dmc_suspend(display); if (has_display(xe)) - intel_hpd_poll_enable(xe); + intel_hpd_poll_enable(display); } static void xe_display_disable_d3cold(struct xe_device *xe) @@ -322,10 +328,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe) intel_display_driver_init_hw(display); - intel_hpd_init(xe); + intel_hpd_init(display); if (has_display(xe)) - intel_hpd_poll_disable(xe); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -355,7 +361,7 @@ void xe_display_pm_suspend(struct xe_device *xe) xe_display_flush_cleanup_work(xe); - intel_hpd_cancel_work(xe); + intel_hpd_cancel_work(display); if (has_display(xe)) { intel_display_driver_suspend_access(display); @@ -385,7 +391,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) xe_display_flush_cleanup_work(xe); intel_dp_mst_suspend(display); - intel_hpd_cancel_work(xe); + intel_hpd_cancel_work(display); if (has_display(xe)) intel_display_driver_suspend_access(display); @@ -400,6 +406,8 @@ void xe_display_pm_shutdown(struct xe_device *xe) void xe_display_pm_runtime_suspend(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; @@ -408,7 +416,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe) return; } - intel_hpd_poll_enable(xe); + intel_hpd_poll_enable(display); } void xe_display_pm_suspend_late(struct xe_device *xe) @@ -482,7 +490,7 @@ void xe_display_pm_resume(struct xe_device *xe) if (has_display(xe)) intel_display_driver_resume_access(display); - intel_hpd_init(xe); + intel_hpd_init(display); if (has_display(xe)) { intel_display_driver_resume(display); @@ -491,7 +499,7 @@ void xe_display_pm_resume(struct xe_device *xe) } if (has_display(xe)) - intel_hpd_poll_disable(xe); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -502,6 +510,8 @@ void xe_display_pm_resume(struct xe_device *xe) void xe_display_pm_runtime_resume(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; @@ -510,9 +520,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe) return; } - intel_hpd_init(xe); - intel_hpd_poll_disable(xe); - skl_watermark_ipc_update(xe); + intel_hpd_init(display); + intel_hpd_poll_disable(display); + skl_watermark_ipc_update(display); } diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c new file mode 100644 index 000000000000..1955153aadba --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "intel_display_rpm.h" +#include "xe_device_types.h" +#include "xe_pm.h" + +static struct xe_device *display_to_xe(struct intel_display *display) +{ + return container_of(display, struct xe_device, display); +} + +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) +{ + return intel_display_rpm_get(display); +} + +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) +{ + intel_display_rpm_put(display, wakeref); +} + +struct ref_tracker 
*intel_display_rpm_get(struct intel_display *display) +{ + return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL; +} + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) +{ + return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL; +} + +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) +{ + xe_pm_runtime_get_noresume(display_to_xe(display)); + + return INTEL_WAKEREF_DEF; +} + +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) +{ + if (wakeref) + xe_pm_runtime_put(display_to_xe(display)); +} + +void intel_display_rpm_put_unchecked(struct intel_display *display) +{ + xe_pm_runtime_put(display_to_xe(display)); +} + +bool intel_display_rpm_suspended(struct intel_display *display) +{ + struct xe_device *xe = display_to_xe(display); + + return pm_runtime_suspended(xe->drm.dev); +} + +void assert_display_rpm_held(struct intel_display *display) +{ + /* FIXME */ +} + +void intel_display_rpm_assert_block(struct intel_display *display) +{ + /* FIXME */ +} + +void intel_display_rpm_assert_unblock(struct intel_display *display) +{ + /* FIXME */ +} diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c index 68e3d1959ad6..2933ca97d673 100644 --- a/drivers/gpu/drm/xe/display/xe_display_wa.c +++ b/drivers/gpu/drm/xe/display/xe_display_wa.c @@ -10,7 +10,9 @@ #include <generated/xe_wa_oob.h> -bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) +bool intel_display_needs_wa_16023588340(struct intel_display *display) { - return XE_WA(xe_root_mmio_gt(i915), 16023588340); + struct xe_device *xe = to_xe_device(display->drm); + + return XE_WA(xe_root_mmio_gt(xe), 16023588340); } diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 4ca0cb571194..6502b8274173 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -83,7 +83,7 @@ initial_plane_bo(struct xe_device *xe, if (plane_config->size == 0) return NULL; - flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT; + flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT; base = round_down(plane_config->base, page_size); if (IS_DGFX(xe)) { diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h new file mode 100644 index 000000000000..2987b10d3e16 --- /dev/null +++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_ALU_COMMANDS_H_ +#define _XE_ALU_COMMANDS_H_ + +#include "instructions/xe_instr_defs.h" + +/* Instruction Opcodes */ +#define CS_ALU_OPCODE_NOOP 0x000 +#define CS_ALU_OPCODE_FENCE_RD 0x001 +#define CS_ALU_OPCODE_FENCE_WR 0x002 +#define CS_ALU_OPCODE_LOAD 0x080 +#define CS_ALU_OPCODE_LOADINV 0x480 +#define CS_ALU_OPCODE_LOAD0 0x081 +#define CS_ALU_OPCODE_LOAD1 0x481 +#define CS_ALU_OPCODE_LOADIND 0x082 +#define CS_ALU_OPCODE_ADD 0x100 +#define CS_ALU_OPCODE_SUB 0x101 +#define CS_ALU_OPCODE_AND 0x102 +#define CS_ALU_OPCODE_OR 0x103 +#define CS_ALU_OPCODE_XOR 0x104 +#define CS_ALU_OPCODE_SHL 0x105 +#define CS_ALU_OPCODE_SHR 0x106 +#define CS_ALU_OPCODE_SAR 0x107 +#define CS_ALU_OPCODE_STORE 0x180 +#define CS_ALU_OPCODE_STOREINV 0x580 +#define CS_ALU_OPCODE_STOREIND 0x181 + +/* Instruction Operands */ +#define CS_ALU_OPERAND_REG(n) 
REG_FIELD_PREP(GENMASK(3, 0), (n)) +#define CS_ALU_OPERAND_REG0 0x0 +#define CS_ALU_OPERAND_REG1 0x1 +#define CS_ALU_OPERAND_REG2 0x2 +#define CS_ALU_OPERAND_REG3 0x3 +#define CS_ALU_OPERAND_REG4 0x4 +#define CS_ALU_OPERAND_REG5 0x5 +#define CS_ALU_OPERAND_REG6 0x6 +#define CS_ALU_OPERAND_REG7 0x7 +#define CS_ALU_OPERAND_REG8 0x8 +#define CS_ALU_OPERAND_REG9 0x9 +#define CS_ALU_OPERAND_REG10 0xa +#define CS_ALU_OPERAND_REG11 0xb +#define CS_ALU_OPERAND_REG12 0xc +#define CS_ALU_OPERAND_REG13 0xd +#define CS_ALU_OPERAND_REG14 0xe +#define CS_ALU_OPERAND_REG15 0xf +#define CS_ALU_OPERAND_SRCA 0x20 +#define CS_ALU_OPERAND_SRCB 0x21 +#define CS_ALU_OPERAND_ACCU 0x31 +#define CS_ALU_OPERAND_ZF 0x32 +#define CS_ALU_OPERAND_CF 0x33 +#define CS_ALU_OPERAND_NA 0 /* N/A operand */ + +/* Command Streamer ALU Instructions */ +#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \ + REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \ + REG_FIELD_PREP(GENMASK(9, 0), (op2))) + +#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \ + CS_ALU_OPERAND_##op1, \ + CS_ALU_OPERAND_##op2) + +#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA) +#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2) +#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2) +#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA) +#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA) +#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA) +#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA) +#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA) +#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA) +#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA) +#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2) +#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2) + +#endif diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h index 31d28a67ef6a..457881af8af9 100644 --- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h @@ -137,6 +137,7 @@ #define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81) #define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82) #define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83) +#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89) #define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0) #define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4) diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 167fb0f742de..eba582058d55 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -32,6 +32,7 @@ #define MI_BATCH_BUFFER_END __MI_INSTR(0xA) #define MI_TOPOLOGY_FILTER __MI_INSTR(0xD) #define MI_FORCE_WAKEUP __MI_INSTR(0x1D) +#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1)) #define MI_STORE_DATA_IMM __MI_INSTR(0x20) #define MI_SDI_GGTT REG_BIT(22) @@ -61,6 +62,10 @@ #define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4)) #define MI_LRM_USE_GGTT REG_BIT(22) +#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3)) +#define MI_LRR_DST_CS_MMIO REG_BIT(19) +#define MI_LRR_SRC_CS_MMIO REG_BIT(18) + #define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5)) #define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22) #define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21) diff --git 
a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index fb8ec317b6ee..da713634d6a0 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -188,6 +188,10 @@ #define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1) #define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0) +#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4) +#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2) +#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1) + #define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08) #define CG3DDISHRS_CLKGATE_DIS REG_BIT(5) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index da1f198ac107..cbb9f7cbcfc0 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -62,7 +62,6 @@ #define LE_SSE_MASK REG_GENMASK(18, 17) #define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value) #define LE_COS_MASK REG_GENMASK(16, 15) -#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK) #define LE_SCF_MASK REG_BIT(14) #define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value) #define LE_PFM_MASK REG_GENMASK(13, 11) @@ -392,6 +391,18 @@ #define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4) #define XEHP_LNESPARE REG_BIT(19) +#define LSN_VC_REG2 XE_REG_MCR(0xb0c8) +#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28) +#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value) +#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24) +#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value) +#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20) +#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value) +#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16) +#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value) +#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12) +#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value) + #define L3SQCREG2 XE_REG_MCR(0xb104) #define COMPMEMRD256BOVRFETCHEN REG_BIT(20) diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h index 8846eb9ce2a4..c7d5d782e3f9 100644 --- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h @@ -21,6 +21,9 @@ #define BMG_PACKAGE_POWER_SKU XE_REG(0x138098) #define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc) #define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120) +#define BMG_FAN_1_SPEED XE_REG(0x138140) +#define BMG_FAN_2_SPEED XE_REG(0x138170) +#define BMG_FAN_3_SPEED XE_REG(0x1381a0) #define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0) #define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434) #define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 9fde67ca989f..378dcd0fb414 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, } /* Evict to system. CCS data should be copied. 
*/ - ret = xe_bo_evict(bo, true); + ret = xe_bo_evict(bo); if (ret) { KUNIT_FAIL(test, "Failed to evict bo.\n"); return ret; @@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc for_each_gt(__gt, xe, id) xe_gt_sanitize(__gt); - err = xe_bo_restore_kernel(xe); + err = xe_bo_restore_early(xe); /* * Snapshotting the CTB and copying back a potentially old * version seems risky, depending on what might have been @@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc goto cleanup_all; } - err = xe_bo_restore_user(xe); + err = xe_bo_restore_late(xe); if (err) { KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err)); goto cleanup_all; diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index cedd3e88a6fb..c53f67ce4b0a 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, * the exporter and the importer should be the same bo. */ swap(exported->ttm.base.dma_buf, dmabuf); - ret = xe_bo_evict(exported, true); + ret = xe_bo_evict(exported); swap(exported->ttm.base.dma_buf, dmabuf); if (ret) { if (ret != -EINTR && ret != -ERESTARTSYS) diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index d5fe0ea889ad..4a65e3103f77 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_VRAM_IF_DGFX(tile)); if (IS_ERR(big)) { KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big)); goto vunmap; @@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_VRAM_IF_DGFX(tile)); if (IS_ERR(pt)) { KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", PTR_ERR(pt)); @@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) tiny = xe_bo_create_pin_map(xe, tile, m->q->vm, 2 * SZ_4K, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_VRAM_IF_DGFX(tile)); if (IS_ERR(tiny)) { KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n", PTR_ERR(tiny)); @@ -512,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, dma_fence_put(fence); kunit_info(test, "Evict vram buffer object\n"); - ret = xe_bo_evict(vram_bo, true); + ret = xe_bo_evict(vram_bo); if (ret) { KUNIT_FAIL(test, "Failed to evict bo.\n"); return; diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 64f9c936eea0..3a84a9d92c48 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -55,6 +55,8 @@ static struct ttm_placement sys_placement = { .placement = &sys_placement_flags, }; +static struct ttm_placement purge_placement; + static const struct ttm_place tt_placement_flags[] = { { .fpfn = 0, @@ -189,11 +191,18 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo, static bool force_contiguous(u32 bo_flags) { + if (bo_flags & XE_BO_FLAG_STOLEN) + return true; /* users expect this */ + else if (bo_flags & XE_BO_FLAG_PINNED && + !(bo_flags & 
XE_BO_FLAG_PINNED_LATE_RESTORE)) + return true; /* needs vmap */ + /* * For eviction / restore on suspend / resume objects pinned in VRAM * must be contiguous, also only contiguous BOs support xe_bo_vmap. */ - return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT); + return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS && + bo_flags & XE_BO_FLAG_PINNED; } static void add_vram(struct xe_device *xe, struct xe_bo *bo, @@ -281,6 +290,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, static void xe_evict_flags(struct ttm_buffer_object *tbo, struct ttm_placement *placement) { + struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm); + bool device_unplugged = drm_dev_is_unplugged(&xe->drm); struct xe_bo *bo; if (!xe_bo_is_xe_bo(tbo)) { @@ -290,7 +301,7 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo, return; } - *placement = sys_placement; + *placement = device_unplugged ? purge_placement : sys_placement; return; } @@ -300,6 +311,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo, return; } + if (device_unplugged && !tbo->base.dma_buf) { + *placement = purge_placement; + return; + } + /* * For xe, sg bos that are evicted to system just triggers a * rebind of the sg list upon subsequent validation to XE_PL_TT. @@ -657,11 +673,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo, struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + bool device_unplugged = drm_dev_is_unplugged(&xe->drm); struct sg_table *sg; xe_assert(xe, attach); xe_assert(xe, ttm_bo->ttm); + if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM && + ttm_bo->sg) { + dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); + ttm_bo->sg = NULL; + } + if (new_res->mem_type == XE_PL_SYSTEM) goto out; @@ -898,79 +923,44 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, xe_pm_runtime_get_noresume(xe); } - if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) { - /* - * Kernel memory that is pinned should only be moved on suspend - * / resume, some of the pinned memory is required for the - * device to resume / use the GPU to move other evicted memory - * (user memory) around. This likely could be optimized a bit - * further where we find the minimum set of pinned memory - * required for resume but for simplity doing a memcpy for all - * pinned memory. 
- */ - ret = xe_bo_vmap(bo); - if (!ret) { - ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem); - - /* Create a new VMAP once kernel BO back in VRAM */ - if (!ret && resource_is_vram(new_mem)) { - struct xe_vram_region *vram = res_to_mem_region(new_mem); - void __iomem *new_addr = vram->mapping + - (new_mem->start << PAGE_SHIFT); + if (move_lacks_source) { + u32 flags = 0; - if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { - ret = -EINVAL; - xe_pm_runtime_put(xe); - goto out; - } + if (mem_type_is_vram(new_mem->mem_type)) + flags |= XE_MIGRATE_CLEAR_FLAG_FULL; + else if (handle_system_ccs) + flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA; - xe_assert(xe, new_mem->start == - bo->placements->fpfn); - - iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); - } - } + fence = xe_migrate_clear(migrate, bo, new_mem, flags); } else { - if (move_lacks_source) { - u32 flags = 0; - - if (mem_type_is_vram(new_mem->mem_type)) - flags |= XE_MIGRATE_CLEAR_FLAG_FULL; - else if (handle_system_ccs) - flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA; - - fence = xe_migrate_clear(migrate, bo, new_mem, flags); - } - else - fence = xe_migrate_copy(migrate, bo, bo, old_mem, - new_mem, handle_system_ccs); - if (IS_ERR(fence)) { - ret = PTR_ERR(fence); - xe_pm_runtime_put(xe); - goto out; - } - if (!move_lacks_source) { - ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, - true, new_mem); - if (ret) { - dma_fence_wait(fence, false); - ttm_bo_move_null(ttm_bo, new_mem); - ret = 0; - } - } else { - /* - * ttm_bo_move_accel_cleanup() may blow up if - * bo->resource == NULL, so just attach the - * fence and set the new resource. - */ - dma_resv_add_fence(ttm_bo->base.resv, fence, - DMA_RESV_USAGE_KERNEL); + fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem, + handle_system_ccs); + } + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + xe_pm_runtime_put(xe); + goto out; + } + if (!move_lacks_source) { + ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true, + new_mem); + if (ret) { + dma_fence_wait(fence, false); ttm_bo_move_null(ttm_bo, new_mem); + ret = 0; } - - dma_fence_put(fence); + } else { + /* + * ttm_bo_move_accel_cleanup() may blow up if + * bo->resource == NULL, so just attach the + * fence and set the new resource. + */ + dma_resv_add_fence(ttm_bo->base.resv, fence, + DMA_RESV_USAGE_KERNEL); + ttm_bo_move_null(ttm_bo, new_mem); } + dma_fence_put(fence); xe_pm_runtime_put(xe); out: @@ -1095,6 +1085,80 @@ out_unref: } /** + * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed + * up in system memory. + * @bo: The buffer object to prepare. + * + * On successful completion, the object backup pages are allocated. Expectation + * is that this is called from the PM notifier, prior to suspend/hibernation. + * + * Return: 0 on success. Negative error code on failure. + */ +int xe_bo_notifier_prepare_pinned(struct xe_bo *bo) +{ + struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); + struct xe_bo *backup; + int ret = 0; + + xe_bo_lock(bo, false); + + xe_assert(xe, !bo->backup_obj); + + /* + * Since this is called from the PM notifier we might have raced with + * someone unpinning this after we dropped the pinned list lock and + * before grabbing the above bo lock. 
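+ * If the object did get unpinned in that window there is nothing left + * to back up, so the check below just drops the lock and returns 0.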
+ */ + if (!xe_bo_is_pinned(bo)) + goto out_unlock_bo; + + if (!xe_bo_is_vram(bo)) + goto out_unlock_bo; + + if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) + goto out_unlock_bo; + + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, + DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | + XE_BO_FLAG_PINNED); + if (IS_ERR(backup)) { + ret = PTR_ERR(backup); + goto out_unlock_bo; + } + + backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ + ttm_bo_pin(&backup->ttm); + bo->backup_obj = backup; + +out_unlock_bo: + xe_bo_unlock(bo); + return ret; +} + +/** + * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation. + * @bo: The buffer object to undo the prepare for. + * + * Always returns 0. The backup object is removed, if still present. Expectation + * is that this is called from the PM notifier when undoing the prepare step. + * + * Return: Always returns 0. + */ +int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo) +{ + xe_bo_lock(bo, false); + if (bo->backup_obj) { + ttm_bo_unpin(&bo->backup_obj->ttm); + xe_bo_put(bo->backup_obj); + bo->backup_obj = NULL; + } + xe_bo_unlock(bo); + + return 0; +} + +/** * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory * @bo: The buffer object to move. * @@ -1107,59 +1171,99 @@ out_unref: */ int xe_bo_evict_pinned(struct xe_bo *bo) { - struct ttm_place place = { - .mem_type = XE_PL_TT, - }; - struct ttm_placement placement = { - .placement = &place, - .num_placement = 1, - }; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .gfp_retry_mayfail = true, - }; - struct ttm_resource *new_mem; - int ret; + struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); + struct xe_bo *backup = bo->backup_obj; + bool backup_created = false; + bool unmap = false; + int ret = 0; - xe_bo_assert_held(bo); + xe_bo_lock(bo, false); - if (WARN_ON(!bo->ttm.resource)) - return -EINVAL; + if (WARN_ON(!bo->ttm.resource)) { + ret = -EINVAL; + goto out_unlock_bo; + } - if (WARN_ON(!xe_bo_is_pinned(bo))) - return -EINVAL; + if (WARN_ON(!xe_bo_is_pinned(bo))) { + ret = -EINVAL; + goto out_unlock_bo; + } if (!xe_bo_is_vram(bo)) - return 0; + goto out_unlock_bo; + + if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) + goto out_unlock_bo; + + if (!backup) { + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, + DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | + XE_BO_FLAG_PINNED); + if (IS_ERR(backup)) { + ret = PTR_ERR(backup); + goto out_unlock_bo; + } + backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ + backup_created = true; + } - ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx); - if (ret) - return ret; + if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { + struct xe_migrate *migrate; + struct dma_fence *fence; + + if (bo->tile) + migrate = bo->tile->migrate; + else + migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); + + ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); + if (ret) + goto out_backup; + + ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1); + if (ret) + goto out_backup; - if (!bo->ttm.ttm) { - bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0); - if (!bo->ttm.ttm) { - ret = -ENOMEM; - goto err_res_free; + fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource, + backup->ttm.resource, false); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + goto out_backup; } - } - ret =
ttm_bo_populate(&bo->ttm, &ctx); - if (ret) - goto err_res_free; + dma_resv_add_fence(bo->ttm.base.resv, fence, + DMA_RESV_USAGE_KERNEL); + dma_resv_add_fence(backup->ttm.base.resv, fence, + DMA_RESV_USAGE_KERNEL); + dma_fence_put(fence); + } else { + ret = xe_bo_vmap(backup); + if (ret) + goto out_backup; - ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); - if (ret) - goto err_res_free; + if (iosys_map_is_null(&bo->vmap)) { + ret = xe_bo_vmap(bo); + if (ret) + goto out_backup; + unmap = true; + } - ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); - if (ret) - goto err_res_free; + xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, + bo->size); + } - return 0; + if (!bo->backup_obj) + bo->backup_obj = backup; -err_res_free: - ttm_resource_free(&bo->ttm, &new_mem); +out_backup: + xe_bo_vunmap(backup); + if (ret && backup_created) + xe_bo_put(backup); +out_unlock_bo: + if (unmap) + xe_bo_vunmap(bo); + xe_bo_unlock(bo); return ret; } @@ -1180,50 +1284,109 @@ int xe_bo_restore_pinned(struct xe_bo *bo) .interruptible = false, .gfp_retry_mayfail = false, }; - struct ttm_resource *new_mem; - struct ttm_place *place = &bo->placements[0]; + struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); + struct xe_bo *backup = bo->backup_obj; + bool unmap = false; int ret; - xe_bo_assert_held(bo); + if (!backup) + return 0; - if (WARN_ON(!bo->ttm.resource)) - return -EINVAL; + xe_bo_lock(bo, false); - if (WARN_ON(!xe_bo_is_pinned(bo))) - return -EINVAL; + if (!xe_bo_is_pinned(backup)) { + ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx); + if (ret) + goto out_unlock_bo; + } - if (WARN_ON(xe_bo_is_vram(bo))) - return -EINVAL; + if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { + struct xe_migrate *migrate; + struct dma_fence *fence; - if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo))) - return -EINVAL; + if (bo->tile) + migrate = bo->tile->migrate; + else + migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); - if (!mem_type_is_vram(place->mem_type)) - return 0; + ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); + if (ret) + goto out_unlock_bo; - ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx); - if (ret) - return ret; + ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1); + if (ret) + goto out_unlock_bo; - ret = ttm_bo_populate(&bo->ttm, &ctx); - if (ret) - goto err_res_free; + fence = xe_migrate_copy(migrate, backup, bo, + backup->ttm.resource, bo->ttm.resource, + false); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + goto out_unlock_bo; + } - ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); - if (ret) - goto err_res_free; + dma_resv_add_fence(bo->ttm.base.resv, fence, + DMA_RESV_USAGE_KERNEL); + dma_resv_add_fence(backup->ttm.base.resv, fence, + DMA_RESV_USAGE_KERNEL); + dma_fence_put(fence); + } else { + ret = xe_bo_vmap(backup); + if (ret) + goto out_unlock_bo; - ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL); - if (ret) - goto err_res_free; + if (iosys_map_is_null(&bo->vmap)) { + ret = xe_bo_vmap(bo); + if (ret) + goto out_backup; + unmap = true; + } - return 0; + xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, + bo->size); + } + + bo->backup_obj = NULL; -err_res_free: - ttm_resource_free(&bo->ttm, &new_mem); +out_backup: + xe_bo_vunmap(backup); + if (!bo->backup_obj) { + if (xe_bo_is_pinned(backup)) + ttm_bo_unpin(&backup->ttm); + xe_bo_put(backup); + } +out_unlock_bo: + if (unmap) + xe_bo_vunmap(bo); + xe_bo_unlock(bo); return ret; } +int xe_bo_dma_unmap_pinned(struct xe_bo *bo) +{ + struct 
ttm_buffer_object *ttm_bo = &bo->ttm; + struct ttm_tt *tt = ttm_bo->ttm; + + if (tt) { + struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm); + + if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { + dma_buf_unmap_attachment(ttm_bo->base.import_attach, + ttm_bo->sg, + DMA_BIDIRECTIONAL); + ttm_bo->sg = NULL; + xe_tt->sg = NULL; + } else if (xe_tt->sg) { + dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, + DMA_BIDIRECTIONAL, 0); + sg_free_table(xe_tt->sg); + xe_tt->sg = NULL; + } + } + + return 0; +} + static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo, unsigned long page_offset) { @@ -1371,6 +1534,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, struct xe_res_cursor cursor; struct xe_vram_region *vram; int bytes_left = len; + int err = 0; xe_bo_assert_held(bo); xe_device_assert_mem_access(xe); @@ -1378,9 +1542,14 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, if (!mem_type_is_vram(ttm_bo->resource->mem_type)) return -EIO; - /* FIXME: Use GPU for non-visible VRAM */ - if (!xe_ttm_resource_visible(ttm_bo->resource)) - return -EIO; + if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) { + struct xe_migrate *migrate = + mem_type_to_migrate(xe, ttm_bo->resource->mem_type); + + err = xe_migrate_access_memory(migrate, bo, offset, buf, len, + write); + goto out; + } vram = res_to_mem_region(ttm_bo->resource); xe_res_first(ttm_bo->resource, offset & PAGE_MASK, @@ -1404,7 +1573,8 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, xe_res_next(&cursor, PAGE_SIZE); } while (bytes_left); - return len; +out: + return err ?: len; } const struct ttm_device_funcs xe_ttm_funcs = { @@ -1448,6 +1618,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) if (bo->vm && xe_bo_is_user(bo)) xe_vm_put(bo->vm); + if (bo->parent_obj) + xe_bo_put(bo->parent_obj); + mutex_lock(&xe->mem_access.vram_userfault.lock); if (!list_empty(&bo->vram_userfault_link)) list_del(&bo->vram_userfault_link); @@ -1947,7 +2120,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED, alignment); if (IS_ERR(bo)) return bo; @@ -2049,7 +2222,8 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str struct xe_bo *bo; u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT; - dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE; + dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); xe_assert(xe, IS_DGFX(xe)); xe_assert(xe, !(*src)->vmap.is_iomem); @@ -2073,10 +2247,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res) { struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); - if (res->mem_type == XE_PL_STOLEN) + switch (res->mem_type) { + case XE_PL_STOLEN: return xe_ttm_stolen_gpu_offset(xe); - - return res_to_mem_region(res)->dpa_base; + case XE_PL_TT: + case XE_PL_SYSTEM: + return 0; + default: + return res_to_mem_region(res)->dpa_base; + } + return 0; } /** @@ -2102,12 +2282,9 @@ int xe_bo_pin_external(struct xe_bo *bo) if (err) return err; - if (xe_bo_is_vram(bo)) { - spin_lock(&xe->pinned.lock); - list_add_tail(&bo->pinned_link, - &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - } + spin_lock(&xe->pinned.lock); + list_add_tail(&bo->pinned_link, &xe->pinned.late.external); + spin_unlock(&xe->pinned.lock); } 
ttm_bo_pin(&bo->ttm); @@ -2149,25 +2326,12 @@ int xe_bo_pin(struct xe_bo *bo) if (err) return err; - /* - * For pinned objects in on DGFX, which are also in vram, we expect - * these to be in contiguous VRAM memory. Required eviction / restore - * during suspend / resume (force restore to same physical address). - */ - if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && - bo->flags & XE_BO_FLAG_INTERNAL_TEST)) { - if (mem_type_is_vram(place->mem_type)) { - xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS); - - place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - - vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; - place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT); - } - } - if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { spin_lock(&xe->pinned.lock); - list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); + if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) + list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present); + else + list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present); spin_unlock(&xe->pinned.lock); } @@ -2231,6 +2395,13 @@ void xe_bo_unpin(struct xe_bo *bo) xe_assert(xe, !list_empty(&bo->pinned_link)); list_del_init(&bo->pinned_link); spin_unlock(&xe->pinned.lock); + + if (bo->backup_obj) { + if (xe_bo_is_pinned(bo->backup_obj)) + ttm_bo_unpin(&bo->backup_obj->ttm); + xe_bo_put(bo->backup_obj); + bo->backup_obj = NULL; + } } ttm_bo_unpin(&bo->ttm); if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) @@ -2759,19 +2930,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type) /** * xe_bo_evict - Evict an object to evict placement * @bo: The buffer object to migrate. - * @force_alloc: Set force_alloc in ttm_operation_ctx * * On successful completion, the object memory will be moved to evict * placement. This function blocks until the object has been fully moved. * * Return: 0 on success. Negative error code on failure. 
*/ -int xe_bo_evict(struct xe_bo *bo, bool force_alloc) +int xe_bo_evict(struct xe_bo *bo) { struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false, - .force_alloc = force_alloc, .gfp_retry_mayfail = true, }; struct ttm_placement placement; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index ec3e4446d027..02ada1fb8a23 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -39,20 +39,23 @@ #define XE_BO_FLAG_NEEDS_64K BIT(15) #define XE_BO_FLAG_NEEDS_2M BIT(16) #define XE_BO_FLAG_GGTT_INVALIDATE BIT(17) -#define XE_BO_FLAG_GGTT0 BIT(18) -#define XE_BO_FLAG_GGTT1 BIT(19) -#define XE_BO_FLAG_GGTT2 BIT(20) -#define XE_BO_FLAG_GGTT3 BIT(21) -#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \ - XE_BO_FLAG_GGTT1 | \ - XE_BO_FLAG_GGTT2 | \ - XE_BO_FLAG_GGTT3) -#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22) +#define XE_BO_FLAG_PINNED_NORESTORE BIT(18) +#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19) +#define XE_BO_FLAG_GGTT0 BIT(20) +#define XE_BO_FLAG_GGTT1 BIT(21) +#define XE_BO_FLAG_GGTT2 BIT(22) +#define XE_BO_FLAG_GGTT3 BIT(23) +#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24) /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) #define XE_BO_FLAG_INTERNAL_64K BIT(31) +#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \ + XE_BO_FLAG_GGTT1 | \ + XE_BO_FLAG_GGTT2 | \ + XE_BO_FLAG_GGTT3) + #define XE_BO_FLAG_GGTTx(tile) \ (XE_BO_FLAG_GGTT0 << (tile)->id) @@ -271,11 +274,15 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res); bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type); int xe_bo_migrate(struct xe_bo *bo, u32 mem_type); -int xe_bo_evict(struct xe_bo *bo, bool force_alloc); +int xe_bo_evict(struct xe_bo *bo); int xe_bo_evict_pinned(struct xe_bo *bo); +int xe_bo_notifier_prepare_pinned(struct xe_bo *bo); +int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo); int xe_bo_restore_pinned(struct xe_bo *bo); +int xe_bo_dma_unmap_pinned(struct xe_bo *bo); + extern const struct ttm_device_funcs xe_ttm_funcs; extern const char *const xe_mem_type_to_name[]; diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 6a40eedd9db1..ed3746d32b27 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -10,28 +10,103 @@ #include "xe_ggtt.h" #include "xe_tile.h" +typedef int (*xe_pinned_fn)(struct xe_bo *bo); + +static int xe_bo_apply_to_pinned(struct xe_device *xe, + struct list_head *pinned_list, + struct list_head *new_list, + const xe_pinned_fn pinned_fn) +{ + LIST_HEAD(still_in_list); + struct xe_bo *bo; + int ret = 0; + + spin_lock(&xe->pinned.lock); + while (!ret) { + bo = list_first_entry_or_null(pinned_list, typeof(*bo), + pinned_link); + if (!bo) + break; + xe_bo_get(bo); + list_move_tail(&bo->pinned_link, &still_in_list); + spin_unlock(&xe->pinned.lock); + + ret = pinned_fn(bo); + if (ret && pinned_list != new_list) { + spin_lock(&xe->pinned.lock); + /* + * We might no longer be pinned, since PM notifier can + * call this. If the pinned link is now empty, keep it + * that way. + */ + if (!list_empty(&bo->pinned_link)) + list_move(&bo->pinned_link, pinned_list); + spin_unlock(&xe->pinned.lock); + } + xe_bo_put(bo); + spin_lock(&xe->pinned.lock); + } + list_splice_tail(&still_in_list, new_list); + spin_unlock(&xe->pinned.lock); + + return ret; +} + /** - * xe_bo_evict_all - evict all BOs from VRAM + * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all + * pinned VRAM objects which need to be saved. 
+ * @xe: xe device + * + * Should be called from PM notifier when preparing for s3/s4. * + * Return: 0 on success, negative error code on error. + */ +int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe) +{ + int ret; + + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present, + &xe->pinned.early.kernel_bo_present, + xe_bo_notifier_prepare_pinned); + if (!ret) + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, + &xe->pinned.late.kernel_bo_present, + xe_bo_notifier_prepare_pinned); + + return ret; +} + +/** + * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all + * pinned VRAM objects which have been restored. * @xe: xe device * - * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next - * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU. - * All eviction magic done via TTM calls. + * Should be called from PM notifier after exiting s3/s4 (either on success or + * failure). + */ +void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe) +{ + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present, + &xe->pinned.early.kernel_bo_present, + xe_bo_notifier_unprepare_pinned); + + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, + &xe->pinned.late.kernel_bo_present, + xe_bo_notifier_unprepare_pinned); +} + +/** + * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM + * @xe: xe device * - * Evict == move VRAM BOs to temporary (typically system) memory. + * Evict non-pinned user BOs (via GPU). * - * This function should be called before the device goes into a suspend state - * where the VRAM loses power. + * Evict == move VRAM BOs to temporary (typically system) memory. */ -int xe_bo_evict_all(struct xe_device *xe) +int xe_bo_evict_all_user(struct xe_device *xe) { struct ttm_device *bdev = &xe->ttm; - struct xe_bo *bo; - struct xe_tile *tile; - struct list_head still_in_list; u32 mem_type; - u8 id; int ret; /* User memory */ @@ -57,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe) } } - /* Pinned user memory in VRAM */ - INIT_LIST_HEAD(&still_in_list); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.external_vram, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &still_in_list); - spin_unlock(&xe->pinned.lock); + return 0; +} - xe_bo_lock(bo, false); - ret = xe_bo_evict_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) { - spin_lock(&xe->pinned.lock); - list_splice_tail(&still_in_list, - &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - return ret; - } +/** + * xe_bo_evict_all - evict all BOs from VRAM + * @xe: xe device + * + * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next + * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU. + * All eviction magic done via TTM calls. + * + * Evict == move VRAM BOs to temporary (typically system) memory. + * + * This function should be called before the device goes into a suspend state + * where the VRAM loses power. 
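+ * + * An illustrative suspend-side ordering (a sketch pieced together from + * the kernel-doc in this file, not a verbatim call chain): + * xe_bo_notifier_prepare_all_pinned() runs first from the PM notifier + * to allocate the backup objects, after which this function evicts + * into them.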
+ */ +int xe_bo_evict_all(struct xe_device *xe) +{ + struct xe_tile *tile; + u8 id; + int ret; - spin_lock(&xe->pinned.lock); - } - list_splice_tail(&still_in_list, &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); + ret = xe_bo_evict_all_user(xe); + if (ret) + return ret; + + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, + &xe->pinned.late.evicted, xe_bo_evict_pinned); + + if (!ret) + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, + &xe->pinned.late.evicted, xe_bo_evict_pinned); /* * Wait for all user BO to be evicted as those evictions depend on the @@ -93,32 +172,49 @@ int xe_bo_evict_all(struct xe_device *xe) for_each_tile(tile, xe, id) xe_tile_migrate_wait(tile); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &xe->pinned.evicted); - spin_unlock(&xe->pinned.lock); + if (ret) + return ret; - xe_bo_lock(bo, false); - ret = xe_bo_evict_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) - return ret; + return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present, + &xe->pinned.early.evicted, + xe_bo_evict_pinned); +} - spin_lock(&xe->pinned.lock); +static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo) +{ + struct xe_device *xe = xe_bo_device(bo); + int ret; + + ret = xe_bo_restore_pinned(bo); + if (ret) + return ret; + + if (bo->flags & XE_BO_FLAG_GGTT) { + struct xe_tile *tile; + u8 id; + + for_each_tile(tile, xe_bo_device(bo), id) { + if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) + continue; + + mutex_lock(&tile->mem.ggtt->lock); + xe_ggtt_map_bo(tile->mem.ggtt, bo); + mutex_unlock(&tile->mem.ggtt->lock); + } } - spin_unlock(&xe->pinned.lock); + + /* + * We expect validate to trigger a move VRAM and our move code + * should setup the iosys map. + */ + xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) || + !iosys_map_is_null(&bo->vmap)); return 0; } /** - * xe_bo_restore_kernel - restore kernel BOs to VRAM + * xe_bo_restore_early - restore early phase kernel BOs to VRAM * * @xe: xe device * @@ -128,111 +224,130 @@ int xe_bo_evict_all(struct xe_device *xe) * This function should be called early, before trying to init the GT, on device * resume. */ -int xe_bo_restore_kernel(struct xe_device *xe) +int xe_bo_restore_early(struct xe_device *xe) { - struct xe_bo *bo; - int ret; + return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted, + &xe->pinned.early.kernel_bo_present, + xe_bo_restore_and_map_ggtt); +} - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.evicted, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); - spin_unlock(&xe->pinned.lock); +/** + * xe_bo_restore_late - restore pinned late phase BOs + * + * @xe: xe device + * + * Move pinned user and kernel BOs which can use blitter from temporary + * (typically system) memory to VRAM. All moves done via TTM calls. + * + * This function should be called late, after GT init, on device resume. 
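+ * + * An illustrative resume-side ordering (again only a sketch): + * xe_bo_restore_early(), then GT init, then this function, and finally + * xe_bo_notifier_unprepare_all_pinned() from the PM notifier to drop + * any remaining backup objects.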
+ */ +int xe_bo_restore_late(struct xe_device *xe) +{ + struct xe_tile *tile; + int ret, id; - xe_bo_lock(bo, false); - ret = xe_bo_restore_pinned(bo); - xe_bo_unlock(bo); - if (ret) { - xe_bo_put(bo); - return ret; - } + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted, + &xe->pinned.late.kernel_bo_present, + xe_bo_restore_and_map_ggtt); - if (bo->flags & XE_BO_FLAG_GGTT) { - struct xe_tile *tile; - u8 id; + for_each_tile(tile, xe, id) + xe_tile_migrate_wait(tile); - for_each_tile(tile, xe, id) { - if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) - continue; + if (ret) + return ret; - mutex_lock(&tile->mem.ggtt->lock); - xe_ggtt_map_bo(tile->mem.ggtt, bo); - mutex_unlock(&tile->mem.ggtt->lock); - } - } + if (!IS_DGFX(xe)) + return 0; - /* - * We expect validate to trigger a move VRAM and our move code - * should setup the iosys map. - */ - xe_assert(xe, !iosys_map_is_null(&bo->vmap)); + /* Pinned user memory in VRAM should be validated on resume */ + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external, + &xe->pinned.late.external, + xe_bo_restore_pinned); - xe_bo_put(bo); + /* Wait for restore to complete */ + for_each_tile(tile, xe, id) + xe_tile_migrate_wait(tile); - spin_lock(&xe->pinned.lock); - } - spin_unlock(&xe->pinned.lock); + return ret; +} - return 0; +static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe) +{ + struct xe_tile *tile; + unsigned int id; + + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external, + &xe->pinned.late.external, + xe_bo_dma_unmap_pinned); + for_each_tile(tile, xe, id) + xe_tile_migrate_wait(tile); } /** - * xe_bo_restore_user - restore pinned user BOs to VRAM - * - * @xe: xe device + * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed + * @xe: The xe device. * - * Move pinned user BOs from temporary (typically system) memory to VRAM via - * CPU. All moves done via TTM calls. + * On pci_device removal we need to drop all dma mappings and move + * the data of exported bos out to system. This includes SVM bos and + * exported dma-buf bos. This is done by evicting all bos, but + * the evict placement in xe_evict_flags() is chosen such that all + * bos except those mentioned are purged, and thus their memory + * is released. * - * This function should be called late, after GT init, on device resume. + * For pinned bos, we're unmapping dma. */ -int xe_bo_restore_user(struct xe_device *xe) +void xe_bo_pci_dev_remove_all(struct xe_device *xe) { - struct xe_bo *bo; - struct xe_tile *tile; - struct list_head still_in_list; - u8 id; - int ret; + unsigned int mem_type; - if (!IS_DGFX(xe)) - return 0; + /* + * Move pagemap bos and exported dma-buf to system, and + * purge everything else. 
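+ * The purge vs. migrate decision itself lives in xe_evict_flags() + * above, which selects purge_placement once the device is unplugged.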
+ */ + for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) { + struct ttm_resource_manager *man = + ttm_manager_type(&xe->ttm, mem_type); - /* Pinned user memory in VRAM should be validated on resume */ - INIT_LIST_HEAD(&still_in_list); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.external_vram, - typeof(*bo), pinned_link); - if (!bo) - break; - list_move_tail(&bo->pinned_link, &still_in_list); - xe_bo_get(bo); - spin_unlock(&xe->pinned.lock); + if (man) { + int ret = ttm_resource_manager_evict_all(&xe->ttm, man); - xe_bo_lock(bo, false); - ret = xe_bo_restore_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) { - spin_lock(&xe->pinned.lock); - list_splice_tail(&still_in_list, - &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - return ret; + drm_WARN_ON(&xe->drm, ret); } - - spin_lock(&xe->pinned.lock); } - list_splice_tail(&still_in_list, &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - /* Wait for restore to complete */ - for_each_tile(tile, xe, id) - xe_tile_migrate_wait(tile); + xe_bo_pci_dev_remove_pinned(xe); +} - return 0; +static void xe_bo_pinned_fini(void *arg) +{ + struct xe_device *xe = arg; + + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, + &xe->pinned.late.kernel_bo_present, + xe_bo_dma_unmap_pinned); + (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present, + &xe->pinned.early.kernel_bo_present, + xe_bo_dma_unmap_pinned); +} + +/** + * xe_bo_pinned_init() - Initialize pinned bo tracking + * @xe: The xe device. + * + * Initializes the lists and locks required for pinned bo + * tracking and registers a callback to dma-unmap + * any remaining pinned bos on pci device removal. + * + * Return: %0 on success, negative error code on error. 
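+ * + * Usage sketch: wired up once during device creation (see the + * xe_device.c hunk later in this series); teardown is automatic via + * the devm action registered here (xe_bo_pinned_fini()).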
+ */ +int xe_bo_pinned_init(struct xe_device *xe) +{ + spin_lock_init(&xe->pinned.lock); + INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present); + INIT_LIST_HEAD(&xe->pinned.early.evicted); + INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present); + INIT_LIST_HEAD(&xe->pinned.late.evicted); + INIT_LIST_HEAD(&xe->pinned.late.external); + + return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe); } diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h index 746894798852..e8385cb7f5e9 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.h +++ b/drivers/gpu/drm/xe/xe_bo_evict.h @@ -9,7 +9,13 @@ struct xe_device; int xe_bo_evict_all(struct xe_device *xe); -int xe_bo_restore_kernel(struct xe_device *xe); -int xe_bo_restore_user(struct xe_device *xe); +int xe_bo_evict_all_user(struct xe_device *xe); +int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe); +void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe); +int xe_bo_restore_early(struct xe_device *xe); +int xe_bo_restore_late(struct xe_device *xe); +void xe_bo_pci_dev_remove_all(struct xe_device *xe); + +int xe_bo_pinned_init(struct xe_device *xe); #endif diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 15a92e3d4898..eb5e83c5f233 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -28,6 +28,10 @@ struct xe_vm; struct xe_bo { /** @ttm: TTM base buffer object */ struct ttm_buffer_object ttm; + /** @backup_obj: The backup object when pinned and suspended (vram only) */ + struct xe_bo *backup_obj; + /** @parent_obj: Ref to parent bo if this is a backup_obj */ + struct xe_bo *parent_obj; /** @size: Size of this buffer object */ size_t size; /** @flags: flags for this buffer object */ diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c new file mode 100644 index 000000000000..cb9f175c89a1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/configfs.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "xe_configfs.h" +#include "xe_module.h" + +/** + * DOC: Xe Configfs + * + * Overview + * ========= + * + * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a + * configfs subsystem called ``'xe'`` that creates a directory in the mounted configfs directory. + * The user can create devices under this directory and configure them as necessary. + * See Documentation/filesystems/configfs.rst for more information about how configfs works. + * + * Create devices + * =============== + * + * In order to create a device, the user has to create a directory inside ``'xe'``:: + * + * mkdir /sys/kernel/config/xe/0000:03:00.0/ + * + * Every device created is populated by the driver with entries that can be + * used to configure it:: + * + * /sys/kernel/config/xe/ + * .. 0000:03:00.0/ + * ... survivability_mode + * + * Configure Attributes + * ==================== + * + * Survivability mode: + * ------------------- + * + * Enable survivability mode on supported cards. This setting only takes + * effect when probing the device.
Example to enable it:: + * + * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode + * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported) + * + * Remove devices + * ============== + * + * The created device directories can be removed using ``rmdir``:: + * + * rmdir /sys/kernel/config/xe/0000:03:00.0/ + */ + +struct xe_config_device { + struct config_group group; + + bool survivability_mode; + + /* protects attributes */ + struct mutex lock; +}; + +static struct xe_config_device *to_xe_config_device(struct config_item *item) +{ + return container_of(to_config_group(item), struct xe_config_device, group); +} + +static ssize_t survivability_mode_show(struct config_item *item, char *page) +{ + struct xe_config_device *dev = to_xe_config_device(item); + + return sprintf(page, "%d\n", dev->survivability_mode); +} + +static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len) +{ + struct xe_config_device *dev = to_xe_config_device(item); + bool survivability_mode; + int ret; + + ret = kstrtobool(page, &survivability_mode); + if (ret) + return ret; + + mutex_lock(&dev->lock); + dev->survivability_mode = survivability_mode; + mutex_unlock(&dev->lock); + + return len; +} + +CONFIGFS_ATTR(, survivability_mode); + +static struct configfs_attribute *xe_config_device_attrs[] = { + &attr_survivability_mode, + NULL, +}; + +static void xe_config_device_release(struct config_item *item) +{ + struct xe_config_device *dev = to_xe_config_device(item); + + mutex_destroy(&dev->lock); + kfree(dev); +} + +static struct configfs_item_operations xe_config_device_ops = { + .release = xe_config_device_release, +}; + +static const struct config_item_type xe_config_device_type = { + .ct_item_ops = &xe_config_device_ops, + .ct_attrs = xe_config_device_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *xe_config_make_device_group(struct config_group *group, + const char *name) +{ + unsigned int domain, bus, slot, function; + struct xe_config_device *dev; + struct pci_dev *pdev; + int ret; + + ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function); + if (ret != 4) + return ERR_PTR(-EINVAL); + + pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function)); + if (!pdev) + return ERR_PTR(-EINVAL); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + config_group_init_type_name(&dev->group, name, &xe_config_device_type); + + mutex_init(&dev->lock); + + return &dev->group; +} + +static struct configfs_group_operations xe_config_device_group_ops = { + .make_group = xe_config_make_device_group, +}; + +static const struct config_item_type xe_configfs_type = { + .ct_group_ops = &xe_config_device_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem xe_configfs = { + .su_group = { + .cg_item = { + .ci_namebuf = "xe", + .ci_type = &xe_configfs_type, + }, + }, +}; + +static struct xe_config_device *configfs_find_group(struct pci_dev *pdev) +{ + struct config_item *item; + char name[64]; + + snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus), + pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + mutex_lock(&xe_configfs.su_mutex); + item = config_group_find_item(&xe_configfs.su_group, name); + mutex_unlock(&xe_configfs.su_mutex); + + if (!item) + return NULL; + + return to_xe_config_device(item); +} + +/** + * xe_configfs_get_survivability_mode - get configfs survivability mode attribute + * @pdev: 
pci device + * + * find the configfs group that belongs to the pci device and return + * the survivability mode attribute + * + * Return: survivability mode if config group is found, false otherwise + */ +bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) +{ + struct xe_config_device *dev = configfs_find_group(pdev); + bool mode; + + if (!dev) + return false; + + mode = dev->survivability_mode; + config_item_put(&dev->group.cg_item); + + return mode; +} + +/** + * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute + * @pdev: pci device + * + * find the configfs group that belongs to the pci device and clear survivability + * mode attribute + */ +void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) +{ + struct xe_config_device *dev = configfs_find_group(pdev); + + if (!dev) + return; + + mutex_lock(&dev->lock); + dev->survivability_mode = 0; + mutex_unlock(&dev->lock); + + config_item_put(&dev->group.cg_item); +} + +int __init xe_configfs_init(void) +{ + struct config_group *root = &xe_configfs.su_group; + int ret; + + config_group_init(root); + mutex_init(&xe_configfs.su_mutex); + ret = configfs_register_subsystem(&xe_configfs); + if (ret) { + pr_err("Error %d while registering %s subsystem\n", + ret, root->cg_item.ci_namebuf); + return ret; + } + + return 0; +} + +void __exit xe_configfs_exit(void) +{ + configfs_unregister_subsystem(&xe_configfs); +} + diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h new file mode 100644 index 000000000000..d7d041ec2611 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_configfs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ +#ifndef _XE_CONFIGFS_H_ +#define _XE_CONFIGFS_H_ + +#include <linux/types.h> + +struct pci_dev; + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +int xe_configfs_init(void); +void xe_configfs_exit(void); +bool xe_configfs_get_survivability_mode(struct pci_dev *pdev); +void xe_configfs_clear_survivability_mode(struct pci_dev *pdev); +#else +static inline int xe_configfs_init(void) { return 0; }; +static inline void xe_configfs_exit(void) {}; +static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }; +static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {}; +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 81b9d9bb3f57..a9e618abf8ac 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -80,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q) return &q->gt->uc.guc; } -static ssize_t __xe_devcoredump_read(char *buffer, size_t count, +static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count, + ssize_t start, struct xe_devcoredump *coredump) { struct xe_device *xe; @@ -94,7 +95,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, ss = &coredump->snapshot; iter.data = buffer; - iter.start = 0; + iter.start = start; iter.remain = count; p = drm_coredump_printer(&iter); @@ -168,6 +169,8 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) ss->vm = NULL; } +#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G) + static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { @@ -183,6 +186,9 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, /* Ensure delayed work is captured before continuing */ flush_work(&ss->work); + if 
(ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + xe_pm_runtime_get(gt_to_xe(ss->gt)); + mutex_lock(&coredump->lock); if (!ss->read.buffer) { @@ -195,12 +201,26 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, return 0; } + if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX || + offset < ss->read.chunk_position) { + ss->read.chunk_position = + ALIGN_DOWN(offset, XE_DEVCOREDUMP_CHUNK_MAX); + + __xe_devcoredump_read(ss->read.buffer, + XE_DEVCOREDUMP_CHUNK_MAX, + ss->read.chunk_position, coredump); + } + byte_copied = count < ss->read.size - offset ? count : ss->read.size - offset; - memcpy(buffer, ss->read.buffer + offset, byte_copied); + memcpy(buffer, ss->read.buffer + + (offset % XE_DEVCOREDUMP_CHUNK_MAX), byte_copied); mutex_unlock(&coredump->lock); + if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + xe_pm_runtime_put(gt_to_xe(ss->gt)); + return byte_copied; } @@ -254,17 +274,32 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); - xe_pm_runtime_put(xe); + ss->read.chunk_position = 0; /* Calculate devcoredump size */ - ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump); - - ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); - if (!ss->read.buffer) - return; + ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump); + + if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) { + ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX, + GFP_USER); + if (!ss->read.buffer) + goto put_pm; + + __xe_devcoredump_read(ss->read.buffer, + XE_DEVCOREDUMP_CHUNK_MAX, + 0, coredump); + } else { + ss->read.buffer = kvmalloc(ss->read.size, GFP_USER); + if (!ss->read.buffer) + goto put_pm; + + __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0, + coredump); + xe_devcoredump_snapshot_free(ss); + } - __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump); - xe_devcoredump_snapshot_free(ss); +put_pm: + xe_pm_runtime_put(xe); } static void devcoredump_snapshot(struct xe_devcoredump *coredump, @@ -425,7 +460,7 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi if (offset & 3) drm_printf(p, "Offset not word aligned: %zu", offset); - line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL); + line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC); if (!line_buff) { drm_printf(p, "Failed to allocate line buffer\n"); return; diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h index 1a1d16a96b2d..a174385a6d83 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump_types.h +++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h @@ -66,6 +66,8 @@ struct xe_devcoredump_snapshot { struct { /** @read.size: size of devcoredump in human readable format */ ssize_t size; + /** @read.chunk_position: position of devcoredump chunk */ + ssize_t chunk_position; /** @read.buffer: buffer of devcoredump in human readable format */ char *buffer; } read; diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 00191227bc95..75e753e0a682 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -23,6 +23,7 @@ #include "regs/xe_gt_regs.h" #include "regs/xe_regs.h" #include "xe_bo.h" +#include "xe_bo_evict.h" #include "xe_debugfs.h" #include "xe_devcoredump.h" #include "xe_dma_buf.h" @@ -467,10 +468,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xa_erase(&xe->usm.asid_to_vm, asid); } - spin_lock_init(&xe->pinned.lock); - 
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); - INIT_LIST_HEAD(&xe->pinned.external_vram); - INIT_LIST_HEAD(&xe->pinned.evicted); + err = xe_bo_pinned_init(xe); + if (err) + goto err; xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", WQ_MEM_RECLAIM); @@ -505,7 +505,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */ static bool xe_driver_flr_disabled(struct xe_device *xe) { - return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; + if (IS_SRIOV_VF(xe)) + return true; + + if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { + drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); + return true; + } + + return false; } /* @@ -523,7 +531,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe) */ static void __xe_driver_flr(struct xe_device *xe) { - const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ + const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */ struct xe_mmio *mmio = xe_root_tile_mmio(xe); int ret; @@ -569,10 +577,8 @@ static void __xe_driver_flr(struct xe_device *xe) static void xe_driver_flr(struct xe_device *xe) { - if (xe_driver_flr_disabled(xe)) { - drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); + if (xe_driver_flr_disabled(xe)) return; - } __xe_driver_flr(xe); } @@ -706,7 +712,7 @@ int xe_device_probe_early(struct xe_device *xe) sriov_update_device_info(xe); err = xe_pcode_probe_early(xe); - if (err) { + if (err || xe_survivability_mode_is_requested(xe)) { int save_err = err; /* @@ -729,6 +735,7 @@ int xe_device_probe_early(struct xe_device *xe) return 0; } +ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */ static int probe_has_flat_ccs(struct xe_device *xe) { @@ -932,6 +939,8 @@ void xe_device_remove(struct xe_device *xe) xe_display_unregister(xe); drm_dev_unplug(&xe->drm); + + xe_bo_pci_dev_remove_all(xe); } void xe_device_shutdown(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 9f8667ebba85..495bc00ebed4 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -107,6 +107,9 @@ struct xe_vram_region { resource_size_t actual_physical_size; /** @mapping: pointer to VRAM mappable space */ void __iomem *mapping; + /** @ttm: VRAM TTM manager */ + struct xe_ttm_vram_mgr ttm; +#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) /** @pagemap: Used to remap device memory as ZONE_DEVICE */ struct dev_pagemap pagemap; /** @@ -120,8 +123,7 @@ struct xe_vram_region { * This is generated when remap device memory as ZONE_DEVICE */ resource_size_t hpa_base; - /** @ttm: VRAM TTM manager */ - struct xe_ttm_vram_mgr ttm; +#endif }; /** @@ -314,6 +316,8 @@ struct xe_device { u8 has_atomic_enable_pte_bit:1; /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */ u8 has_device_atomics_on_smem:1; + /** @info.has_fan_control: Device supports fan control */ + u8 has_fan_control:1; /** @info.has_flat_ccs: Whether flat CCS metadata is used */ u8 has_flat_ccs:1; /** @info.has_heci_cscfi: device has heci cscfi */ @@ -332,6 +336,8 @@ struct xe_device { u8 has_usm:1; /** @info.is_dgfx: is discrete device */ u8 is_dgfx:1; + /** @info.needs_scratch: needs scratch page for oob prefetch to work */ + u8 needs_scratch:1; /** * @info.probe_display: Probe display hardware. 
If set to * false, the driver will behave as if there is no display @@ -418,12 +424,22 @@ struct xe_device { struct { /** @pinned.lock: protected pinned BO list state */ spinlock_t lock; - /** @pinned.kernel_bo_present: pinned kernel BO that are present */ - struct list_head kernel_bo_present; - /** @pinned.evicted: pinned BO that have been evicted */ - struct list_head evicted; - /** @pinned.external_vram: pinned external BO in vram*/ - struct list_head external_vram; + /** @pinned.early: early pinned lists */ + struct { + /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */ + struct list_head kernel_bo_present; + /** @pinned.early.evicted: pinned BO that have been evicted */ + struct list_head evicted; + } early; + /** @pinned.late: late pinned lists */ + struct { + /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */ + struct list_head kernel_bo_present; + /** @pinned.late.evicted: pinned BO that have been evicted */ + struct list_head evicted; + /** @pinned.external: pinned external and dma-buf. */ + struct list_head external; + } late; } pinned; /** @ufence_wq: user fence wait queue */ @@ -506,6 +522,9 @@ struct xe_device { struct mutex lock; } d3cold; + /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */ + struct notifier_block pm_notifier; + /** @pmt: Support the PMT driver callback interface */ struct { /** @pmt.lock: protect access for telemetry data */ @@ -586,6 +605,7 @@ struct xe_device { INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, INTEL_DRAM_GDDR_ECC, + __INTEL_DRAM_TYPE_MAX, } type; u8 num_qgv_points; u8 num_psf_gv_points; diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index f7a20264ea33..346f857f3837 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -233,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach) struct drm_gem_object *obj = attach->importer_priv; struct xe_bo *bo = gem_to_xe_bo(obj); - XE_WARN_ON(xe_bo_evict(bo, false)); + XE_WARN_ON(xe_bo_evict(bo)); } static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = { diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 4f6784e5abf8..8a5cba22b586 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) fw->gt = gt; spin_lock_init(&fw->lock); - /* Assuming gen11+ so assert this assumption is correct */ - xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); - if (xe->info.graphics_verx100 >= 1270) { init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, @@ -67,9 +64,6 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) { int i, j; - /* Assuming gen11+ so assert this assumption is correct */ - xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); - if (!xe_gt_is_media_type(gt)) init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 5fcb2b4c2c13..7062115909f2 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -365,7 +365,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt) * scratch entries, rather keep the scratch page in system memory on * platforms where 64K pages are needed for VRAM. 
*/ - flags = XE_BO_FLAG_PINNED; + flags = 0; if (ggtt->flags & XE_GGTT_FLAGS_64K) flags |= XE_BO_FLAG_SYSTEM; else diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 10a9e3c72b36..8068b4bc0a09 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -12,8 +12,10 @@ #include <generated/xe_wa_oob.h> +#include "instructions/xe_alu_commands.h" #include "instructions/xe_gfxpipe_commands.h" #include "instructions/xe_mi_commands.h" +#include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_bb.h" @@ -176,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q) return 0; } -/* - * Convert back from encoded value to type-safe, only to be used when reg.mcr - * is true - */ -static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg) -{ - return (const struct xe_reg_mcr){.__reg.raw = reg.raw }; -} - static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) { struct xe_reg_sr *sr = &q->hwe->reg_lrc; @@ -194,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) struct xe_bb *bb; struct dma_fence *fence; long timeout; + int count_rmw = 0; int count = 0; if (q->hwe->class == XE_ENGINE_CLASS_RENDER) @@ -206,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) if (IS_ERR(bb)) return PTR_ERR(bb); - xa_for_each(&sr->xa, idx, entry) - ++count; + /* count RMW registers as those will be handled separately */ + xa_for_each(&sr->xa, idx, entry) { + if (entry->reg.masked || entry->clr_bits == ~0) + ++count; + else + ++count_rmw; + } - if (count) { + if (count || count_rmw) xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name); + if (count) { + /* emit single LRI with all non RMW regs */ + bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count); xa_for_each(&sr->xa, idx, entry) { struct xe_reg reg = entry->reg; - struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg); u32 val; - /* - * Skip reading the register if it's not really needed - */ if (reg.masked) val = entry->clr_bits << 16; - else if (entry->clr_bits + 1) - val = (reg.mcr ? 
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(>->mmio, reg)) & (~entry->clr_bits); - else + else if (entry->clr_bits == ~0) val = 0; + else + continue; val |= entry->set_bits; @@ -239,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) } } + if (count_rmw) { + /* emit MI_MATH for each RMW reg */ + + xa_for_each(&sr->xa, idx, entry) { + if (entry->reg.masked || entry->clr_bits == ~0) + continue; + + bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; + bb->cs[bb->len++] = entry->reg.addr; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + + bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | + MI_LRI_LRM_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; + bb->cs[bb->len++] = entry->clr_bits; + bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; + bb->cs[bb->len++] = entry->set_bits; + + bb->cs[bb->len++] = MI_MATH(8); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); + bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1); + bb->cs[bb->len++] = CS_ALU_INSTR_AND; + bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2); + bb->cs[bb->len++] = CS_ALU_INSTR_OR; + bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); + + bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + bb->cs[bb->len++] = entry->reg.addr; + + xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n", + entry->reg.addr, entry->clr_bits, entry->set_bits); + } + + /* reset used GPR */ + bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + bb->cs[bb->len++] = 0; + bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; + bb->cs[bb->len++] = 0; + bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; + bb->cs[bb->len++] = 0; + } + xe_lrc_emit_hwe_state_instructions(q, bb); job = xe_bb_create_job(q, bb); diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 2d63a69cbfa3..a88076e9cc7d 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -299,20 +299,20 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p) return 0; } -static const struct drm_info_list debugfs_list[] = { - {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines}, +/* + * only for GT debugfs files which can be safely used on the VF as well: + * - without access to the GT privileged registers + * - without access to the PF specific data + */ +static const struct drm_info_list vf_safe_debugfs_list[] = { {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset}, {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync}, {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info}, {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, - {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, - {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info}, {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds}, {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings}, - {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, - {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs}, {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc}, 
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc}, {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc}, @@ -322,6 +322,15 @@ static const struct drm_info_list debugfs_list[] = { {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig}, }; +/* everything else should be added here */ +static const struct drm_info_list pf_only_debugfs_list[] = { + {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines}, + {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs}, + {"pat", .show = xe_gt_debugfs_simple_show, .data = pat}, + {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info}, + {"steering", .show = xe_gt_debugfs_simple_show, .data = steering}, +}; + void xe_gt_debugfs_register(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); @@ -345,10 +354,15 @@ void xe_gt_debugfs_register(struct xe_gt *gt) */ root->d_inode->i_private = gt; - drm_debugfs_create_files(debugfs_list, - ARRAY_SIZE(debugfs_list), + drm_debugfs_create_files(vf_safe_debugfs_list, + ARRAY_SIZE(vf_safe_debugfs_list), root, minor); + if (!IS_SRIOV_VF(xe)) + drm_debugfs_create_files(pf_only_debugfs_list, + ARRAY_SIZE(pf_only_debugfs_list), + root, minor); + xe_uc_debugfs_register(>->uc, root); if (IS_SRIOV_PF(xe)) diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index 604bdc7c8173..868a5d2c1a52 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -56,9 +56,10 @@ dev_to_xe(struct device *dev) return gt_to_xe(kobj_to_gt(dev->kobj.parent)); } -static ssize_t act_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t act_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; @@ -68,11 +69,12 @@ static ssize_t act_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static DEVICE_ATTR_RO(act_freq); +static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq); -static ssize_t cur_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cur_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; ssize_t ret; @@ -85,11 +87,12 @@ static ssize_t cur_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static DEVICE_ATTR_RO(cur_freq); +static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq); -static ssize_t rp0_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rp0_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; @@ -99,11 +102,12 @@ static ssize_t rp0_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static DEVICE_ATTR_RO(rp0_freq); +static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq); -static ssize_t rpe_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rpe_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; @@ -113,11 +117,12 @@ static ssize_t rpe_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static DEVICE_ATTR_RO(rpe_freq); +static struct kobj_attribute attr_rpe_freq = 
__ATTR_RO(rpe_freq); -static ssize_t rpa_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rpa_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; @@ -127,20 +132,22 @@ static ssize_t rpa_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static DEVICE_ATTR_RO(rpa_freq); +static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq); -static ssize_t rpn_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rpn_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc)); } -static DEVICE_ATTR_RO(rpn_freq); +static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq); -static ssize_t min_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t min_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; ssize_t ret; @@ -154,9 +161,10 @@ static ssize_t min_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, - const char *buff, size_t count) +static ssize_t min_freq_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buff, size_t count) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; ssize_t ret; @@ -173,11 +181,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR_RW(min_freq); +static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq); -static ssize_t max_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t max_freq_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; ssize_t ret; @@ -191,9 +200,10 @@ static ssize_t max_freq_show(struct device *dev, return sysfs_emit(buf, "%d\n", freq); } -static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, - const char *buff, size_t count) +static ssize_t max_freq_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buff, size_t count) { + struct device *dev = kobj_to_dev(kobj); struct xe_guc_pc *pc = dev_to_pc(dev); u32 freq; ssize_t ret; @@ -210,17 +220,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR_RW(max_freq); +static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq); static const struct attribute *freq_attrs[] = { - &dev_attr_act_freq.attr, - &dev_attr_cur_freq.attr, - &dev_attr_rp0_freq.attr, - &dev_attr_rpa_freq.attr, - &dev_attr_rpe_freq.attr, - &dev_attr_rpn_freq.attr, - &dev_attr_min_freq.attr, - &dev_attr_max_freq.attr, + &attr_act_freq.attr, + &attr_cur_freq.attr, + &attr_rp0_freq.attr, + &attr_rpa_freq.attr, + &attr_rpe_freq.attr, + &attr_rpn_freq.attr, + &attr_min_freq.attr, + &attr_max_freq.attr, NULL }; diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index fbbace7b0b12..c11206410a4d 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -249,9 +249,10 @@ int 
xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p) return 0; } -static ssize_t name_show(struct device *dev, - struct device_attribute *attr, char *buff) +static ssize_t name_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt_idle *gtidle = dev_to_gtidle(dev); struct xe_guc_pc *pc = gtidle_to_pc(gtidle); ssize_t ret; @@ -262,11 +263,12 @@ static ssize_t name_show(struct device *dev, return ret; } -static DEVICE_ATTR_RO(name); +static struct kobj_attribute name_attr = __ATTR_RO(name); -static ssize_t idle_status_show(struct device *dev, - struct device_attribute *attr, char *buff) +static ssize_t idle_status_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt_idle *gtidle = dev_to_gtidle(dev); struct xe_guc_pc *pc = gtidle_to_pc(gtidle); enum xe_gt_idle_state state; @@ -277,6 +279,7 @@ static ssize_t idle_status_show(struct device *dev, return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state)); } +static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status); u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle) { @@ -291,10 +294,11 @@ u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle) return residency; } -static DEVICE_ATTR_RO(idle_status); -static ssize_t idle_residency_ms_show(struct device *dev, - struct device_attribute *attr, char *buff) + +static ssize_t idle_residency_ms_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt_idle *gtidle = dev_to_gtidle(dev); struct xe_guc_pc *pc = gtidle_to_pc(gtidle); u64 residency; @@ -305,12 +309,12 @@ static ssize_t idle_residency_ms_show(struct device *dev, return sysfs_emit(buff, "%llu\n", residency); } -static DEVICE_ATTR_RO(idle_residency_ms); +static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms); static const struct attribute *gt_idle_attrs[] = { - &dev_attr_name.attr, - &dev_attr_idle_status.attr, - &dev_attr_idle_residency_ms.attr, + &name_attr.attr, + &idle_status_attr.attr, + &idle_residency_attr.attr, NULL, }; diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 605aad3554e7..d4d9730f0d2c 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -345,7 +345,8 @@ fallback: * Some older platforms don't have tables or don't have complete tables. * Newer platforms should always have the required info. 
*/ - if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000) + if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 && + !gt_to_xe(gt)->info.force_execlist) xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n"); if (gt_to_xe(gt)->info.platform == XE_PVC) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index c5ad9a0a89c2..10622ca471a2 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -240,7 +240,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) atomic = access_is_atomic(pf->access_type); if (xe_vma_is_cpu_addr_mirror(vma)) - err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt), + err = xe_svm_handle_pagefault(vm, vma, gt, pf->page_addr, atomic); else err = handle_vma_pagefault(gt, vma, atomic); @@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss, XE_MAX_EU_FUSE_BITS) * num_dss; - /* user can issue separate page faults per EU and per CS */ + /* + * user can issue separate page faults per EU and per CS + * + * XXX: Multiplier required as compute UMD are getting PF queue errors + * without it. Follow on why this multiplier is required. + */ +#define PF_MULTIPLIER 8 pf_queue->num_dw = - (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW; + (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; +#undef PF_MULTIPLIER pf_queue->gt = gt; pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw, diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 10be109bf357..2420a548cacc 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1444,15 +1444,23 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) return 0; xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M); - bo = xe_bo_create_pin_map(xe, tile, NULL, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_NEEDS_2M | - XE_BO_FLAG_PINNED); + bo = xe_bo_create_locked(xe, tile, NULL, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_NEEDS_2M | + XE_BO_FLAG_PINNED | + XE_BO_FLAG_PINNED_LATE_RESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); + err = xe_bo_pin(bo); + xe_bo_unlock(bo); + if (unlikely(err)) { + xe_bo_put(bo); + return err; + } + config->lmem_obj = bo; if (xe_device_has_lmtt(xe)) { diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index b2521dd6ec42..0fe47f41b63c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -51,27 +51,18 @@ static unsigned int extract_vfid(struct dentry *d) * /sys/kernel/debug/dri/0/ * ├── gt0 * │ ├── pf - * │ │ ├── ggtt_available - * │ │ ├── ggtt_provisioned * │ │ ├── contexts_provisioned * │ │ ├── doorbells_provisioned * │ │ ├── runtime_registers * │ │ ├── negotiated_versions * │ │ ├── adverse_events + * ├── gt1 + * │ ├── pf + * │ │ ├── ... 
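
The pf_provision_vf_lmem() hunk above replaces the one-shot xe_bo_create_pin_map() with explicit create/pin/unlock steps, so the new XE_BO_FLAG_PINNED_LATE_RESTORE flag can be applied and a pin failure unwound cleanly. A minimal sketch of that shape (flags trimmed to the relevant ones, size illustrative)::

	bo = xe_bo_create_locked(xe, tile, NULL, size, ttm_bo_type_kernel,
				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				 XE_BO_FLAG_PINNED |
				 XE_BO_FLAG_PINNED_LATE_RESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = xe_bo_pin(bo);		/* pin while the BO lock is held */
	xe_bo_unlock(bo);		/* create_locked returned with it held */
	if (err) {
		xe_bo_put(bo);		/* drop the creation reference */
		return err;
	}
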
*/ static const struct drm_info_list pf_info[] = { { - "ggtt_available", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_available_ggtt, - }, - { - "ggtt_provisioned", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_ggtt, - }, - { "contexts_provisioned", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_config_print_ctxs, @@ -82,11 +73,6 @@ static const struct drm_info_list pf_info[] = { .data = xe_gt_sriov_pf_config_print_dbs, }, { - "lmem_provisioned", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_config_print_lmem, - }, - { "runtime_registers", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_service_print_runtime, @@ -107,6 +93,42 @@ static const struct drm_info_list pf_info[] = { * /sys/kernel/debug/dri/0/ * ├── gt0 * │ ├── pf + * │ │ ├── ggtt_available + * │ │ ├── ggtt_provisioned + */ + +static const struct drm_info_list pf_ggtt_info[] = { + { + "ggtt_available", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_available_ggtt, + }, + { + "ggtt_provisioned", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_ggtt, + }, +}; + +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │ ├── pf + * │ │ ├── lmem_provisioned + */ + +static const struct drm_info_list pf_lmem_info[] = { + { + "lmem_provisioned", + .show = xe_gt_debugfs_simple_show, + .data = xe_gt_sriov_pf_config_print_lmem, + }, +}; + +/* + * /sys/kernel/debug/dri/0/ + * ├── gt0 + * │ ├── pf * │ │ ├── reset_engine * │ │ ├── sample_period * │ │ ├── sched_if_idle @@ -532,6 +554,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) pfdentry->d_inode->i_private = gt; drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor); + if (!xe_gt_is_media_type(gt)) { + drm_debugfs_create_files(pf_ggtt_info, + ARRAY_SIZE(pf_ggtt_info), + pfdentry, minor); + if (IS_DGFX(gt_to_xe(gt))) + drm_debugfs_create_files(pf_lmem_info, + ARRAY_SIZE(pf_lmem_info), + pfdentry, minor); + } + pf_add_policy_attrs(gt, pfdentry); pf_add_config_attrs(gt, pfdentry, PFID); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c index 4efde5f46b43..821cfcc34e6b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c @@ -112,7 +112,6 @@ static const struct xe_reg tgl_runtime_regs[] = { XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -124,7 +123,6 @@ static const struct xe_reg ats_m_runtime_regs[] = { XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -136,7 +134,6 @@ static const struct xe_reg pvc_runtime_regs[] = { GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */ - CTC_MODE, /* _MMIO(0xA26C) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -150,7 +147,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = { GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -167,7 +163,6 @@ static const struct 
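
Splitting pf_info into always-present, GGTT-only and LMEM-only lists keeps the registration data-driven while hiding files that would be meaningless on a media GT or an integrated part. A sketch of the same shape, with hypothetical list names::

	drm_debugfs_create_files(common_info, ARRAY_SIZE(common_info),
				 parent, minor);

	/* GGTT and LMEM are tile-level resources owned by the primary GT */
	if (!xe_gt_is_media_type(gt)) {
		drm_debugfs_create_files(ggtt_info, ARRAY_SIZE(ggtt_info),
					 parent, minor);
		if (IS_DGFX(gt_to_xe(gt)))
			drm_debugfs_create_files(lmem_info,
						 ARRAY_SIZE(lmem_info),
						 parent, minor);
	}
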
xe_reg ver_2000_runtime_regs[] = { XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -185,7 +180,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = { XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c index 6155ea354432..30f942671c2b 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats.c +++ b/drivers/gpu/drm/xe/xe_gt_stats.c @@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr) } static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = { + "svm_pagefault_count", "tlb_inval_count", "vma_pagefault_count", "vma_pagefault_kb", diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h index d556771f99d6..be3244d7133c 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats_types.h +++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h @@ -7,6 +7,7 @@ #define _XE_GT_STATS_TYPES_H_ enum xe_gt_stats_id { + XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, XE_GT_STATS_ID_TLB_INVAL, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c index 8db78d616b6f..aa962c783cdf 100644 --- a/drivers/gpu/drm/xe/xe_gt_throttle.c +++ b/drivers/gpu/drm/xe/xe_gt_throttle.c @@ -114,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt) return tdc; } -static ssize_t status_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t status_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool status = !!read_status(gt); return sysfs_emit(buff, "%u\n", status); } -static DEVICE_ATTR_RO(status); +static struct kobj_attribute attr_status = __ATTR_RO(status); -static ssize_t reason_pl1_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_pl1_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool pl1 = !!read_reason_pl1(gt); return sysfs_emit(buff, "%u\n", pl1); } -static DEVICE_ATTR_RO(reason_pl1); +static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1); -static ssize_t reason_pl2_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_pl2_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool pl2 = !!read_reason_pl2(gt); return sysfs_emit(buff, "%u\n", pl2); } -static DEVICE_ATTR_RO(reason_pl2); +static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2); -static ssize_t reason_pl4_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_pl4_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool pl4 = !!read_reason_pl4(gt); return sysfs_emit(buff, "%u\n", pl4); } -static DEVICE_ATTR_RO(reason_pl4); +static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4); -static ssize_t reason_thermal_show(struct device 
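
The new SVM pagefault counter above must be added at the same position in both enum xe_gt_stats_id and stat_description[], because the array is indexed positionally by the enum. A designated-initializer variant (illustrative only, not what the driver uses) would make that pairing explicit and immune to ordering mistakes::

	static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
		[XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT] = "svm_pagefault_count",
		[XE_GT_STATS_ID_TLB_INVAL]           = "tlb_inval_count",
		[XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT] = "vma_pagefault_count",
		[XE_GT_STATS_ID_VMA_PAGEFAULT_KB]    = "vma_pagefault_kb",
	};
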
*dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_thermal_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool thermal = !!read_reason_thermal(gt); return sysfs_emit(buff, "%u\n", thermal); } -static DEVICE_ATTR_RO(reason_thermal); +static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal); -static ssize_t reason_prochot_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_prochot_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool prochot = !!read_reason_prochot(gt); return sysfs_emit(buff, "%u\n", prochot); } -static DEVICE_ATTR_RO(reason_prochot); +static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot); -static ssize_t reason_ratl_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_ratl_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool ratl = !!read_reason_ratl(gt); return sysfs_emit(buff, "%u\n", ratl); } -static DEVICE_ATTR_RO(reason_ratl); +static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl); -static ssize_t reason_vr_thermalert_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_vr_thermalert_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool thermalert = !!read_reason_vr_thermalert(gt); return sysfs_emit(buff, "%u\n", thermalert); } -static DEVICE_ATTR_RO(reason_vr_thermalert); +static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert); -static ssize_t reason_vr_tdc_show(struct device *dev, - struct device_attribute *attr, - char *buff) +static ssize_t reason_vr_tdc_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) { + struct device *dev = kobj_to_dev(kobj); struct xe_gt *gt = dev_to_gt(dev); bool tdc = !!read_reason_vr_tdc(gt); return sysfs_emit(buff, "%u\n", tdc); } -static DEVICE_ATTR_RO(reason_vr_tdc); +static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc); static struct attribute *throttle_attrs[] = { - &dev_attr_status.attr, - &dev_attr_reason_pl1.attr, - &dev_attr_reason_pl2.attr, - &dev_attr_reason_pl4.attr, - &dev_attr_reason_thermal.attr, - &dev_attr_reason_prochot.attr, - &dev_attr_reason_ratl.attr, - &dev_attr_reason_vr_thermalert.attr, - &dev_attr_reason_vr_tdc.attr, + &attr_status.attr, + &attr_reason_pl1.attr, + &attr_reason_pl2.attr, + &attr_reason_pl4.attr, + &attr_reason_thermal.attr, + &attr_reason_prochot.attr, + &attr_reason_ratl.attr, + &attr_reason_vr_thermalert.attr, + &attr_reason_vr_tdc.attr, NULL }; diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index bc5714a5b36b..c5aace59b62c 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -483,7 +483,8 @@ static int guc_g2g_alloc(struct xe_guc *guc) XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_ALL | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -1393,6 +1394,7 @@ proto: /* Use data from the GuC response as our return value */ return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, 
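
XE_BO_FLAG_PINNED_NORESTORE recurs across this series (G2G, ADS, CT, log and PC buffers). As the flag name suggests, it appears to mark pinned buffers whose contents are rebuilt on every GuC load, so the GGTT mapping must be kept but a contents save/restore across suspend can be skipped. A sketch of such an allocation, mirroring the hunks in this series::

	/* GuC rewrites this buffer on each load: keep the mapping
	 * pinned, but skip saving/restoring the contents.
	 */
	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);
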
header); } +ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO); int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len) { diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 7031542a70ce..44c1fa2fe7c8 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -376,6 +376,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads) GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET, &offset, &remain); + if (GUC_FIRMWARE_VER(>->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708)) + guc_waklv_enable_simple(ads, + GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH, + &offset, &remain); + size = guc_ads_waklv_size(ads) - remain; if (!size) return; @@ -414,7 +419,8 @@ int xe_guc_ads_init(struct xe_guc_ads *ads) bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -710,8 +716,8 @@ static int guc_capture_prep_lists(struct xe_guc_ads *ads) } if (ads->capture_size != PAGE_ALIGN(total_size)) - xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n", - ads->capture_size, PAGE_ALIGN(total_size)); + xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n", + PAGE_ALIGN(total_size), ads->capture_size); return PAGE_ALIGN(total_size); } diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index 9095618648bc..859a3ba91be5 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -105,49 +105,49 @@ struct __guc_capture_parsed_output { * 3. Incorrect order will trigger XE_WARN. */ #define COMMON_XELP_BASE_GLOBAL \ - { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"} + { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"} #define COMMON_BASE_ENGINE_INSTANCE \ - { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \ - { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \ - { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \ - { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \ - { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \ - { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \ - { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \ - { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \ - { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \ - { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \ - { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \ - { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \ - { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \ - { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \ - { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \ - { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \ - { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \ - { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \ - { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \ - { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \ - { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"} + { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \ + { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \ + { 
RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \ + { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \ + { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \ + { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \ + { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \ + { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \ + { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \ + { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \ + { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \ + { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \ + { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \ + { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \ + { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \ + { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \ + { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \ + { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \ + { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \ + { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \ + { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"} #define COMMON_XELP_RC_CLASS \ - { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"} + { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"} #define COMMON_XELP_RC_CLASS_INSTDONE \ - { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \ - { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \ - { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"} + { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \ + { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \ + { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"} #define XELP_VEC_CLASS_REGS \ - { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \ - { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \ - { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \ - { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"} + { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \ + { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \ + { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \ + { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"} /* XE_LP Global */ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = { @@ -352,7 +352,7 @@ static const struct __ext_steer_reg xehpg_extregs[] = { static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, const struct __ext_steer_reg *extlist, - int slice_id, int subslice_id) + u32 dss_id, u16 slice_id, u16 subslice_id) { if (!ext || !extlist) return; @@ -361,6 +361,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); + ext->dss_id = dss_id; ext->regname = extlist->name; } @@ -397,7 +398,7 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); u16 slice, subslice; - int iter, i, total = 0; + int dss, i, total = 0; const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists; const struct __guc_mmio_reg_descr_group *list; struct __guc_mmio_reg_descr_group *extlists; @@ -454,15 +455,15 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc) /* For steering registers, 
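
Every row above grows an extra positional 0 only because struct __guc_mmio_reg_descr gained a @dss_id member in the middle. With designated initializers (an illustrative alternative, assuming the field names from the types header; the first two names are guessed), such churn disappears since unset members default to zero::

	{ .reg = RING_HEAD(0), .data_type = REG_32BIT, .regname = "RING_HEAD" },
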
the list is generated at run-time */ extarray = (struct __guc_mmio_reg_descr *)extlists[0].list; - for_each_dss_steering(iter, gt, slice, subslice) { + for_each_dss_steering(dss, gt, slice, subslice) { for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) { - __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice); + __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice); ++extarray; } if (has_xehpg_extregs) for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) { - __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice); + __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice); ++extarray; } } @@ -1672,18 +1673,16 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_ { struct xe_gt *gt = snapshot->hwe->gt; struct xe_device *xe = gt_to_xe(gt); - struct xe_guc *guc = >->uc.guc; struct xe_devcoredump *devcoredump = &xe->devcoredump; struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot; struct gcap_reg_list_info *reginfo = NULL; u32 i, last_value = 0; - bool is_ext, low32_ready = false; + bool low32_ready = false; if (!list || !list->list || list->num_regs == 0) return; XE_WARN_ON(!devcore_snapshot->matched_node); - is_ext = list == guc->capture->extlists; reginfo = &devcore_snapshot->matched_node->reginfo[type]; /* @@ -1749,17 +1748,12 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_ */ XE_WARN_ON(low32_ready); - if (is_ext) { - int dss, group, instance; - - group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags); - instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags); - dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance); - - drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value); - } else { + if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags)) + drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, + reg_desc->dss_id, value); + else drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value); - } + break; } } diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h index ca2d390ccbee..6cb439115597 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture_types.h +++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h @@ -39,6 +39,8 @@ struct __guc_mmio_reg_descr { u32 flags; /** @mask: The mask to apply */ u32 mask; + /** @dss_id: Cached index for steered registers */ + u32 dss_id; /** @regname: Name of the register */ const char *regname; }; diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 72ad576fc18e..2447de0ebedf 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -238,7 +238,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(), XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -1088,6 +1089,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer); return guc_ct_send_recv(ct, action, len, response_buffer, false); } +ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO); int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 *response_buffer) @@ -1828,10 +1830,10 @@ static void ct_dead_print(struct xe_dead_ct *dead) return; } - drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason); /* Can't generate a genuine core dump at this point, so just do the good 
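
With @dss_id cached at list-build time, printing a steered register no longer needs the group/instance reverse lookup: a single FIELD_GET() on the flags decides the format. The round trip, condensed from the two hunks above::

	/* build time: pack steering info and cache the DSS index */
	ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1) |
		     FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id) |
		     FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
	ext->dss_id = dss_id;

	/* print time: one bit test, no xe_gt_mcr_steering_info_to_dss_id() */
	if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
		drm_printf(p, "\t%s[%u]: 0x%08x\n",
			   reg_desc->regname, reg_desc->dss_id, value);
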
bits */ drm_puts(&lp, "**** Xe Device Coredump ****\n"); + drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason); xe_device_snapshot_print(xe, &lp); drm_printf(&lp, "**** GT #%d ****\n", gt->info.id); diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c index c569ff456e74..f33013f8a0f3 100644 --- a/drivers/gpu/drm/xe/xe_guc_debugfs.c +++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c @@ -17,101 +17,119 @@ #include "xe_macros.h" #include "xe_pm.h" -static struct xe_guc *node_to_guc(struct drm_info_node *node) -{ - return node->info_ent->data; -} - -static int guc_info(struct seq_file *m, void *data) +/* + * guc_debugfs_show - A show callback for struct drm_info_list + * @m: the &seq_file + * @data: data used by the drm debugfs helpers + * + * This callback can be used in struct drm_info_list to describe debugfs + * files that are &xe_guc specific in similar way how we handle &xe_gt + * specific files using &xe_gt_debugfs_simple_show. + * + * It is assumed that those debugfs files will be created on directory entry + * which grandparent struct dentry d_inode->i_private points to &xe_gt. + * + * /sys/kernel/debug/dri/0/ + * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt) + * │ ├── uc # dent->d_parent + * │ │ ├── guc_info # dent + * │ │ ├── guc_... + * + * This function assumes that &m->private will be set to the &struct + * drm_info_node corresponding to the instance of the info on a given &struct + * drm_minor (see struct drm_info_list.show for details). + * + * This function also assumes that struct drm_info_list.data will point to the + * function code that will actually print a file content:: + * + * int (*print)(struct xe_guc *, struct drm_printer *) + * + * Example:: + * + * int foo(struct xe_guc *guc, struct drm_printer *p) + * { + * drm_printf(p, "enabled %d\n", guc->submission_state.enabled); + * return 0; + * } + * + * static const struct drm_info_list bar[] = { + * { name = "foo", .show = guc_debugfs_show, .data = foo }, + * }; + * + * parent = debugfs_create_dir("uc", gtdir); + * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor); + * + * Return: 0 on success or a negative error code on failure. 
+ */ +static int guc_debugfs_show(struct seq_file *m, void *data) { - struct xe_guc *guc = node_to_guc(m->private); - struct xe_device *xe = guc_to_xe(guc); struct drm_printer p = drm_seq_file_printer(m); + struct drm_info_node *node = m->private; + struct dentry *parent = node->dent->d_parent; + struct dentry *grandparent = parent->d_parent; + struct xe_gt *gt = grandparent->d_inode->i_private; + struct xe_device *xe = gt_to_xe(gt); + int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data; + int ret; xe_pm_runtime_get(xe); - xe_guc_print_info(guc, &p); + ret = print(>->uc.guc, &p); xe_pm_runtime_put(xe); - return 0; + return ret; } -static int guc_log(struct seq_file *m, void *data) +static int guc_log(struct xe_guc *guc, struct drm_printer *p) { - struct xe_guc *guc = node_to_guc(m->private); - struct xe_device *xe = guc_to_xe(guc); - struct drm_printer p = drm_seq_file_printer(m); - - xe_pm_runtime_get(xe); - xe_guc_log_print(&guc->log, &p); - xe_pm_runtime_put(xe); - + xe_guc_log_print(&guc->log, p); return 0; } -static int guc_log_dmesg(struct seq_file *m, void *data) +static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p) { - struct xe_guc *guc = node_to_guc(m->private); - struct xe_device *xe = guc_to_xe(guc); - - xe_pm_runtime_get(xe); xe_guc_log_print_dmesg(&guc->log); - xe_pm_runtime_put(xe); - return 0; } -static int guc_ctb(struct seq_file *m, void *data) +static int guc_ctb(struct xe_guc *guc, struct drm_printer *p) { - struct xe_guc *guc = node_to_guc(m->private); - struct xe_device *xe = guc_to_xe(guc); - struct drm_printer p = drm_seq_file_printer(m); - - xe_pm_runtime_get(xe); - xe_guc_ct_print(&guc->ct, &p, true); - xe_pm_runtime_put(xe); - + xe_guc_ct_print(&guc->ct, p, true); return 0; } -static int guc_pc(struct seq_file *m, void *data) +static int guc_pc(struct xe_guc *guc, struct drm_printer *p) { - struct xe_guc *guc = node_to_guc(m->private); - struct xe_device *xe = guc_to_xe(guc); - struct drm_printer p = drm_seq_file_printer(m); - - xe_pm_runtime_get(xe); - xe_guc_pc_print(&guc->pc, &p); - xe_pm_runtime_put(xe); - + xe_guc_pc_print(&guc->pc, p); return 0; } -static const struct drm_info_list debugfs_list[] = { - {"guc_info", guc_info, 0}, - {"guc_log", guc_log, 0}, - {"guc_log_dmesg", guc_log_dmesg, 0}, - {"guc_ctb", guc_ctb, 0}, - {"guc_pc", guc_pc, 0}, +/* + * only for GuC debugfs files which can be safely used on the VF as well: + * - without access to the GuC privileged registers + * - without access to the PF specific GuC objects + */ +static const struct drm_info_list vf_safe_debugfs_list[] = { + { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info }, + { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb }, +}; + +/* everything else should be added here */ +static const struct drm_info_list pf_only_debugfs_list[] = { + { "guc_log", .show = guc_debugfs_show, .data = guc_log }, + { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg }, + { "guc_pc", .show = guc_debugfs_show, .data = guc_pc }, }; void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) { struct drm_minor *minor = guc_to_xe(guc)->drm.primary; - struct drm_info_list *local; - int i; -#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list)) - local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL); - if (!local) - return; - - memcpy(local, debugfs_list, DEBUGFS_SIZE); -#undef DEBUGFS_SIZE - - for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i) - local[i].data = guc; - - 
drm_debugfs_create_files(local, - ARRAY_SIZE(debugfs_list), + drm_debugfs_create_files(vf_safe_debugfs_list, + ARRAY_SIZE(vf_safe_debugfs_list), parent, minor); + + if (!IS_SRIOV_VF(guc_to_xe(guc))) + drm_debugfs_create_files(pf_only_debugfs_list, + ARRAY_SIZE(pf_only_debugfs_list), + parent, minor); } diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c index 2a457dcf31d5..0fb48f8f05d8 100644 --- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c +++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c @@ -17,36 +17,61 @@ #include "xe_hw_engine.h" #include "xe_map.h" #include "xe_mmio.h" +#include "xe_sriov_pf_helpers.h" #include "xe_trace_guc.h" #define TOTAL_QUANTA 0x8000 -static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe) +static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int index) { struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; - struct engine_activity_buffer *buffer = &engine_activity->device_buffer; + struct engine_activity_buffer *buffer; u16 guc_class = xe_engine_class_to_guc_class(hwe->class); size_t offset; - offset = offsetof(struct guc_engine_activity_data, + if (engine_activity->num_functions) { + buffer = &engine_activity->function_buffer; + offset = sizeof(struct guc_engine_activity_data) * index; + } else { + buffer = &engine_activity->device_buffer; + offset = 0; + } + + offset += offsetof(struct guc_engine_activity_data, engine_activity[guc_class][hwe->logical_instance]); return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset); } -static struct iosys_map engine_metadata_map(struct xe_guc *guc) +static struct iosys_map engine_metadata_map(struct xe_guc *guc, + unsigned int index) { struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; - struct engine_activity_buffer *buffer = &engine_activity->device_buffer; + struct engine_activity_buffer *buffer; + size_t offset; - return buffer->metadata_bo->vmap; + if (engine_activity->num_functions) { + buffer = &engine_activity->function_buffer; + offset = sizeof(struct guc_engine_activity_metadata) * index; + } else { + buffer = &engine_activity->device_buffer; + offset = 0; + } + + return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset); } static int allocate_engine_activity_group(struct xe_guc *guc) { struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; struct xe_device *xe = guc_to_xe(guc); - u32 num_activity_group = 1; /* Will be modified for VF */ + u32 num_activity_group; + + /* + * An additional activity group is allocated for PF + */ + num_activity_group = IS_SRIOV_PF(xe) ? 
xe_sriov_pf_get_totalvfs(xe) + 1 : 1; engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group, sizeof(struct engine_activity_group), GFP_KERNEL); @@ -60,10 +85,11 @@ static int allocate_engine_activity_group(struct xe_guc *guc) } static int allocate_engine_activity_buffers(struct xe_guc *guc, - struct engine_activity_buffer *buffer) + struct engine_activity_buffer *buffer, + int count) { - u32 metadata_size = sizeof(struct guc_engine_activity_metadata); - u32 size = sizeof(struct guc_engine_activity_data); + u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count; + u32 size = sizeof(struct guc_engine_activity_data) * count; struct xe_gt *gt = guc_to_gt(guc); struct xe_tile *tile = gt_to_tile(gt); struct xe_bo *bo, *metadata_bo; @@ -118,10 +144,11 @@ static bool is_engine_activity_supported(struct xe_guc *guc) return true; } -static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe) +static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe, + unsigned int index) { struct xe_guc *guc = &hwe->gt->uc.guc; - struct engine_activity_group *eag = &guc->engine_activity.eag[0]; + struct engine_activity_group *eag = &guc->engine_activity.eag[index]; u16 guc_class = xe_engine_class_to_guc_class(hwe->class); return &eag->engine[guc_class][hwe->logical_instance]; @@ -138,9 +165,10 @@ static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq) #define read_metadata_record(xe_, map_, field_) \ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_) -static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) +static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int index) { - struct engine_activity *ea = hw_engine_to_engine_activity(hwe); + struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index); struct guc_engine_activity *cached_activity = &ea->activity; struct guc_engine_activity_metadata *cached_metadata = &ea->metadata; struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; @@ -151,8 +179,8 @@ static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) u64 active_ticks, gpm_ts; u16 change_num; - activity_map = engine_activity_map(guc, hwe); - metadata_map = engine_metadata_map(guc); + activity_map = engine_activity_map(guc, hwe, index); + metadata_map = engine_metadata_map(guc, index); global_change_num = read_metadata_record(xe, &metadata_map, global_change_num); /* GuC has not initialized activity data yet, return 0 */ @@ -194,9 +222,9 @@ update: return ea->total + ea->active; } -static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) +static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index) { - struct engine_activity *ea = hw_engine_to_engine_activity(hwe); + struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index); struct guc_engine_activity_metadata *cached_metadata = &ea->metadata; struct guc_engine_activity *cached_activity = &ea->activity; struct iosys_map activity_map, metadata_map; @@ -205,8 +233,8 @@ static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) u64 numerator; u16 quanta_ratio; - activity_map = engine_activity_map(guc, hwe); - metadata_map = engine_metadata_map(guc); + activity_map = engine_activity_map(guc, hwe, index); + metadata_map = engine_metadata_map(guc, index); if (!cached_metadata->guc_tsc_frequency_hz) cached_metadata->guc_tsc_frequency_hz = 
read_metadata_record(xe, &metadata_map, @@ -245,12 +273,39 @@ static int enable_engine_activity_stats(struct xe_guc *guc) return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); } -static void engine_activity_set_cpu_ts(struct xe_guc *guc) +static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable) { struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; - struct engine_activity_group *eag = &engine_activity->eag[0]; + u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0; + struct engine_activity_buffer *buffer = &engine_activity->function_buffer; + u32 action[6]; + int len = 0; + + if (enable) { + metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo); + ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo); + num_functions = engine_activity->num_functions; + } + + action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER; + action[len++] = num_functions; + action[len++] = metadata_ggtt_addr; + action[len++] = 0; + action[len++] = ggtt_addr; + action[len++] = 0; + + /* Blocking here to ensure the buffers are ready before reading them */ + return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); +} + +static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index) +{ + struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; + struct engine_activity_group *eag = &engine_activity->eag[index]; int i, j; + xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group); + for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++) eag->engine[i][j].last_cpu_ts = ktime_get(); @@ -265,34 +320,107 @@ static u32 gpm_timestamp_shift(struct xe_gt *gt) return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg); } +static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; + + if (!IS_SRIOV_PF(xe) && fn_id) + return false; + + if (engine_activity->num_functions && fn_id >= engine_activity->num_functions) + return false; + + return true; +} + +static int engine_activity_disable_function_stats(struct xe_guc *guc) +{ + struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; + struct engine_activity_buffer *buffer = &engine_activity->function_buffer; + int ret; + + if (!engine_activity->num_functions) + return 0; + + ret = enable_function_engine_activity_stats(guc, false); + if (ret) + return ret; + + free_engine_activity_buffers(buffer); + engine_activity->num_functions = 0; + + return 0; +} + +static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs) +{ + struct xe_guc_engine_activity *engine_activity = &guc->engine_activity; + struct engine_activity_buffer *buffer = &engine_activity->function_buffer; + int ret, i; + + if (!num_vfs) + return 0; + + /* This includes 1 PF and num_vfs */ + engine_activity->num_functions = num_vfs + 1; + + ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions); + if (ret) + return ret; + + ret = enable_function_engine_activity_stats(guc, true); + if (ret) { + free_engine_activity_buffers(buffer); + engine_activity->num_functions = 0; + return ret; + } + + /* skip PF as it was already setup */ + for (i = 1; i < engine_activity->num_functions; i++) + engine_activity_set_cpu_ts(guc, i); + + return 0; +} + /** * xe_guc_engine_activity_active_ticks - Get engine active ticks * @guc: The GuC object * @hwe: The hw_engine object + 
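
xe_guc_engine_activity_function_stats() is the exported toggle for the per-function buffers; a plausible PF-side call site (hypothetical, the actual caller is outside this excerpt) would run after VFs are enabled::

	err = xe_guc_engine_activity_function_stats(guc, num_vfs, true);
	if (err)
		xe_gt_err(guc_to_gt(guc),
			  "failed to enable per-VF activity stats (%pe)\n",
			  ERR_PTR(err));
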
* @fn_id: function id to report on * * Return: accumulated ticks @hwe was active since engine activity stats were enabled. */ -u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) +u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int fn_id) { if (!xe_guc_engine_activity_supported(guc)) return 0; - return get_engine_active_ticks(guc, hwe); + if (!is_function_valid(guc, fn_id)) + return 0; + + return get_engine_active_ticks(guc, hwe, fn_id); } /** * xe_guc_engine_activity_total_ticks - Get engine total ticks * @guc: The GuC object * @hwe: The hw_engine object + * @fn_id: function id to report on * * Return: accumulated quanta of ticks allocated for the engine */ -u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe) +u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int fn_id) { if (!xe_guc_engine_activity_supported(guc)) return 0; - return get_engine_total_ticks(guc, hwe); + if (!is_function_valid(guc, fn_id)) + return 0; + + return get_engine_total_ticks(guc, hwe, fn_id); } /** @@ -311,6 +439,25 @@ bool xe_guc_engine_activity_supported(struct xe_guc *guc) } /** + * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats + * @guc: The GuC object + * @num_vfs: number of vfs + * @enable: true to enable, false otherwise + * + * Return: 0 on success, negative error code otherwise + */ +int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable) +{ + if (!xe_guc_engine_activity_supported(guc)) + return 0; + + if (enable) + return engine_activity_enable_function_stats(guc, num_vfs); + + return engine_activity_disable_function_stats(guc); +} + +/** * xe_guc_engine_activity_enable_stats - Enable engine activity stats * @guc: The GuC object * @@ -327,7 +474,7 @@ void xe_guc_engine_activity_enable_stats(struct xe_guc *guc) if (ret) xe_gt_err(guc_to_gt(guc), "failed to enable activity stats%d\n", ret); else - engine_activity_set_cpu_ts(guc); + engine_activity_set_cpu_ts(guc, 0); } static void engine_activity_fini(void *arg) @@ -360,7 +507,7 @@ int xe_guc_engine_activity_init(struct xe_guc *guc) return ret; } - ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer); + ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1); if (ret) { xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret)); return ret; diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h index a042d4cb404c..b32926c2d208 100644 --- a/drivers/gpu/drm/xe/xe_guc_engine_activity.h +++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h @@ -14,6 +14,9 @@ struct xe_guc; int xe_guc_engine_activity_init(struct xe_guc *guc); bool xe_guc_engine_activity_supported(struct xe_guc *guc); void xe_guc_engine_activity_enable_stats(struct xe_guc *guc); -u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe); -u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe); +int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable); +u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int fn_id); +u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, + unsigned int fn_id); #endif diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h 
b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h index 5cdd034b6b70..48f69ddefa36 100644 --- a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h +++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h @@ -79,14 +79,24 @@ struct xe_guc_engine_activity { /** @num_activity_group: number of activity groups */ u32 num_activity_group; + /** @num_functions: number of functions */ + u32 num_functions; + /** @supported: indicates support for engine activity stats */ bool supported; - /** @eag: holds the device level engine activity data */ + /** + * @eag: holds the device level engine activity data in native mode. + * In SRIOV mode, points to an array with entries which holds the engine + * activity data for PF and VF's + */ struct engine_activity_group *eag; /** @device_buffer: buffer object for global engine activity */ struct engine_activity_buffer device_buffer; + + /** @function_buffer: buffer object for per-function engine activity */ + struct engine_activity_buffer function_buffer; }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index 80514a446ba2..38039c411387 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -260,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log) bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(), XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 43b1192ba61c..18c623992035 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -462,6 +462,21 @@ static u32 get_cur_freq(struct xe_gt *gt) } /** + * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency + * @pc: The GuC PC + * + * Returns: the requested frequency for that GT instance + */ +u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); + + return get_cur_freq(gt); +} + +/** * xe_guc_pc_get_cur_freq - Get Current requested frequency * @pc: The GuC PC * @freq: A pointer to a u32 where the freq value will be returned @@ -1170,7 +1185,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc) bo = xe_managed_bo_create_pin_map(xe, tile, size, XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h index 39102b79602f..0a2664d5c811 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.h +++ b/drivers/gpu/drm/xe/xe_guc_pc.h @@ -22,6 +22,7 @@ void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p); u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc); int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq); +u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc); u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc); u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc); u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 31bc2022bfc2..813c3c0bb250 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -300,6 +300,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) primelockdep(guc); + guc->submission_state.initialized = true; + return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); } @@ 
-834,6 +836,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc) xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); + /* + * If device is being wedged even before submission_state is + * initialized, there's nothing to do here. + */ + if (!guc->submission_state.initialized) + return; + err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, guc_submit_wedged_fini, guc); if (err) { diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index 63bac64429a5..1fde7614fcc5 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -89,6 +89,11 @@ struct xe_guc { struct mutex lock; /** @submission_state.enabled: submission is enabled */ bool enabled; + /** + * @submission_state.initialized: mark when submission state is + * even initialized - before that not even the lock is valid + */ + bool initialized; /** @submission_state.fini_wq: submit fini wait queue */ wait_queue_head_t fini_wq; } submission_state; diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c index a440442b4d72..640950172088 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c @@ -605,6 +605,7 @@ err_object: kobject_put(kobj); return err; } +ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */ static void hw_engine_class_sysfs_fini(void *arg) diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 48d80ffdf7bb..eb293aec36a0 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -5,6 +5,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/hwmon.h> +#include <linux/jiffies.h> #include <linux/types.h> #include <linux/units.h> @@ -27,6 +28,7 @@ enum xe_hwmon_reg { REG_PKG_POWER_SKU_UNIT, REG_GT_PERF_STATUS, REG_PKG_ENERGY_STATUS, + REG_FAN_SPEED, }; enum xe_hwmon_reg_operation { @@ -42,6 +44,13 @@ enum xe_hwmon_channel { CHANNEL_MAX, }; +enum xe_fan_channel { + FAN_1, + FAN_2, + FAN_3, + FAN_MAX, +}; + /* * SF_* - scale factors for particular quantities according to hwmon spec. 
*/ @@ -62,6 +71,16 @@ struct xe_hwmon_energy_info { }; /** + * struct xe_hwmon_fan_info - to cache previous fan reading + */ +struct xe_hwmon_fan_info { + /** @reg_val_prev: previous fan reg val */ + u32 reg_val_prev; + /** @time_prev: previous timestamp */ + u64 time_prev; +}; + +/** * struct xe_hwmon - xe hwmon data structure */ struct xe_hwmon { @@ -79,6 +98,8 @@ struct xe_hwmon { int scl_shift_time; /** @ei: Energy info for energyN_input */ struct xe_hwmon_energy_info ei[CHANNEL_MAX]; + /** @fi: Fan info for fanN_input */ + struct xe_hwmon_fan_info fi[FAN_MAX]; }; static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg, @@ -144,6 +165,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg return PCU_CR_PACKAGE_ENERGY_STATUS; } break; + case REG_FAN_SPEED: + if (channel == FAN_1) + return BMG_FAN_1_SPEED; + else if (channel == FAN_2) + return BMG_FAN_2_SPEED; + else if (channel == FAN_3) + return BMG_FAN_3_SPEED; + break; default: drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg); break; @@ -454,6 +483,7 @@ static const struct hwmon_channel_info * const hwmon_info[] = { HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL), + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT), NULL }; @@ -480,6 +510,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval) (uval & POWER_SETUP_I1_DATA_MASK)); } +static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval) +{ + struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); + + /* Platforms that don't return correct value */ + if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) { + *uval = 2; + return 0; + } + + return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL); +} + static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel, long *value, u32 scale_factor) { @@ -706,6 +749,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) } static umode_t +xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel) +{ + u32 uval; + + if (!hwmon->xe->info.has_fan_control) + return 0; + + switch (attr) { + case hwmon_fan_input: + if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval)) + return 0; + + return channel < uval ? 0444 : 0; + default: + return 0; + } +} + +static int +xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); + struct xe_hwmon_fan_info *fi = &hwmon->fi[channel]; + u64 rotations, time_now, time; + u32 reg_val; + int ret = 0; + + mutex_lock(&hwmon->hwmon_lock); + + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel)); + time_now = get_jiffies_64(); + + /* + * HW register value is accumulated count of pulses from PWM fan with the scale + * of 2 pulses per rotation. + */ + rotations = (reg_val - fi->reg_val_prev) / 2; + + time = jiffies_delta_to_msecs(time_now - fi->time_prev); + if (unlikely(!time)) { + ret = -EAGAIN; + goto unlock; + } + + /* + * Calculate fan speed in RPM by time averaging two subsequent readings in minutes. 
+ * RPM = number of rotations * msecs per minute / time in msecs + */ + *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time); + + fi->reg_val_prev = reg_val; + fi->time_prev = time_now; +unlock: + mutex_unlock(&hwmon->hwmon_lock); + return ret; +} + +static int +xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + return xe_hwmon_fan_input_read(hwmon, channel, val); + default: + return -EOPNOTSUPP; + } +} + +static umode_t xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) { @@ -730,6 +842,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, case hwmon_energy: ret = xe_hwmon_energy_is_visible(hwmon, attr, channel); break; + case hwmon_fan: + ret = xe_hwmon_fan_is_visible(hwmon, attr, channel); + break; default: ret = 0; break; @@ -765,6 +880,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, case hwmon_energy: ret = xe_hwmon_energy_read(hwmon, attr, channel, val); break; + case hwmon_fan: + ret = xe_hwmon_fan_read(hwmon, attr, channel, val); + break; default: ret = -EOPNOTSUPP; break; @@ -842,7 +960,7 @@ static void xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) { struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); - long energy; + long energy, fan_speed; u64 val_sku_unit = 0; int channel; struct xe_reg pkg_power_sku_unit; @@ -866,6 +984,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) for (channel = 0; channel < CHANNEL_MAX; channel++) if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel)) xe_hwmon_energy_get(hwmon, channel, &energy); + + /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */ + for (channel = 0; channel < FAN_MAX; channel++) + if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel)) + xe_hwmon_fan_input_read(hwmon, channel, &fan_speed); } static void xe_hwmon_mutex_destroy(void *arg) diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 89393dcb53d9..63db66df064b 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level lmtt->ops->lmtt_pte_num(level)), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) | - XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED); + XE_BO_FLAG_NEEDS_64K); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto out_free_pt; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index df3ceddede07..855c8acaf3f1 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -37,6 +37,7 @@ #define LRC_ENGINE_CLASS GENMASK_ULL(63, 61) #define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48) +#define LRC_PPHWSP_SIZE SZ_4K #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K static struct xe_device * @@ -50,19 +51,22 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class) struct xe_device *xe = gt_to_xe(gt); size_t size; + /* Per-process HW status page (PPHWSP) */ + size = LRC_PPHWSP_SIZE; + + /* Engine context image */ switch (class) { case XE_ENGINE_CLASS_RENDER: if (GRAPHICS_VER(xe) >= 20) - size = 4 * SZ_4K; + size += 3 * SZ_4K; else - size = 14 * SZ_4K; + size += 13 * SZ_4K; break; case XE_ENGINE_CLASS_COMPUTE: - /* 14 pages since graphics_ver == 11 */ if (GRAPHICS_VER(xe) >= 20) - size = 3 * SZ_4K; + size += 2 * SZ_4K; else - size = 14 * SZ_4K; + size += 13 * SZ_4K; break; default: WARN(1, "Unknown engine class: %d", class); @@ -71,7 
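
A worked example of the RPM formula above, with illustrative numbers: a register delta of 1200 pulses is 600 rotations (two pulses per rotation); over 5000 ms that gives 600 * 60000 / 5000 = 7200 RPM::

	/* rotations = (reg_val - reg_val_prev) / 2 = 1200 / 2 = 600
	 * time      = 5000 ms
	 * RPM       = DIV_ROUND_UP_ULL(600 * (MSEC_PER_SEC * 60), 5000) = 7200
	 */
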
+75,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class) case XE_ENGINE_CLASS_VIDEO_DECODE: case XE_ENGINE_CLASS_VIDEO_ENHANCE: case XE_ENGINE_CLASS_OTHER: - size = 2 * SZ_4K; + size += 1 * SZ_4K; } /* Add indirect ring state page */ @@ -650,7 +654,6 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc) #define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8) #define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8) #define LRC_PARALLEL_PPHWSP_OFFSET 2048 -#define LRC_PPHWSP_SIZE SZ_4K u32 xe_lrc_regs_offset(struct xe_lrc *lrc) { @@ -893,6 +896,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, void *init_data = NULL; u32 arb_enable; u32 lrc_size; + u32 bo_flags; int err; kref_init(&lrc->refcount); @@ -901,15 +905,18 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, if (xe_gt_has_indirect_ring_state(gt)) lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE; + bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE; + if (vm && vm->xef) /* userspace */ + bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; + /* * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address * via VM bind calls. */ lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo_flags); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); @@ -1445,6 +1452,7 @@ static int dump_gfxpipe_command(struct drm_printer *p, MATCH3D(3DSTATE_CLIP_MESH); MATCH3D(3DSTATE_SBE_MESH); MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER); + MATCH3D(3DSTATE_COARSE_PIXEL); MATCH3D(3DSTATE_DRAWING_RECTANGLE); MATCH3D(3DSTATE_CHROMA_KEY); diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c index 404fa2a456d5..49c45ec3e83c 100644 --- a/drivers/gpu/drm/xe/xe_memirq.c +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc) * This object needs to be 4KiB aligned. * * - _`Interrupt Source Report Page`: this is the equivalent of the - * GEN11_GT_INTR_DWx registers, with each bit in those registers being + * GT_INTR_DWx registers, with each bit in those registers being * mapped to a byte here. The offsets are the same, just bytes instead * of bits. This object needs to be cacheline aligned. 
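The xe_gt_lrc_size() rework above is size-neutral; it only splits each total into one PPHWSP page plus the engine-context image (render on pre-Xe2 is still 4K + 13 * 4K = 14 pages). A compilable restatement of the new accounting, for illustration only:

    #include <stddef.h>

    #define SZ_4K 0x1000
    #define LRC_PPHWSP_SIZE              SZ_4K
    #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K

    enum class_id { CLASS_RENDER, CLASS_COMPUTE, CLASS_OTHER };

    static size_t lrc_size(enum class_id class, int graphics_ver, int indirect)
    {
            size_t size = LRC_PPHWSP_SIZE;          /* always one PPHWSP page */

            switch (class) {                        /* engine context image */
            case CLASS_RENDER:
                    size += (graphics_ver >= 20 ? 3 : 13) * SZ_4K;
                    break;
            case CLASS_COMPUTE:
                    size += (graphics_ver >= 20 ? 2 : 13) * SZ_4K;
                    break;
            default:
                    size += 1 * SZ_4K;              /* video decode/enhance, other */
            }

            if (indirect)
                    size += LRC_INDIRECT_RING_STATE_SIZE;

            return size;    /* e.g. render on Xe2: 4K + 3 * 4K (+ 4K indirect) */
    }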
* diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 5a3e89022c38..8f8e9fdfb2a8 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile) return tile->migrate->q; } -static void xe_migrate_fini(struct drm_device *dev, void *arg) +static void xe_migrate_fini(void *arg) { struct xe_migrate *m = arg; @@ -209,7 +209,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, num_entries * XE_PAGE_SIZE, ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED | XE_BO_FLAG_PAGETABLE); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -401,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) struct xe_vm *vm; int err; - m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL); + m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL); if (!m) return ERR_PTR(-ENOMEM); @@ -455,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) might_lock(&m->job_mutex); fs_reclaim_release(GFP_KERNEL); - err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m); + err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m); if (err) return ERR_PTR(err); @@ -670,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, u32 mocs = 0; u32 tile_y = 0; + xe_gt_assert(gt, !(pitch & 3)); xe_gt_assert(gt, size / pitch <= S16_MAX); xe_gt_assert(gt, pitch / 4 <= S16_MAX); xe_gt_assert(gt, pitch <= U16_MAX); @@ -779,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool dst_is_pltt = dst->mem_type == XE_PL_TT; bool src_is_vram = mem_type_is_vram(src->mem_type); bool dst_is_vram = mem_type_is_vram(dst->mem_type); + bool type_device = src_bo->ttm.type == ttm_bo_type_device; + bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe); bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); - bool use_comp_pat = xe_device_has_flat_ccs(xe) && + bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) && GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram; /* Copying CCS between two different BOs is not supported yet. */ @@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, avail_pts, avail_pts); if (copy_system_ccs) { + xe_assert(xe, type_device); ccs_size = xe_device_ccs_bytes(xe, src_L0); batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs, &ccs_pt, 0, @@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, /* Add copy commands size here */ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) + - ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0)); + ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0)); bb = xe_bb_new(gt, batch_size, usm); if (IS_ERR(bb)) { @@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (!copy_only_ccs) emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); - if (xe_migrate_needs_ccs_emit(xe)) + if (needs_ccs_emit) flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, IS_DGFX(xe) ? 
src_is_vram : src_is_pltt, dst_L0_ofs, @@ -1601,55 +1604,63 @@ enum xe_migrate_copy_dir { XE_MIGRATE_COPY_TO_SRAM, }; +#define XE_CACHELINE_BYTES 64ull +#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1) + static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, - unsigned long npages, + unsigned long len, + unsigned long sram_offset, dma_addr_t *sram_addr, u64 vram_addr, const enum xe_migrate_copy_dir dir) { struct xe_gt *gt = m->tile->primary_gt; struct xe_device *xe = gt_to_xe(gt); + bool use_usm_batch = xe->info.has_usm; struct dma_fence *fence = NULL; u32 batch_size = 2; u64 src_L0_ofs, dst_L0_ofs; - u64 round_update_size; struct xe_sched_job *job; struct xe_bb *bb; u32 update_idx, pt_slot = 0; + unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE); + unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ? + PAGE_SIZE : 4; int err; - if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER) - return ERR_PTR(-EINVAL); + if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) || + (sram_offset | vram_addr) & XE_CACHELINE_MASK)) + return ERR_PTR(-EOPNOTSUPP); + + xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER); - round_update_size = npages * PAGE_SIZE; - batch_size += pte_update_cmd_size(round_update_size); + batch_size += pte_update_cmd_size(len); batch_size += EMIT_COPY_DW; - bb = xe_bb_new(gt, batch_size, true); + bb = xe_bb_new(gt, batch_size, use_usm_batch); if (IS_ERR(bb)) { err = PTR_ERR(bb); return ERR_PTR(err); } build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE, - sram_addr, round_update_size); + sram_addr, len + sram_offset); if (dir == XE_MIGRATE_COPY_TO_VRAM) { - src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0); + src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); } else { src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); - dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0); + dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; } bb->cs[bb->len++] = MI_BATCH_BUFFER_END; update_idx = bb->len; - emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size, - XE_PAGE_SIZE); + emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch); job = xe_bb_create_migration_job(m->q, bb, - xe_migrate_batch_base(m, true), + xe_migrate_batch_base(m, use_usm_batch), update_idx); if (IS_ERR(job)) { err = PTR_ERR(job); @@ -1694,7 +1705,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, dma_addr_t *src_addr, u64 dst_addr) { - return xe_migrate_vram(m, npages, src_addr, dst_addr, + return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr, XE_MIGRATE_COPY_TO_VRAM); } @@ -1715,10 +1726,193 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m, u64 src_addr, dma_addr_t *dst_addr) { - return xe_migrate_vram(m, npages, dst_addr, src_addr, + return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr, XE_MIGRATE_COPY_TO_SRAM); } +static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr, + int len, int write) +{ + unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE); + + for (i = 0; i < npages; ++i) { + if (!dma_addr[i]) + break; + + dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE, + write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); + } + kfree(dma_addr); +} + +static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe, + void *buf, int len, int write) +{ + dma_addr_t *dma_addr; + unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE); + + dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL); + if (!dma_addr) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < npages; ++i) { + dma_addr_t addr; + struct page *page; + + if (is_vmalloc_addr(buf)) + page = vmalloc_to_page(buf); + else + page = virt_to_page(buf); + + addr = dma_map_page(xe->drm.dev, + page, 0, PAGE_SIZE, + write ? DMA_TO_DEVICE : + DMA_FROM_DEVICE); + if (dma_mapping_error(xe->drm.dev, addr)) + goto err_fault; + + dma_addr[i] = addr; + buf += PAGE_SIZE; + } + + return dma_addr; + +err_fault: + xe_migrate_dma_unmap(xe, dma_addr, len, write); + return ERR_PTR(-EFAULT); +} + +/** + * xe_migrate_access_memory - Access memory of a BO via GPU + * + * @m: The migration context. + * @bo: buffer object + * @offset: access offset into buffer object + * @buf: pointer to caller memory to read into or write from + * @len: length of access + * @write: write access + * + * Access memory of a BO via GPU either reading in or writing from a passed in + * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to + * read to or write from pointer. + * + * Returns: + * 0 if successful, negative error code on failure. + */ +int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, + unsigned long offset, void *buf, int len, + int write) +{ + struct xe_tile *tile = m->tile; + struct xe_device *xe = tile_to_xe(tile); + struct xe_res_cursor cursor; + struct dma_fence *fence = NULL; + dma_addr_t *dma_addr; + unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK; + int bytes_left = len, current_page = 0; + void *orig_buf = buf; + + xe_bo_assert_held(bo); + + /* Use bounce buffer for small access and unaligned access */ + if (len & XE_CACHELINE_MASK || + ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) { + int buf_offset = 0; + + /* + * Less than ideal for large unaligned access but this should be + * fairly rare, can fixup if this becomes common. 
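The bounce path below this comment walks the access one aligned cacheline at a time: read back the whole line, patch or extract the overlapping bytes, advance. The span arithmetic in isolation (a sketch; XE_CACHELINE_BYTES is 64 per the define added earlier in this file):

    #include <stdio.h>

    #define CACHELINE 64UL

    /* Print the aligned cacheline spans an unaligned (offset, len) access
     * decomposes into, mirroring the copy_bytes/ptr_offset math above. */
    static void bounce_spans(unsigned long offset, int len)
    {
            while (len > 0) {
                    unsigned long base = offset & ~(CACHELINE - 1);
                    int ptr_offset = (int)(offset & (CACHELINE - 1));
                    int copy_bytes = (int)(CACHELINE - ptr_offset);

                    if (copy_bytes > len)
                            copy_bytes = len;
                    printf("line at %#lx: use %d bytes from +%d\n",
                           base, copy_bytes, ptr_offset);
                    offset += copy_bytes;
                    len -= copy_bytes;
            }
    }

    int main(void)
    {
            bounce_spans(60, 10);   /* two lines: 4 bytes, then 6 bytes */
            return 0;
    }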
+ */ + do { + u8 bounce[XE_CACHELINE_BYTES]; + void *ptr = (void *)bounce; + int err; + int copy_bytes = min_t(int, bytes_left, + XE_CACHELINE_BYTES - + (offset & XE_CACHELINE_MASK)); + int ptr_offset = offset & XE_CACHELINE_MASK; + + err = xe_migrate_access_memory(m, bo, + offset & + ~XE_CACHELINE_MASK, + (void *)ptr, + sizeof(bounce), 0); + if (err) + return err; + + if (write) { + memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); + + err = xe_migrate_access_memory(m, bo, + offset & ~XE_CACHELINE_MASK, + (void *)ptr, + sizeof(bounce), 0); + if (err) + return err; + } else { + memcpy(buf + buf_offset, ptr + ptr_offset, + copy_bytes); + } + + bytes_left -= copy_bytes; + buf_offset += copy_bytes; + offset += copy_bytes; + } while (bytes_left); + + return 0; + } + + dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); + if (IS_ERR(dma_addr)) + return PTR_ERR(dma_addr); + + xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor); + + do { + struct dma_fence *__fence; + u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) + + cursor.start; + int current_bytes; + + if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER) + current_bytes = min_t(int, bytes_left, + MAX_PREEMPTDISABLE_TRANSFER); + else + current_bytes = min_t(int, bytes_left, cursor.size); + + if (fence) + dma_fence_put(fence); + + __fence = xe_migrate_vram(m, current_bytes, + (unsigned long)buf & ~PAGE_MASK, + dma_addr + current_page, + vram_addr, write ? + XE_MIGRATE_COPY_TO_VRAM : + XE_MIGRATE_COPY_TO_SRAM); + if (IS_ERR(__fence)) { + if (fence) + dma_fence_wait(fence, false); + fence = __fence; + goto out_err; + } + fence = __fence; + + buf += current_bytes; + offset += current_bytes; + current_page = (int)(buf - orig_buf) / PAGE_SIZE; + bytes_left -= current_bytes; + if (bytes_left) + xe_res_next(&cursor, current_bytes); + } while (bytes_left); + + dma_fence_wait(fence, false); + dma_fence_put(fence); + +out_err: + xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write); + return IS_ERR(fence) ? 
PTR_ERR(fence) : 0; +} + #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) #include "tests/xe_migrate.c" #endif diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 6ff9a963425c..fb9839c1bae0 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -112,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, struct ttm_resource *dst, bool copy_only_ccs); +int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, + unsigned long offset, void *buf, int len, + int write); + #define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0) #define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1) #define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \ diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 70a36e777546..096c38cc51c8 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -138,6 +138,7 @@ int xe_mmio_probe_early(struct xe_device *xe) return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); } +ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */ /** * xe_mmio_init() - Initialize an MMIO instance @@ -204,8 +205,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); - if (!reg.vf && mmio->sriov_vf_gt) - xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); + if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe)) + xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?: + mmio->tile->primary_gt, reg, val); else writel(val, mmio->regs + addr); } @@ -218,8 +220,9 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) /* Wa_15015404425 */ mmio_flush_pending_writes(mmio); - if (!reg.vf && mmio->sriov_vf_gt) - val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); + if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe)) + val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?: + mmio->tile->primary_gt, reg); else val = readl(mmio->regs + addr); diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 9f4632e39a1a..64bf46646544 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -11,6 +11,7 @@ #include <drm/drm_module.h> #include "xe_drv.h" +#include "xe_configfs.h" #include "xe_hw_fence.h" #include "xe_pci.h" #include "xe_pm.h" @@ -38,8 +39,8 @@ MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); module_param_named(probe_display, xe_modparam.probe_display, bool, 0444); MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)"); -module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600); -MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)"); +module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600); +MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size"); module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600); MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)"); @@ -89,6 +90,10 @@ static const struct init_funcs init_funcs[] = { .init = xe_check_nomodeset, }, { + .init = xe_configfs_init, + .exit = xe_configfs_exit, + }, + { .init = xe_hw_fence_module_init, .exit = xe_hw_fence_module_exit, }, diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 7ffc98f67e69..346f357b3d1f 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -2221,6 +2221,7 @@ addr_err: kfree(oa_regs); 
return ERR_PTR(err); } +ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO); static ssize_t show_dynamic_id(struct kobject *kobj, struct kobj_attribute *attr, diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 818f023166d5..882398e09b7e 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -62,11 +62,13 @@ struct xe_device_desc { u8 is_dgfx:1; u8 has_display:1; + u8 has_fan_control:1; u8 has_heci_gscfi:1; u8 has_heci_cscfi:1; u8 has_llc:1; u8 has_pxp:1; u8 has_sriov:1; + u8 needs_scratch:1; u8 skip_guc_pc:1; u8 skip_mtcfg:1; u8 skip_pcode:1; @@ -302,6 +304,7 @@ static const struct xe_device_desc dg2_desc = { DG2_FEATURES, .has_display = true, + .has_fan_control = true, }; static const __maybe_unused struct xe_device_desc pvc_desc = { @@ -329,6 +332,7 @@ static const struct xe_device_desc lnl_desc = { .dma_mask_size = 46, .has_display = true, .has_pxp = true, + .needs_scratch = true, }; static const struct xe_device_desc bmg_desc = { @@ -336,7 +340,9 @@ static const struct xe_device_desc bmg_desc = { PLATFORM(BATTLEMAGE), .dma_mask_size = 46, .has_display = true, + .has_fan_control = true, .has_heci_cscfi = 1, + .needs_scratch = true, }; static const struct xe_device_desc ptl_desc = { @@ -345,6 +351,7 @@ static const struct xe_device_desc ptl_desc = { .has_display = true, .has_sriov = true, .require_force_probe = true, + .needs_scratch = true, }; #undef PLATFORM @@ -575,6 +582,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.dma_mask_size = desc->dma_mask_size; xe->info.is_dgfx = desc->is_dgfx; + xe->info.has_fan_control = desc->has_fan_control; xe->info.has_heci_gscfi = desc->has_heci_gscfi; xe->info.has_heci_cscfi = desc->has_heci_cscfi; xe->info.has_llc = desc->has_llc; @@ -583,6 +591,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.skip_guc_pc = desc->skip_guc_pc; xe->info.skip_mtcfg = desc->skip_mtcfg; xe->info.skip_pcode = desc->skip_pcode; + xe->info.needs_scratch = desc->needs_scratch; xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && xe_modparam.probe_display && @@ -733,7 +742,7 @@ static void xe_pci_remove(struct pci_dev *pdev) return; xe_device_remove(xe); - xe_pm_runtime_fini(xe); + xe_pm_fini(xe); } /* @@ -803,18 +812,17 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; err = xe_device_probe_early(xe); - if (err) { - /* - * In Boot Survivability mode, no drm card is exposed and driver - * is loaded with bare minimum to allow for firmware to be - * flashed through mei. If early probe failed, but it managed to - * enable survivability mode, return success. - */ - if (xe_survivability_mode_is_enabled(xe)) - return 0; + /* + * In Boot Survivability mode, no drm card is exposed and driver + * is loaded with bare minimum to allow for firmware to be + * flashed through mei. 
Return success if survivability mode + * is enabled due to pcode failure or configfs being set + */ + if (xe_survivability_mode_is_enabled(xe)) + return 0; + if (err) return err; - } err = xe_info_init(xe, desc); if (err) @@ -920,6 +928,7 @@ static int xe_pci_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); return 0; } diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 09ee8a06fe2e..8813efdcafbb 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -7,6 +7,8 @@ #include "xe_device.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" +#include "xe_gt_sriov_printk.h" +#include "xe_guc_engine_activity.h" #include "xe_pci_sriov.h" #include "xe_pm.h" #include "xe_sriov.h" @@ -111,6 +113,20 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs) } } +static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable) +{ + struct xe_gt *gt; + unsigned int id; + int ret = 0; + + for_each_gt(gt, xe, id) { + ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable); + if (ret) + xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n", + str_enable_disable(enable), ERR_PTR(ret)); + } +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -145,6 +161,9 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) xe_sriov_info(xe, "Enabled %u of %u VF%s\n", num_vfs, total_vfs, str_plural(total_vfs)); + + pf_engine_activity_stats(xe, num_vfs, true); + return num_vfs; failed: @@ -168,6 +187,8 @@ static int pf_disable_vfs(struct xe_device *xe) if (!num_vfs) return 0; + pf_engine_activity_stats(xe, num_vfs, false); + pci_disable_sriov(pdev); pf_reset_vfs(xe, num_vfs); diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 9333ce776a6e..cf955b3ed52c 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -7,6 +7,7 @@ #include <linux/delay.h> #include <linux/errno.h> +#include <linux/error-injection.h> #include <drm/drm_managed.h> @@ -323,3 +324,4 @@ int xe_pcode_probe_early(struct xe_device *xe) { return xe_pcode_ready(xe, false); } +ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */ diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h index 2bae9afdbd35..e622ae17f08d 100644 --- a/drivers/gpu/drm/xe/xe_pcode_api.h +++ b/drivers/gpu/drm/xe/xe_pcode_api.h @@ -49,6 +49,9 @@ /* Domain IDs (param2) */ #define PCODE_MBOX_DOMAIN_HBM 0x2 +#define FAN_SPEED_CONTROL 0x7D +#define FSC_READ_NUM_FANS 0x4 + #define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4)) /* PCODE_SCRATCH0 */ #define AUXINFO_REG_OFFSET REG_GENMASK(17, 15) diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7b6b754ad6eb..38514cef817e 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -188,7 +188,7 @@ int xe_pm_resume(struct xe_device *xe) * This only restores pinned memory which is the memory required for the * GT(s) to resume.
*/ - err = xe_bo_restore_kernel(xe); + err = xe_bo_restore_early(xe); if (err) goto err; @@ -199,7 +199,7 @@ int xe_pm_resume(struct xe_device *xe) xe_display_pm_resume(xe); - err = xe_bo_restore_user(xe); + err = xe_bo_restore_late(xe); if (err) goto err; @@ -286,6 +286,42 @@ static u32 vram_threshold_value(struct xe_device *xe) return DEFAULT_VRAM_THRESHOLD; } +static int xe_pm_notifier_callback(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier); + int err = 0; + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + xe_pm_runtime_get(xe); + err = xe_bo_evict_all_user(xe); + if (err) { + drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err); + xe_pm_runtime_put(xe); + break; + } + + err = xe_bo_notifier_prepare_all_pinned(xe); + if (err) { + drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err); + xe_pm_runtime_put(xe); + } + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + xe_bo_notifier_unprepare_all_pinned(xe); + xe_pm_runtime_put(xe); + break; + } + + if (err) + return NOTIFY_BAD; + + return NOTIFY_DONE; +} + /** * xe_pm_init - Initialize Xe Power Management * @xe: xe device instance @@ -299,6 +335,11 @@ int xe_pm_init(struct xe_device *xe) u32 vram_threshold; int err; + xe->pm_notifier.notifier_call = xe_pm_notifier_callback; + err = register_pm_notifier(&xe->pm_notifier); + if (err) + return err; + /* For now suspend/resume is only allowed with GuC */ if (!xe_device_uc_enabled(xe)) return 0; @@ -308,24 +349,23 @@ int xe_pm_init(struct xe_device *xe) if (xe->d3cold.capable) { err = xe_device_sysfs_init(xe); if (err) - return err; + goto err_unregister; vram_threshold = vram_threshold_value(xe); err = xe_pm_set_vram_threshold(xe, vram_threshold); if (err) - return err; + goto err_unregister; } xe_pm_runtime_init(xe); - return 0; + +err_unregister: + unregister_pm_notifier(&xe->pm_notifier); + return err; } -/** - * xe_pm_runtime_fini - Finalize Runtime PM - * @xe: xe device instance - */ -void xe_pm_runtime_fini(struct xe_device *xe) +static void xe_pm_runtime_fini(struct xe_device *xe) { struct device *dev = xe->drm.dev; @@ -333,6 +373,18 @@ void xe_pm_runtime_fini(struct xe_device *xe) pm_runtime_forbid(dev); } +/** + * xe_pm_fini - Finalize PM + * @xe: xe device instance + */ +void xe_pm_fini(struct xe_device *xe) +{ + if (xe_device_uc_enabled(xe)) + xe_pm_runtime_fini(xe); + + unregister_pm_notifier(&xe->pm_notifier); +} + static void xe_pm_write_callback_task(struct xe_device *xe, struct task_struct *task) { @@ -484,7 +536,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) * This only restores pinned memory which is the memory * required for the GT(s) to resume. 
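The xe_pm_notifier_callback() added above follows the stock PM-notifier contract: take a runtime-PM reference and do the heavy lifting on the *_PREPARE events, undo it on PM_POST_*, and return NOTIFY_BAD to veto the transition on failure. A minimal self-contained module using the same generic API (everything xe-specific left out):

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int demo_pm_cb(struct notifier_block *nb, unsigned long action,
                          void *data)
    {
            switch (action) {
            case PM_HIBERNATION_PREPARE:
            case PM_SUSPEND_PREPARE:
                    /* acquire references / evict / prepare here; on error
                     * return NOTIFY_BAD to abort the transition */
                    break;
            case PM_POST_HIBERNATION:
            case PM_POST_SUSPEND:
                    /* undo the prepare step; also runs if suspend failed */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_pm_nb = { .notifier_call = demo_pm_cb };

    static int __init demo_init(void)
    {
            return register_pm_notifier(&demo_pm_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_pm_notifier(&demo_pm_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");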
*/ - err = xe_bo_restore_kernel(xe); + err = xe_bo_restore_early(xe); if (err) goto out; } @@ -497,7 +549,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) xe_display_pm_runtime_resume(xe); if (xe->d3cold.allowed) { - err = xe_bo_restore_user(xe); + err = xe_bo_restore_late(xe); if (err) goto out; } diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h index 998d1ed64556..59678b310e55 100644 --- a/drivers/gpu/drm/xe/xe_pm.h +++ b/drivers/gpu/drm/xe/xe_pm.h @@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe); int xe_pm_init_early(struct xe_device *xe); int xe_pm_init(struct xe_device *xe); -void xe_pm_runtime_fini(struct xe_device *xe); +void xe_pm_fini(struct xe_device *xe); bool xe_pm_runtime_suspended(struct xe_device *xe); int xe_pm_runtime_suspend(struct xe_device *xe); int xe_pm_runtime_resume(struct xe_device *xe); diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c index 4f62a6e515d6..69df0e3520a5 100644 --- a/drivers/gpu/drm/xe/xe_pmu.c +++ b/drivers/gpu/drm/xe/xe_pmu.c @@ -10,9 +10,11 @@ #include "xe_force_wake.h" #include "xe_gt_idle.h" #include "xe_guc_engine_activity.h" +#include "xe_guc_pc.h" #include "xe_hw_engine.h" #include "xe_pm.h" #include "xe_pmu.h" +#include "xe_sriov_pf_helpers.h" /** * DOC: Xe PMU (Performance Monitoring Unit) @@ -32,9 +34,10 @@ * gt[60:63] Selects gt for the event * engine_class[20:27] Selects engine-class for event * engine_instance[12:19] Selects the engine-instance for the event + * function[44:59] Selects the function of the event (SRIOV enabled) * * For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be - * set as populated by DRM_XE_DEVICE_QUERY_ENGINES. + * set as populated by DRM_XE_DEVICE_QUERY_ENGINES and function if SRIOV is enabled. * * For gt specific events (gt-*) gt parameter must be passed. All other parameters will be 0. 
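Putting the documented bit layout together, a raw event config is just OR-ed shifted fields; the event IDs appear in the defines that follow. A userspace sketch of the encoding (illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions from the documentation above:
     * gt[60:63], function[44:59], engine_class[20:27],
     * engine_instance[12:19], event[0:11]. */
    static uint64_t xe_pmu_config(uint64_t gt, uint64_t function,
                                  uint64_t engine_class,
                                  uint64_t engine_instance, uint64_t event)
    {
            return (gt << 60) | (function << 44) | (engine_class << 20) |
                   (engine_instance << 12) | event;
    }

    int main(void)
    {
            /* engine-active-ticks (0x02) on gt 0, class 0, instance 0, VF 1 */
            printf("config=%#llx\n",
                   (unsigned long long)xe_pmu_config(0, 1, 0, 0, 0x02));
            return 0;
    }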
* @@ -49,6 +52,7 @@ */ #define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60) +#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44) #define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20) #define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12) #define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0) @@ -58,6 +62,11 @@ static unsigned int config_to_event_id(u64 config) { return FIELD_GET(XE_PMU_EVENT_ID_MASK, config); } +static unsigned int config_to_function_id(u64 config) +{ + return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config); +} + static unsigned int config_to_engine_class(u64 config) { return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config); @@ -76,6 +85,8 @@ static unsigned int config_to_gt_id(u64 config) #define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01 #define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02 #define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03 +#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04 +#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05 static struct xe_gt *event_to_gt(struct perf_event *event) { @@ -111,6 +122,14 @@ static bool is_engine_event(u64 config) event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS); } +static bool is_gt_frequency_event(struct perf_event *event) +{ + u32 id = config_to_event_id(event->attr.config); + + return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY || + id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY; +} + static bool event_gt_forcewake(struct perf_event *event) { struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base); @@ -118,7 +137,7 @@ static bool event_gt_forcewake(struct perf_event *event) struct xe_gt *gt; unsigned int *fw_ref; - if (!is_engine_event(config)) + if (!is_engine_event(config) && !is_gt_frequency_event(event)) return true; gt = xe_device_get_gt(xe, config_to_gt_id(config)); @@ -151,7 +170,7 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt, static bool event_param_valid(struct perf_event *event) { struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base); - unsigned int engine_class, engine_instance; + unsigned int engine_class, engine_instance, function_id; u64 config = event->attr.config; struct xe_gt *gt; @@ -161,16 +180,28 @@ static bool event_param_valid(struct perf_event *event) engine_class = config_to_engine_class(config); engine_instance = config_to_engine_instance(config); + function_id = config_to_function_id(config); switch (config_to_event_id(config)) { case XE_PMU_EVENT_GT_C6_RESIDENCY: - if (engine_class || engine_instance) + case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY: + case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY: + if (engine_class || engine_instance || function_id) return false; break; case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS: case XE_PMU_EVENT_ENGINE_TOTAL_TICKS: if (!event_to_hwe(event)) return false; + + /* PF(0) and total vfs when SRIOV is enabled */ + if (IS_SRIOV_PF(xe)) { + if (function_id > xe_sriov_pf_get_totalvfs(xe)) + return false; + } else if (function_id) { + return false; + } + break; } @@ -242,13 +273,17 @@ static int xe_pmu_event_init(struct perf_event *event) static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event) { struct xe_hw_engine *hwe; - u64 val = 0; + unsigned int function_id; + u64 config, val = 0; + + config = event->attr.config; + function_id = config_to_function_id(config); hwe = event_to_hwe(event); - if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS) - val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe); + if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS) + val = xe_guc_engine_activity_active_ticks(&gt->uc.guc,
hwe, function_id); else - val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe); + val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id); return val; } @@ -266,6 +301,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event) case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS: case XE_PMU_EVENT_ENGINE_TOTAL_TICKS: return read_engine_events(gt, event); + case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY: + return xe_guc_pc_get_act_freq(&gt->uc.guc.pc); + case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY: + return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc); } return 0; @@ -281,7 +320,14 @@ static void xe_pmu_event_update(struct perf_event *event) new = __xe_pmu_event_read(event); } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new)); - local64_add(new - prev, &event->count); + /* + * GT frequency is not a monotonically increasing counter, so add the + * instantaneous value instead. + */ + if (is_gt_frequency_event(event)) + local64_add(new, &event->count); + else + local64_add(new - prev, &event->count); } static void xe_pmu_event_read(struct perf_event *event) @@ -351,6 +397,7 @@ static void xe_pmu_event_del(struct perf_event *event, int flags) } PMU_FORMAT_ATTR(gt, "config:60-63"); +PMU_FORMAT_ATTR(function, "config:44-59"); PMU_FORMAT_ATTR(engine_class, "config:20-27"); PMU_FORMAT_ATTR(engine_instance, "config:12-19"); PMU_FORMAT_ATTR(event, "config:0-11"); @@ -359,6 +406,7 @@ static struct attribute *pmu_format_attrs[] = { &format_attr_event.attr, &format_attr_engine_class.attr, &format_attr_engine_instance.attr, + &format_attr_function.attr, &format_attr_gt.attr, NULL, }; @@ -419,6 +467,10 @@ static ssize_t event_attr_show(struct device *dev, XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms"); XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS); XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS); +XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency, + XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz"); +XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency, + XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz"); static struct attribute *pmu_empty_event_attrs[] = { /* Empty - all events are added as groups with .attr_update() */ @@ -434,6 +486,8 @@ static const struct attribute_group *pmu_events_attr_update[] = { &pmu_group_gt_c6_residency, &pmu_group_engine_active_ticks, &pmu_group_engine_total_ticks, + &pmu_group_gt_actual_frequency, + &pmu_group_gt_requested_frequency, NULL, }; @@ -442,8 +496,11 @@ static void set_supported_events(struct xe_pmu *pmu) { struct xe_device *xe = container_of(pmu, typeof(*xe), pmu); struct xe_gt *gt = xe_device_get_gt(xe, 0); - if (!xe->info.skip_guc_pc) + if (!xe->info.skip_guc_pc) { pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY); + pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY); + pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY); + } if (xe_guc_engine_activity_supported(&gt->uc.guc)) { pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ffaf0d02dc7d..b42cf5d1b20c 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, { struct xe_pt *pt; struct xe_bo *bo; + u32 bo_flags; int err; if (level) { @@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, if (!pt)
return ERR_PTR(-ENOMEM); + bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | + XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE; + if (vm->xef) /* userspace */ + bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; + pt->level = level; bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE | - XE_BO_FLAG_PINNED | - XE_BO_FLAG_NO_RESV_EVICT | - XE_BO_FLAG_PAGETABLE); + bo_flags); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto err_kfree; @@ -269,8 +272,11 @@ struct xe_pt_update { bool preexisting; }; +/** + * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk. + */ struct xe_pt_stage_bind_walk { - /** base: The base class. */ + /** @base: The base class. */ struct xe_pt_walk base; /* Input parameters for the walk */ @@ -278,15 +284,19 @@ struct xe_pt_stage_bind_walk { struct xe_vm *vm; /** @tile: The tile we're building for. */ struct xe_tile *tile; - /** @default_pte: PTE flag only template. No address is associated */ - u64 default_pte; + /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */ + u64 default_vram_pte; + /** @default_system_pte: PTE flag only template for System. No address is associated */ + u64 default_system_pte; /** @dma_offset: DMA offset to add to the PTE. */ u64 dma_offset; /** - * @needs_64k: This address range enforces 64K alignment and - * granularity. + * @needs_64K: This address range enforces 64K alignment and + * granularity on VRAM. */ bool needs_64K; + /** @clear_pt: clear page table entries during the bind walk */ + bool clear_pt; /** * @vma: VMA being mapped */ @@ -299,6 +309,7 @@ struct xe_pt_stage_bind_walk { u64 va_curs_start; /* Output */ + /** @wupd: Walk output data for page-table updates. */ struct xe_walk_update { /** @wupd.entries: Caller provided storage. */ struct xe_vm_pgtable_update *entries; @@ -316,7 +327,7 @@ struct xe_pt_stage_bind_walk { u64 l0_end_addr; /** @addr_64K: The start address of the current 64K chunk. */ u64 addr_64K; - /** @found_64: Whether @add_64K actually points to a 64K chunk. */ + /** @found_64K: Whether @add_64K actually points to a 64K chunk. */ bool found_64K; }; @@ -436,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level, if (xe_vma_is_null(xe_walk->vma)) return true; + /* if we are clearing page table, no dma addresses*/ + if (xe_walk->clear_pt) + return true; + /* Is the DMA address huge PTE size aligned? */ size = next - addr; dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs); @@ -515,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) { struct xe_res_cursor *curs = xe_walk->curs; bool is_null = xe_vma_is_null(xe_walk->vma); + bool is_vram = is_null ? false : xe_res_is_vram(curs); XE_WARN_ON(xe_walk->va_curs_start != addr); - pte = vm->pt_ops->pte_encode_vma(is_null ? 0 : - xe_res_dma(curs) + xe_walk->dma_offset, - xe_walk->vma, pat_index, level); - pte |= xe_walk->default_pte; + if (xe_walk->clear_pt) { + pte = 0; + } else { + pte = vm->pt_ops->pte_encode_vma(is_null ? 0 : + xe_res_dma(curs) + + xe_walk->dma_offset, + xe_walk->vma, + pat_index, level); + if (!is_null) + pte |= is_vram ? xe_walk->default_vram_pte : + xe_walk->default_system_pte; - /* - * Set the XE_PTE_PS64 hint if possible, otherwise if - * this device *requires* 64K PTE size for VRAM, fail. 
- */ - if (level == 0 && !xe_parent->is_compact) { - if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { - xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K; - pte |= XE_PTE_PS64; - } else if (XE_WARN_ON(xe_walk->needs_64K)) { - return -EINVAL; + /* + * Set the XE_PTE_PS64 hint if possible, otherwise if + * this device *requires* 64K PTE size for VRAM, fail. + */ + if (level == 0 && !xe_parent->is_compact) { + if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { + xe_walk->vma->gpuva.flags |= + XE_VMA_PTE_64K; + pte |= XE_PTE_PS64; + } else if (XE_WARN_ON(xe_walk->needs_64K && + is_vram)) { + return -EINVAL; + } } } @@ -540,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, if (unlikely(ret)) return ret; - if (!is_null) + if (!is_null && !xe_walk->clear_pt) xe_res_next(curs, next - addr); xe_walk->va_curs_start = next; xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level); @@ -603,6 +629,44 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = { .pt_entry = xe_pt_stage_bind_entry, }; +/* + * Default atomic expectations for different allocation scenarios are as follows: + * + * 1. Traditional API: When the VM is not in LR mode: + * - Device atomics are expected to function with all allocations. + * + * 2. Compute/SVM API: When the VM is in LR mode: + * - Device atomics are the default behavior when the bo is placed in a single region. + * - In all other cases device atomics will be disabled with AE=0 until an application + * request differently using a ioctl like madvise. + */ +static bool xe_atomic_for_vram(struct xe_vm *vm) +{ + return true; +} + +static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo) +{ + struct xe_device *xe = vm->xe; + + if (!xe->info.has_device_atomics_on_smem) + return false; + + /* + * If a SMEM+LMEM allocation is backed by SMEM, a device + * atomics will cause a gpu page fault and which then + * gets migrated to LMEM, bind such allocations with + * device atomics enabled. + * + * TODO: Revisit this. Perhaps add something like a + * fault_on_atomics_in_system UAPI flag. + * Note that this also prohibits GPU atomics in LR mode for + * userptr and system memory on DGFX. + */ + return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) || + (bo && xe_bo_has_single_placement(bo)))); +} + /** * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address * range. @@ -612,6 +676,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = { * @entries: Storage for the update entries used for connecting the tree to * the main tree at commit time. * @num_entries: On output contains the number of @entries used. + * @clear_pt: Clear the page table entries. * * This function builds a disconnected page-table tree for a given address * range. 
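For VMAs with XE_VMA_ATOMIC_PTE_BIT set, the xe_atomic_for_vram()/xe_atomic_for_system() split above reduces to the following decision for the atomic-enable (AE) PTE bit. The table merely restates the code, it adds no policy:

    backing  atomics-on-smem  DGFX  LR mode  single-placement BO  AE
    VRAM     any              any   any      any                  1
    system   no               any   any      any                  0
    system   yes              no    any      any                  1
    system   yes              yes   no       any                  1
    system   yes              yes   yes      yes                  1
    system   yes              yes   yes      no                   0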
The tree is connected to the main vm tree for the gpu using @@ -625,13 +690,13 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = { static int xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, struct xe_svm_range *range, - struct xe_vm_pgtable_update *entries, u32 *num_entries) + struct xe_vm_pgtable_update *entries, + u32 *num_entries, bool clear_pt) { struct xe_device *xe = tile_to_xe(tile); struct xe_bo *bo = xe_vma_bo(vma); - bool is_devmem = !xe_vma_is_userptr(vma) && bo && - (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)); struct xe_res_cursor curs; + struct xe_vm *vm = xe_vma_vm(vma); struct xe_pt_stage_bind_walk xe_walk = { .base = { .ops = &xe_pt_stage_bind_ops, @@ -639,34 +704,31 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, .max_level = XE_PT_HIGHEST_LEVEL, .staging = true, }, - .vm = xe_vma_vm(vma), + .vm = vm, .tile = tile, .curs = &curs, .va_curs_start = range ? range->base.itree.start : xe_vma_start(vma), .vma = vma, .wupd.entries = entries, + .clear_pt = clear_pt, }; - struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id]; + struct xe_pt *pt = vm->pt_root[tile->id]; int ret; if (range) { /* Move this entire thing to xe_svm.c? */ - xe_svm_notifier_lock(xe_vma_vm(vma)); + xe_svm_notifier_lock(vm); if (!xe_svm_range_pages_valid(range)) { xe_svm_range_debug(range, "BIND PREPARE - RETRY"); - xe_svm_notifier_unlock(xe_vma_vm(vma)); + xe_svm_notifier_unlock(vm); return -EAGAIN; } if (xe_svm_range_has_dma_mapping(range)) { xe_res_first_dma(range->base.dma_addr, 0, range->base.itree.last + 1 - range->base.itree.start, &curs); - is_devmem = xe_res_is_vram(&curs); - if (is_devmem) - xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM"); - else - xe_svm_range_debug(range, "BIND PREPARE - DMA"); + xe_svm_range_debug(range, "BIND PREPARE - MIXED"); } else { xe_assert(xe, false); } @@ -674,54 +736,21 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, * Note, when unlocking the resource cursor dma addresses may become * stale, but the bind will be aborted anyway at commit time. */ - xe_svm_notifier_unlock(xe_vma_vm(vma)); + xe_svm_notifier_unlock(vm); } - xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem; + xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K); + if (clear_pt) + goto walk_pt; - /** - * Default atomic expectations for different allocation scenarios are as follows: - * - * 1. Traditional API: When the VM is not in LR mode: - * - Device atomics are expected to function with all allocations. - * - * 2. Compute/SVM API: When the VM is in LR mode: - * - Device atomics are the default behavior when the bo is placed in a single region. - * - In all other cases device atomics will be disabled with AE=0 until an application - * request differently using a ioctl like madvise. - */ if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) { - if (xe_vm_in_lr_mode(xe_vma_vm(vma))) { - if (bo && xe_bo_has_single_placement(bo)) - xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; - /** - * If a SMEM+LMEM allocation is backed by SMEM, a device - * atomics will cause a gpu page fault and which then - * gets migrated to LMEM, bind such allocations with - * device atomics enabled. 
- */ - else if (is_devmem) - xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; - } else { - xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; - } - - /** - * Unset AE if the platform(PVC) doesn't support it on an - * allocation - */ - if (!xe->info.has_device_atomics_on_smem && !is_devmem) - xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE; - } - - if (is_devmem) { - xe_walk.default_pte |= XE_PPGTT_PTE_DM; - xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0; + xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0; + xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ? + XE_USM_PPGTT_PTE_AE : 0; } - if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo)) - xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo)); - + xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM; + xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0; if (!range) xe_bo_assert_held(bo); @@ -739,6 +768,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, curs.size = xe_vma_size(vma); } +walk_pt: ret = xe_pt_walk_range(&pt->base, pt->level, range ? range->base.itree.start : xe_vma_start(vma), range ? range->base.itree.last + 1 : xe_vma_end(vma), @@ -1103,12 +1133,14 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries, static int xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, struct xe_svm_range *range, - struct xe_vm_pgtable_update *entries, u32 *num_entries) + struct xe_vm_pgtable_update *entries, + u32 *num_entries, bool invalidate_on_bind) { int err; *num_entries = 0; - err = xe_pt_stage_bind(tile, vma, range, entries, num_entries); + err = xe_pt_stage_bind(tile, vma, range, entries, num_entries, + invalidate_on_bind); if (!err) xe_tile_assert(tile, *num_entries); @@ -1420,6 +1452,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) return err; } +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update) { struct xe_vm *vm = pt_update->vops->vm; @@ -1453,6 +1486,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update) return 0; } +#endif struct invalidation_fence { struct xe_gt_tlb_invalidation_fence base; @@ -1791,7 +1825,7 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma) static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, struct xe_vm_pgtable_update_ops *pt_update_ops, - struct xe_vma *vma) + struct xe_vma *vma, bool invalidate_on_bind) { u32 current_op = pt_update_ops->current_op; struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op]; @@ -1813,7 +1847,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, return err; err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries, - &pt_op->num_entries); + &pt_op->num_entries, invalidate_on_bind); if (!err) { xe_tile_assert(tile, pt_op->num_entries <= ARRAY_SIZE(pt_op->entries)); @@ -1835,11 +1869,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile, * If !rebind, and scratch enabled VMs, there is a chance the scratch * PTE is already cached in the TLB so it needs to be invalidated. * On !LR VMs this is done in the ring ops preceding a batch, but on - * non-faulting LR, in particular on user-space batch buffer chaining, - * it needs to be done here. + * LR, in particular on user-space batch buffer chaining, it needs to + * be done here. 
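The condition change above, xe_vm_in_preempt_fence_mode() to xe_vm_in_lr_mode(), widens the pre-bind TLB flush to faulting LR VMs as well, since LR mode covers both the preempt-fence and faulting flavors. Condensed (a restatement of the first branch only, as in the hunk):

    /* A fresh bind into a scratch-enabled LR VM must invalidate, because
     * the scratch PTE for that range may already be cached in the TLB. */
    bool needs_invalidation = !rebind && vm_has_scratch && vm_in_lr_mode;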
*/ if ((!pt_op->rebind && xe_vm_has_scratch(vm) && - xe_vm_in_preempt_fence_mode(vm))) + xe_vm_in_lr_mode(vm))) pt_update_ops->needs_invalidation = true; else if (pt_op->rebind && !xe_vm_in_lr_mode(vm)) /* We bump also if batch_invalidate_tlb is true */ @@ -1875,7 +1909,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile, pt_op->rebind = BIT(tile->id) & range->tile_present; err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries, - &pt_op->num_entries); + &pt_op->num_entries, false); if (!err) { xe_tile_assert(tile, pt_op->num_entries <= ARRAY_SIZE(pt_op->entries)); @@ -1987,11 +2021,13 @@ static int op_prepare(struct xe_vm *vm, switch (op->base.op) { case DRM_GPUVA_OP_MAP: - if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || + if ((!op->map.immediate && xe_vm_in_fault_mode(vm) && + !op->map.invalidate_on_bind) || op->map.is_cpu_addr_mirror) break; - err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma); + err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma, + op->map.invalidate_on_bind); pt_update_ops->wait_vm_kernel = true; break; case DRM_GPUVA_OP_REMAP: @@ -2005,12 +2041,12 @@ static int op_prepare(struct xe_vm *vm, if (!err && op->remap.prev) { err = bind_op_prepare(vm, tile, pt_update_ops, - op->remap.prev); + op->remap.prev, false); pt_update_ops->wait_vm_bookkeep = true; } if (!err && op->remap.next) { err = bind_op_prepare(vm, tile, pt_update_ops, - op->remap.next); + op->remap.next, false); pt_update_ops->wait_vm_bookkeep = true; } break; @@ -2032,7 +2068,7 @@ static int op_prepare(struct xe_vm *vm, if (xe_vma_is_cpu_addr_mirror(vma)) break; - err = bind_op_prepare(vm, tile, pt_update_ops, vma); + err = bind_op_prepare(vm, tile, pt_update_ops, vma, false); pt_update_ops->wait_vm_kernel = true; break; } @@ -2115,7 +2151,7 @@ ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO); static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, struct xe_vm_pgtable_update_ops *pt_update_ops, struct xe_vma *vma, struct dma_fence *fence, - struct dma_fence *fence2) + struct dma_fence *fence2, bool invalidate_on_bind) { xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma)); @@ -2132,6 +2168,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, } vma->tile_present |= BIT(tile->id); vma->tile_staged &= ~BIT(tile->id); + if (invalidate_on_bind) + vma->tile_invalidated |= BIT(tile->id); if (xe_vma_is_userptr(vma)) { lockdep_assert_held_read(&vm->userptr.notifier_lock); to_userptr_vma(vma)->userptr.initial_bind = true; @@ -2193,7 +2231,7 @@ static void op_commit(struct xe_vm *vm, break; bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, - fence2); + fence2, op->map.invalidate_on_bind); break; case DRM_GPUVA_OP_REMAP: { @@ -2206,10 +2244,10 @@ static void op_commit(struct xe_vm *vm, if (op->remap.prev) bind_op_commit(vm, tile, pt_update_ops, op->remap.prev, - fence, fence2); + fence, fence2, false); if (op->remap.next) bind_op_commit(vm, tile, pt_update_ops, op->remap.next, - fence, fence2); + fence, fence2, false); break; } case DRM_GPUVA_OP_UNMAP: @@ -2227,7 +2265,7 @@ static void op_commit(struct xe_vm *vm, if (!xe_vma_is_cpu_addr_mirror(vma)) bind_op_commit(vm, tile, pt_update_ops, vma, fence, - fence2); + fence2, false); break; } case DRM_GPUVA_OP_DRIVER: @@ -2257,11 +2295,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = { .pre_commit = xe_pt_userptr_pre_commit, }; +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) static const struct xe_migrate_pt_update_ops svm_migrate_ops = { .populate = 
xe_vm_populate_pgtable, .clear = xe_migrate_clear_pgtable_callback, .pre_commit = xe_pt_svm_pre_commit, }; +#else +static const struct xe_migrate_pt_update_ops svm_migrate_ops; +#endif /** * xe_pt_update_ops_run() - Run PT update operations diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 5e65830dad25..2dbf4066d86f 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) if (xe_device_get_root_tile(xe)->mem.vram.usable_size) config->info[DRM_XE_QUERY_CONFIG_FLAGS] |= DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM; - if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM)) + if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM)) config->info[DRM_XE_QUERY_CONFIG_FLAGS] |= DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR; config->info[DRM_XE_QUERY_CONFIG_FLAGS] |= diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 9475e3f74958..fc8447a838c4 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) if (xa_empty(&sr->xa)) return; + if (IS_SRIOV_VF(gt_to_xe(gt))) + return; + xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name); fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h index 1ae56e2ee7b4..d7e3e150a9a5 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops_types.h +++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h @@ -8,7 +8,7 @@ struct xe_sched_job; -#define MAX_JOB_SIZE_DW 48 +#define MAX_JOB_SIZE_DW 58 #define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4) /** diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 13bb62d3e615..29e694bb1219 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -258,9 +258,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, rtp_get_context(ctx, &hwe, >, &xe); - if (IS_SRIOV_VF(xe)) - return; - xe_assert(xe, entries); for (entry = entries; entry - entries < n_entries; entry++) { diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index f8fe61e25518..1d43e183ca21 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -60,7 +60,8 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3 bo = xe_managed_bo_create_pin_map(xe, tile, size, XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + XE_BO_FLAG_GGTT_INVALIDATE | + XE_BO_FLAG_PINNED_NORESTORE); if (IS_ERR(bo)) { drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n", size / SZ_1K, bo); diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c index cb813b337fd3..1f710b3fc599 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.c +++ b/drivers/gpu/drm/xe/xe_survivability_mode.c @@ -10,6 +10,7 @@ #include <linux/pci.h> #include <linux/sysfs.h> +#include "xe_configfs.h" #include "xe_device.h" #include "xe_gt.h" #include "xe_heci_gsc.h" @@ -28,20 +29,32 @@ * This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware * to be flashed through mei and collect telemetry. The driver's probe flow is modified * such that it enters survivability mode when pcode initialization is incomplete and boot status - * denotes a failure. 
The driver then populates the survivability_mode PCI sysfs indicating - survivability mode and provides additional information required for debug * - * KMD exposes below admin-only readable sysfs in survivability mode + * Survivability mode can also be entered manually using the survivability mode attribute available + * through configfs, which is beneficial in several use cases. It can be used to address scenarios + * where pcode does not detect failure or for validation purposes. It can also be used in + * In-Field-Repair (IFR) to repair a single card without impacting the other cards in a node. * - * device/survivability_mode: The presence of this file indicates that the card is in survivability - * mode. Also, provides additional information on why the driver entered - * survivability mode. + * Use the command below to enable survivability mode manually:: * - * Capability Information - Provides boot status - * Postcode Information - Provides information about the failure - * Overflow Information - Provides history of previous failures - * Auxiliary Information - Certain failures may have information in - * addition to postcode information + * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode + * + * Refer to :ref:`xe_configfs` for more details on how to use configfs + * + * Survivability mode is indicated by the below admin-only readable sysfs which provides additional + * debug information:: + * + * /sys/bus/pci/devices/<device>/survivability_mode + * + * Capability Information: + * Provides boot status + * Postcode Information: + * Provides information about the failure + * Overflow Information: + * Provides history of previous failures + * Auxiliary Information: + * Certain failures may have information in addition to postcode information */ static u32 aux_history_offset(u32 reg_value) @@ -133,6 +146,7 @@ static void xe_survivability_mode_fini(void *arg) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); struct device *dev = &pdev->dev; + xe_configfs_clear_survivability_mode(pdev); sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr); } @@ -186,24 +200,41 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe) return xe->survivability.mode; } -/* - * survivability_mode_requested - check if it's possible to enable - * survivability mode and that was requested by firmware +/** + * xe_survivability_mode_is_requested - check if it's possible to enable survivability + * mode that was requested by firmware or userspace + * @xe: xe device instance * - * This function reads the boot status from Pcode. + * This function reads configfs and boot status from Pcode. * * Return: true if platform support is available and boot status indicates - * failure, false otherwise. + * failure or if survivability mode is requested, false otherwise.
*/ -static bool survivability_mode_requested(struct xe_device *xe) +bool xe_survivability_mode_is_requested(struct xe_device *xe) { struct xe_survivability *survivability = &xe->survivability; struct xe_mmio *mmio = xe_root_tile_mmio(xe); + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); u32 data; + bool survivability_mode; - if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe)) + if (!IS_DGFX(xe) || IS_SRIOV_VF(xe)) return false; + survivability_mode = xe_configfs_get_survivability_mode(pdev); + + if (xe->info.platform < XE_BATTLEMAGE) { + if (survivability_mode) { + dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n"); + xe_configfs_clear_survivability_mode(pdev); + } + return false; + } + + /* Enable survivability mode if set via configfs */ + if (survivability_mode) + return true; + data = xe_mmio_read32(mmio, PCODE_SCRATCH(0)); survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data); @@ -226,7 +257,7 @@ int xe_survivability_mode_enable(struct xe_device *xe) struct xe_survivability_info *info; struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - if (!survivability_mode_requested(xe)) + if (!xe_survivability_mode_is_requested(xe)) return 0; survivability->size = MAX_SCRATCH_MMIO; diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h index d7e64885570d..02231c2bf008 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.h +++ b/drivers/gpu/drm/xe/xe_survivability_mode.h @@ -12,5 +12,6 @@ struct xe_device; int xe_survivability_mode_enable(struct xe_device *xe); bool xe_survivability_mode_is_enabled(struct xe_device *xe); +bool xe_survivability_mode_is_requested(struct xe_device *xe); #endif /* _XE_SURVIVABILITY_MODE_H_ */ diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 0b6547c06961..890f6b2f40e9 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -4,6 +4,7 @@ */ #include "xe_bo.h" +#include "xe_gt_stats.h" #include "xe_gt_tlb_invalidation.h" #include "xe_migrate.h" #include "xe_module.h" @@ -339,6 +340,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w) up_write(&vm->lock); } +#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) + static struct xe_vram_region *page_to_vr(struct page *page) { return container_of(page_pgmap(page), struct xe_vram_region, pagemap); @@ -577,6 +580,8 @@ static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = { .copy_to_ram = xe_svm_copy_to_ram, }; +#endif + static const struct drm_gpusvm_ops gpusvm_ops = { .range_alloc = xe_svm_range_alloc, .range_free = xe_svm_range_free, @@ -650,6 +655,7 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range, return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id); } +#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) static struct xe_vram_region *tile_to_vr(struct xe_tile *tile) { return &tile->mem.vram; @@ -711,12 +717,21 @@ unlock: return err; } +#else +static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, + struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) +{ + return -EOPNOTSUPP; +} +#endif + /** * xe_svm_handle_pagefault() - SVM handle page fault * @vm: The VM. * @vma: The CPU address mirror VMA. - * @tile: The tile upon the fault occurred. + * @gt: The gt upon the fault occurred. * @fault_addr: The GPU fault address. * @atomic: The fault atomic access bit. * @@ -726,7 +741,7 @@ unlock: * Return: 0 on success, negative error code on error. 
*/ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic) { struct drm_gpusvm_ctx ctx = { @@ -740,12 +755,15 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, struct drm_gpusvm_range *r; struct drm_exec exec; struct dma_fence *fence; + struct xe_tile *tile = gt_to_tile(gt); ktime_t end = 0; int err; lockdep_assert_held_write(&vm->lock); xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1); + retry: /* Always process UNMAPs first so view SVM ranges is current */ err = xe_svm_garbage_collector(vm); @@ -866,6 +884,7 @@ int xe_svm_bo_evict(struct xe_bo *bo) } #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) + static struct drm_pagemap_device_addr xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap, struct device *dev, diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index e059590e5076..3d441eb1f7ea 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -6,16 +6,19 @@ #ifndef _XE_SVM_H_ #define _XE_SVM_H_ +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) + #include <drm/drm_pagemap.h> #include <drm/drm_gpusvm.h> #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER struct xe_bo; -struct xe_vram_region; +struct xe_gt; struct xe_tile; struct xe_vm; struct xe_vma; +struct xe_vram_region; /** struct xe_svm_range - SVM range */ struct xe_svm_range { @@ -43,7 +46,6 @@ struct xe_svm_range { u8 skip_migrate :1; }; -#if IS_ENABLED(CONFIG_DRM_GPUSVM) /** * xe_svm_range_pages_valid() - SVM range pages valid * @range: SVM range @@ -64,7 +66,7 @@ void xe_svm_fini(struct xe_vm *vm); void xe_svm_close(struct xe_vm *vm); int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic); bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end); @@ -72,7 +74,50 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end); int xe_svm_bo_evict(struct xe_bo *bo); void xe_svm_range_debug(struct xe_svm_range *range, const char *operation); + +/** + * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping + * @range: SVM range + * + * Return: True if SVM range has a DMA mapping, False otherwise + */ +static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range) +{ + lockdep_assert_held(&range->base.gpusvm->notifier_lock); + return range->base.flags.has_dma_mapping; +} + +#define xe_svm_assert_in_notifier(vm__) \ + lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock) + +#define xe_svm_notifier_lock(vm__) \ + drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm) + +#define xe_svm_notifier_unlock(vm__) \ + drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm) + #else +#include <linux/interval_tree.h> + +struct drm_pagemap_device_addr; +struct xe_bo; +struct xe_gt; +struct xe_vm; +struct xe_vma; +struct xe_tile; +struct xe_vram_region; + +#define XE_INTERCONNECT_VRAM 1 + +struct xe_svm_range { + struct { + struct interval_tree_node itree; + const struct drm_pagemap_device_addr *dma_addr; + } base; + u32 tile_present; + u32 tile_invalidated; +}; + static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range) { return false; @@ -102,7 +147,7 @@ void xe_svm_close(struct xe_vm *vm) static inline int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic) { return 0; @@ -124,27 +169,16 @@ static inline void 
xe_svm_range_debug(struct xe_svm_range *range, const char *operation) { } -#endif -/** - * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping - * @range: SVM range - * - * Return: True if SVM range has a DMA mapping, False otherwise - */ -static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range) +#define xe_svm_assert_in_notifier(...) do {} while (0) +#define xe_svm_range_has_dma_mapping(...) false + +static inline void xe_svm_notifier_lock(struct xe_vm *vm) { - lockdep_assert_held(&range->base.gpusvm->notifier_lock); - return range->base.flags.has_dma_mapping; } -#define xe_svm_assert_in_notifier(vm__) \ - lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock) - -#define xe_svm_notifier_lock(vm__) \ - drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm) - -#define xe_svm_notifier_unlock(vm__) \ - drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm) - +static inline void xe_svm_notifier_unlock(struct xe_vm *vm) +{ +} +#endif #endif diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index fb0eda3d5682..2741849bbf4d 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -92,6 +92,8 @@ struct uc_fw_entry { enum xe_platform platform; + enum xe_gt_type gt_type; + struct { const char *path; u16 major; @@ -106,32 +108,37 @@ struct fw_blobs_by_type { u32 count; }; -#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \ - fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \ - fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \ - fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \ - fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \ - fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \ - fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) +/* + * Add an "ANY" define just to convey the meaning it's given here. 
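+ * Firmware table entries that apply to every GT on a platform use
+ * GT_TYPE_ANY; an entry limited to one GT would name its type instead,
+ * e.g. GT_TYPE_MEDIA expanding to XE_GT_TYPE_MEDIA (a hypothetical
+ * example, no such entry is defined below).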
+ */ +#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED + +#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \ + fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \ + fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \ + fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ + fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \ + fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ + fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ + fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ - fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \ - fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \ - fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ - fw_def(DG1, no_ver(i915, huc, dg1)) \ - fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \ - fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \ - fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \ - fw_def(TIGERLAKE, no_ver(i915, huc, tgl)) + fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \ + fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \ + fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) /* for the GSC FW we match the compatibility version and not the release one */ #define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \ - fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \ - fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0)) + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0)) #define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \ __stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin" @@ -159,12 +166,13 @@ struct fw_blobs_by_type { a, b, c } /* All blobs need to be declared via MODULE_FIRMWARE() */ -#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \ +#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \ MODULE_FIRMWARE(fw_filename); -#define XE_UC_FW_ENTRY(platform__, entry__) \ +#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \ { \ .platform = XE_ ## platform__, \ + .gt_type = XE_ ## gt_type__, \ entry__, \ }, @@ -222,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) [XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) }, [XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) }, }; - static const struct uc_fw_entry *entries; + struct xe_gt *gt = uc_fw_to_gt(uc_fw); enum xe_platform p = xe->info.platform; + const struct uc_fw_entry *entries; u32 count; int i; - xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all)); + xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all)); + xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED); + entries = blobs_all[uc_fw->type].entries; count = blobs_all[uc_fw->type].count; for (i = 0; i < count && p <= entries[i].platform; i++) { - if (p == entries[i].platform) { - uc_fw->path = entries[i].path; - uc_fw->versions.wanted.major = 
entries[i].major; - uc_fw->versions.wanted.minor = entries[i].minor; - uc_fw->versions.wanted.patch = entries[i].patch; - uc_fw->full_ver_required = entries[i].full_ver_required; - - if (uc_fw->type == XE_UC_FW_TYPE_GSC) - uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY; - else - uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE; - - break; - } + if (p != entries[i].platform) + continue; + + if (entries[i].gt_type != XE_GT_TYPE_ANY && + entries[i].gt_type != gt->info.type) + continue; + + uc_fw->path = entries[i].path; + uc_fw->versions.wanted.major = entries[i].major; + uc_fw->versions.wanted.minor = entries[i].minor; + uc_fw->versions.wanted.patch = entries[i].patch; + uc_fw->full_ver_required = entries[i].full_ver_required; + + if (uc_fw->type == XE_UC_FW_TYPE_GSC) + uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY; + else + uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE; + + break; } } diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 60303998bd61..80e56e232685 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2049,7 +2049,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE && - args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) + args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && + !xe->info.needs_scratch)) return -EINVAL; if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) && @@ -2201,6 +2202,20 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) } #endif +static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags) +{ + if (!xe_vm_in_fault_mode(vm)) + return false; + + if (!xe_vm_has_scratch(vm)) + return false; + + if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE) + return false; + + return true; +} + /* * Create operations list from IOCTL arguments, setup operations fields so parse * and commit steps are decoupled from IOCTL arguments. This step can fail. 
@@ -2273,6 +2288,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; op->map.pat_index = pat_index; + op->map.invalidate_on_bind = + __xe_vm_needs_clear_scratch_pages(vm, flags); } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { op->prefetch.region = prefetch_region; } @@ -2472,8 +2489,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, return PTR_ERR(vma); op->map.vma = vma; - if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) && - !op->map.is_cpu_addr_mirror) + if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && + !op->map.is_cpu_addr_mirror) || + op->map.invalidate_on_bind) xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; @@ -2726,9 +2744,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, switch (op->base.op) { case DRM_GPUVA_OP_MAP: - err = vma_lock_and_validate(exec, op->map.vma, - !xe_vm_in_fault_mode(vm) || - op->map.immediate); + if (!op->map.invalidate_on_bind) + err = vma_lock_and_validate(exec, op->map.vma, + !xe_vm_in_fault_mode(vm) || + op->map.immediate); break; case DRM_GPUVA_OP_REMAP: err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va)); @@ -3109,7 +3128,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror && (!xe_vm_in_fault_mode(vm) || - !IS_ENABLED(CONFIG_DRM_GPUSVM)))) { + !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) { err = -EINVAL; goto free_bind_ops; } @@ -3243,7 +3262,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, XE_64K_PAGE_MASK) || XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) || XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) { - return -EINVAL; + return -EINVAL; } } @@ -3251,7 +3270,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, if (bo->cpu_caching) { if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) { - return -EINVAL; + return -EINVAL; } } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) { /* @@ -3260,7 +3279,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, * how it was mapped on the CPU. Just assume is it * potentially cached on CPU side. */ - return -EINVAL; + return -EINVAL; } /* If a BO is protected it can only be mapped if the key is still valid */ @@ -3847,6 +3866,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p) } drm_puts(p, "\n"); + + if (drm_coredump_printer_is_full(p)) + return; } } diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 84fa41b9fa20..1662604c4486 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -330,6 +330,8 @@ struct xe_vma_op_map { bool is_cpu_addr_mirror; /** @dumpable: whether BO is dumped on GPU hang */ bool dumpable; + /** @invalidate_on_bind: invalidate the VMA before bind */ + bool invalidate_on_bind; /** @pat_index: The pat index to use for this operation.
*/ u16 pat_index; }; diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index b1f81dca610d..e421a74fb87c 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size) */ static void resize_vram_bar(struct xe_device *xe) { - u64 force_vram_bar_size = xe_modparam.force_vram_bar_size; + int force_vram_bar_size = xe_modparam.force_vram_bar_size; struct pci_dev *pdev = to_pci_dev(xe->drm.dev); struct pci_bus *root = pdev->bus; resource_size_t current_size; @@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe) if (!bar_size_mask) return; + if (force_vram_bar_size < 0) + return; + /* set to a specific size? */ if (force_vram_bar_size) { u32 bar_size_bit; diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 24f644c0a673..6f6563cc7430 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -230,6 +230,18 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), }, + /* Xe2_HPG */ + + { XE_RTP_NAME("16025250150"), + XE_RTP_RULES(GRAPHICS_VERSION(2001)), + XE_RTP_ACTIONS(SET(LSN_VC_REG2, + LSN_LNI_WGT(1) | + LSN_LNE_WGT(1) | + LSN_DIM_X_WGT(1) | + LSN_DIM_Y_WGT(1) | + LSN_DIM_Z_WGT(1))) + }, + /* Xe2_HPM */ { XE_RTP_NAME("16021867713"), diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 9b9e176992a8..9efc5accd43d 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -57,3 +57,5 @@ no_media_l3 MEDIA_VERSION(3000) GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0) 16023105232 GRAPHICS_VERSION_RANGE(2001, 3001) MEDIA_VERSION_RANGE(1301, 3000) +16026508708 GRAPHICS_VERSION_RANGE(1200, 3001) + MEDIA_VERSION_RANGE(1300, 3000) diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index dbecca9bdd54..cfabf5e2a0bb 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO bool "ZynqMP DisplayPort Audio Support" depends on DRM_ZYNQMP_DPSUB depends on SND && SND_SOC + depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m select SND_SOC_GENERIC_DMAENGINE_PCM help Choose this option to enable DisplayPort audio support in the ZynqMP diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index a6a4a871f197..238cbb49963e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp, */ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct zynqmp_dp *dp = bridge_to_dp(bridge); @@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge, } if (dp->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, dp->next_bridge, + ret = drm_bridge_attach(encoder, dp->next_bridge, bridge, flags); if (ret < 0) goto error; @@ -2466,10 +2467,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub) dp->reset = devm_reset_control_get(dp->dev, NULL); if (IS_ERR(dp->reset)) { - if (PTR_ERR(dp->reset) != -EPROBE_DEFER) - dev_err(dp->dev, "failed to get reset: %ld\n", - PTR_ERR(dp->reset)); - ret = PTR_ERR(dp->reset); + ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset), + "failed to get reset\n"); goto err_free; } diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c index f07ff4eb3a6d..1a46a046103f 100644 --- 
a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c @@ -45,7 +45,6 @@ struct zynqmp_dpsub_audio { struct { struct snd_soc_dai_link_component cpu; - struct snd_soc_dai_link_component codec; struct snd_soc_dai_link_component platform; } components[ZYNQMP_NUM_PCMS]; @@ -403,10 +402,8 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub) link->num_cpus = 1; link->cpus[0].dai_name = audio->dai_name; - link->codecs = &audio->components[i].codec; + link->codecs = &snd_soc_dummy_dlc; link->num_codecs = 1; - link->codecs[0].name = "snd-soc-dummy"; - link->codecs[0].dai_name = "snd-soc-dummy-dai"; link->platforms = &audio->components[i].platform; link->num_platforms = 1; diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c index 2df42406430d..438532a047e6 100644 --- a/drivers/platform/arm64/acer-aspire1-ec.c +++ b/drivers/platform/arm64/acer-aspire1-ec.c @@ -366,7 +366,8 @@ static const struct power_supply_desc aspire_ec_adp_psy_desc = { * USB-C DP Alt mode HPD. */ -static int aspire_ec_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int aspire_ec_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; } @@ -451,9 +452,9 @@ static int aspire_ec_probe(struct i2c_client *client) int ret; u8 tmp; - ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); - if (!ec) - return -ENOMEM; + ec = devm_drm_bridge_alloc(dev, struct aspire_ec, bridge, &aspire_ec_bridge_funcs); + if (IS_ERR(ec)) + return PTR_ERR(ec); ec->client = client; i2c_set_clientdata(client, ec); @@ -496,7 +497,6 @@ static int aspire_ec_probe(struct i2c_client *client) fwnode = device_get_named_child_node(dev, "connector"); if (fwnode) { INIT_WORK(&ec->work, aspire_ec_bridge_update_hpd_work); - ec->bridge.funcs = &aspire_ec_bridge_funcs; ec->bridge.of_node = to_of_node(fwnode); ec->bridge.ops = DRM_BRIDGE_OP_HPD; ec->bridge.type = DRM_MODE_CONNECTOR_USB; diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c index 64117c6367ab..900e9386eceb 100644 --- a/drivers/video/screen_info_generic.c +++ b/drivers/video/screen_info_generic.c @@ -144,3 +144,39 @@ ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, return pos - r; } EXPORT_SYMBOL(screen_info_resources); + +/* + * The meaning of depth and bpp for direct-color formats is + * inconsistent: + * + * - DRM format info specifies depth as the number of color + * bits; including alpha, but not including filler bits. + * - Linux' EFI platform code computes lfb_depth from the + * individual color channels, including the reserved bits. + * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later + * versions use 15. + * - On the kernel command line, 'bpp' of 32 is usually + * XRGB8888 including the filler bits, but 15 is XRGB1555 + * not including the filler bit. + * + * It is not easily possible to fix this in struct screen_info, + * as this could break UAPI. The best solution is to compute + * bits_per_pixel from the color bits, reserved bits and + * reported lfb_depth, whichever is highest. 
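+ *
+ * A worked example (hypothetical values): an XRGB8888 layout with 8-bit
+ * R/G/B channels at positions 16/8/0, an 8-bit reserved field at
+ * position 24 and a reported lfb_depth of 24 yields
+ * max(max3(8 + 16, 8 + 8, 8 + 0), 8 + 24) = 32, and max(32, 24) = 32
+ * bits per pixel.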
+ */ + +u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si) +{ + u32 bits_per_pixel = si->lfb_depth; + + if (bits_per_pixel > 8) { + bits_per_pixel = max(max3(si->red_size + si->red_pos, + si->green_size + si->green_pos, + si->blue_size + si->blue_pos), + si->rsvd_size + si->rsvd_pos); + bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth); + } + + return bits_per_pixel; +} +EXPORT_SYMBOL(__screen_info_lfb_bits_per_pixel); diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h index 6002c5666031..cf17646c1310 100644 --- a/include/drm/bridge/analogix_dp.h +++ b/include/drm/bridge/analogix_dp.h @@ -10,16 +10,18 @@ #include <drm/drm_crtc.h> struct analogix_dp_device; +struct drm_dp_aux; enum analogix_dp_devtype { EXYNOS_DP, RK3288_DP, RK3399_EDP, + RK3588_EDP, }; static inline bool is_rockchip(enum analogix_dp_devtype type) { - return type == RK3288_DP || type == RK3399_EDP; + return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP; } struct analogix_dp_plat_data { @@ -48,4 +50,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp); int analogix_dp_start_crc(struct drm_connector *connector); int analogix_dp_stop_crc(struct drm_connector *connector); +struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux); +struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp); + #endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index c413ef68f9a3..3001c0b6e7bb 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1025,6 +1025,7 @@ #define DP_EDP_GENERAL_CAP_2 0x703 # define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) # define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4) +# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */ #define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ # define DP_EDP_X_REGION_CAP_MASK (0xf << 0) diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 5ae4241959f2..d9614e2c8939 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -528,13 +528,72 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, void *buffer, size_t size); /** + * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD + * @aux: DisplayPort AUX channel (SST or MST) + * @offset: address of the (first) register to read + * @buffer: buffer to store the register values + * @size: number of bytes in @buffer + * + * Returns zero (0) on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. + */ +static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux, + unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, offset, buffer, size); + if (ret < 0) + return ret; + if (ret < size) + return -EPROTO; + + return 0; +} + +/** + * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD + * @aux: DisplayPort AUX channel (SST or MST) + * @offset: address of the (first) register to write + * @buffer: buffer containing the values to write + * @size: number of bytes in @buffer + * + * Returns zero (0) on success, or a negative error + * code on failure. 
-EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. + */ +static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux, + unsigned int offset, + void *buffer, size_t size) +{ + int ret; + + ret = drm_dp_dpcd_write(aux, offset, buffer, size); + if (ret < 0) + return ret; + if (ret < size) + return -EPROTO; + + return 0; +} + +/** * drm_dp_dpcd_readb() - read a single byte from the DPCD * @aux: DisplayPort AUX channel * @offset: address of the register to read * @valuep: location where the value of the register will be stored * * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. + * error code on failure. In most cases you should use + * drm_dp_dpcd_read_byte() instead. */ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, unsigned int offset, u8 *valuep) @@ -549,7 +608,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, * @value: value to write to the register * * Returns the number of bytes transferred (1) on success, or a negative - * error code on failure. + * error code on failure. In most cases you should use + * drm_dp_dpcd_write_byte() instead. */ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, unsigned int offset, u8 value) @@ -557,6 +617,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, return drm_dp_dpcd_write(aux, offset, &value, 1); } +/** + * drm_dp_dpcd_read_byte() - read a single byte from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to read + * @valuep: location where the value of the register will be stored + * + * Returns zero (0) on success, or a negative error code on failure. + */ +static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux, + unsigned int offset, u8 *valuep) +{ + return drm_dp_dpcd_read_data(aux, offset, valuep, 1); +} + +/** + * drm_dp_dpcd_write_byte() - write a single byte to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the register to write + * @value: value to write to the register + * + * Returns zero (0) on success, or a negative error code on failure.
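+ *
+ * A minimal usage sketch (DP_SET_POWER is chosen purely as an
+ * illustration)::
+ *
+ *	int err = drm_dp_dpcd_write_byte(aux, DP_SET_POWER, DP_SET_POWER_D0);
+ *
+ *	if (err)
+ *		return err;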
+ */ +static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux, + unsigned int offset, u8 value) +{ + return drm_dp_dpcd_write_data(aux, offset, &value, 1); +} + int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, u8 dpcd[DP_RECEIVER_CAP_SIZE]); @@ -566,6 +654,8 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy, u8 link_status[DP_LINK_STATUS_SIZE]); +int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision); +int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision); int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux, int vcpid, u8 start_time_slot, u8 time_slot_count); diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h index 57e3b18c15ec..09145c9ee9fc 100644 --- a/include/drm/display/drm_hdmi_helper.h +++ b/include/drm/display/drm_hdmi_helper.h @@ -28,4 +28,10 @@ unsigned long long drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode, unsigned int bpc, enum hdmi_colorspace fmt); +void +drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate, + unsigned int sample_rate, + unsigned int *out_n, + unsigned int *out_cts); + #endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 4c673f0698fe..38636a593c9d 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -625,6 +625,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state, struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx); struct drm_crtc * drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index d4c75d59fa12..4e418a29a9ff 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -73,7 +73,7 @@ struct drm_bridge_funcs { * * Zero on success, error code on failure. */ - int (*attach)(struct drm_bridge *bridge, + int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags); /** @@ -681,8 +681,10 @@ struct drm_bridge_funcs { /** * @hdmi_audio_startup: * - * Called when ASoC starts an audio stream setup. The - * @hdmi_audio_startup() is optional. + * Called when ASoC starts an audio stream setup. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. * * Returns: * 0 on success, a negative error code otherwise @@ -693,8 +695,10 @@ struct drm_bridge_funcs { /** * @hdmi_audio_prepare: * Configures HDMI-encoder for audio stream. Can be called multiple - * times for each setup. Mandatory if HDMI audio is enabled in the - * bridge's configuration. + * times for each setup. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. * * Returns: * 0 on success, a negative error code otherwise @@ -707,8 +711,10 @@ struct drm_bridge_funcs { /** * @hdmi_audio_shutdown: * - * Shut down the audio stream. Mandatory if HDMI audio is enabled in - * the bridge's configuration. + * Shut down the audio stream. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. 
* * Returns: * 0 on success, a negative error code otherwise @@ -719,8 +725,10 @@ struct drm_bridge_funcs { /** * @hdmi_audio_mute_stream: * - * Mute/unmute HDMI audio stream. The @hdmi_audio_mute_stream callback - * is optional. + * Mute/unmute HDMI audio stream. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops. * * Returns: * 0 on success, a negative error code otherwise @@ -730,6 +738,65 @@ struct drm_bridge_funcs { bool enable, int direction); /** + * @dp_audio_startup: + * + * Called when ASoC starts a DisplayPort audio stream setup. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_startup)(struct drm_connector *connector, + struct drm_bridge *bridge); + + /** + * @dp_audio_prepare: + * Configures DisplayPort audio stream. Can be called multiple + * times for each setup. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_prepare)(struct drm_connector *connector, + struct drm_bridge *bridge, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms); + + /** + * @dp_audio_shutdown: + * + * Shut down the DisplayPort audio stream. + * + * This callback is optional but it must be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + void (*dp_audio_shutdown)(struct drm_connector *connector, + struct drm_bridge *bridge); + + /** + * @dp_audio_mute_stream: + * + * Mute/unmute DisplayPort audio stream. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ + int (*dp_audio_mute_stream)(struct drm_connector *connector, + struct drm_bridge *bridge, + bool enable, int direction); + + /** * @debugfs_init: * * Allows bridges to create bridge-specific debugfs files. @@ -814,6 +881,32 @@ enum drm_bridge_ops { * drivers. */ DRM_BRIDGE_OP_HDMI = BIT(4), + /** + * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations. + * Bridges that set this flag must implement the + * &drm_bridge_funcs->hdmi_audio_prepare and + * &drm_bridge_funcs->hdmi_audio_shutdown callbacks. + * + * Note: currently there can be at most one bridge in a chain that sets + * this bit. This is to simplify corresponding glue code in connector + * drivers. Also it is not possible to have a bridge in the chain that + * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this + * flag. + */ + DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5), + /** + * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations. + * Bridges that set this flag must implement the + * &drm_bridge_funcs->dp_audio_prepare and + * &drm_bridge_funcs->dp_audio_shutdown callbacks. + * + * Note: currently there can be at most one bridge in a chain that sets + * this bit. This is to simplify corresponding glue code in connector + * drivers. Also it is not possible to have a bridge in the chain that + * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this + * flag. 
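+ *
+ * A bridge advertising DisplayPort audio would typically combine this
+ * flag with its other capabilities (sketch)::
+ *
+ *	bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+ *		      DRM_BRIDGE_OP_DP_AUDIO;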
+ */ + DRM_BRIDGE_OP_DP_AUDIO = BIT(6), }; /** @@ -840,6 +933,18 @@ struct drm_bridge { const struct drm_bridge_timings *timings; /** @funcs: control functions */ const struct drm_bridge_funcs *funcs; + + /** + * @container: Pointer to the private driver struct embedding this + * @struct drm_bridge. + */ + void *container; + + /** + * @refcount: reference count of users referencing this bridge. + */ + struct kref refcount; + /** @driver_private: pointer to the bridge driver's internal context */ void *driver_private; /** @ops: bitmask of operations supported by the bridge */ @@ -914,23 +1019,28 @@ struct drm_bridge { unsigned int max_bpc; /** - * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec + * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if + * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set. */ struct device *hdmi_audio_dev; /** * @hdmi_audio_max_i2s_playback_channels: maximum number of playback - * I2S channels for the HDMI codec + * I2S channels for the @DRM_BRIDGE_OP_HDMI_AUDIO or + * @DRM_BRIDGE_OP_DP_AUDIO. */ int hdmi_audio_max_i2s_playback_channels; /** - * @hdmi_audio_spdif_playback: set if HDMI codec has S/PDIF playback port + * @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback + * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO. */ unsigned int hdmi_audio_spdif_playback : 1; /** - * @hdmi_audio_dai_port: sound DAI port, -1 if it is not enabled + * @hdmi_audio_dai_port: sound DAI port for either of + * @DRM_BRIDGE_OP_HDMI_AUDIO and @DRM_BRIDGE_OP_DP_AUDIO, -1 if it is + * not used. */ int hdmi_audio_dai_port; }; @@ -941,6 +1051,30 @@ drm_priv_to_bridge(struct drm_private_obj *priv) return container_of(priv, struct drm_bridge, base); } +struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge); +void drm_bridge_put(struct drm_bridge *bridge); + +void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_bridge_funcs *funcs); + +/** + * devm_drm_bridge_alloc - Allocate and initialize a bridge + * @dev: struct device of the bridge device + * @type: the type of the struct which contains struct &drm_bridge + * @member: the name of the &drm_bridge within @type + * @funcs: callbacks for this bridge + * + * The reference count of the returned bridge is initialized to 1. This + * reference will be automatically dropped via devm (by calling + * drm_bridge_put()) when @dev is removed. + * + * Returns: + * Pointer to new bridge, or ERR_PTR on failure. + */ +#define devm_drm_bridge_alloc(dev, type, member, funcs) \ + ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \ + offsetof(type, member), funcs)) + void drm_bridge_add(struct drm_bridge *bridge); int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); @@ -958,6 +1092,38 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np) #endif /** + * drm_bridge_get_current_state() - Get the current bridge state + * @bridge: bridge object + * + * This function must be called with the modeset lock held. + * + * RETURNS: + * + * The current bridge state, or NULL if there is none. + */ +static inline struct drm_bridge_state * +drm_bridge_get_current_state(struct drm_bridge *bridge) +{ + if (!bridge) + return NULL; + + /* + * Only atomic bridges will have bridge->base initialized by + * drm_atomic_private_obj_init(), so we need to make sure we're + * working with one before we try to use the lock. 
+ */ + if (!bridge->funcs || !bridge->funcs->atomic_reset) + return NULL; + + drm_modeset_lock_assert_held(&bridge->base.lock); + + if (!bridge->base.state) + return NULL; + + return drm_priv_to_bridge_state(bridge->base.state); +} + +/** * drm_bridge_get_next_bridge() - Get the next bridge in the chain * @bridge: bridge object * @@ -1108,4 +1274,7 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif +void drm_bridge_debugfs_params(struct dentry *root); +void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder); + #endif diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h new file mode 100644 index 000000000000..6c35b479ec2a --- /dev/null +++ b/include/drm/drm_bridge_helper.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __DRM_BRIDGE_HELPER_H_ +#define __DRM_BRIDGE_HELPER_H_ + +struct drm_bridge; +struct drm_modeset_acquire_ctx; + +int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge, + struct drm_modeset_acquire_ctx *ctx); + +#endif // __DRM_BRIDGE_HELPER_H_ diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 6ea54a578cda..e2f894f1b90a 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -65,6 +65,28 @@ struct drm_device { struct device *dev; /** + * @dma_dev: + * + * Device for DMA operations. Only required if the device @dev + * cannot perform DMA by itself. Should be NULL otherwise. Call + * drm_dev_dma_dev() to get the DMA device instead of using this + * field directly. Call drm_dev_set_dma_dev() to set this field. + * + * DRM devices are sometimes bound to virtual devices that cannot + * perform DMA by themselves. Drivers should set this field to the + * respective DMA controller. + * + * Devices on USB and other peripheral busses also cannot perform + * DMA by themselves. The @dma_dev field should point to the bus + * controller that does DMA on behalf of such a device. Required + * for importing buffers via dma-buf. + * + * If set, the DRM core automatically releases the reference on the + * device. + */ + struct device *dma_dev; + + /** * @managed: * * Managed resources linked to the lifetime of this &drm_device as @@ -327,4 +349,23 @@ struct drm_device { struct dentry *debugfs_root; }; +void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev); + +/** + * drm_dev_dma_dev - returns the DMA device for a DRM device + * @dev: DRM device + * + * Returns the DMA device of the given DRM device. By default, this is + * the DRM device's parent. See drm_dev_set_dma_dev(). + * + * Returns: + * A DMA-capable device for the DRM device.
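+ *
+ * A sketch of a typical consumer, allocating coherent memory for DMA
+ * (names and size are illustrative only)::
+ *
+ *	struct device *dma_dev = drm_dev_dma_dev(drm);
+ *
+ *	vaddr = dma_alloc_coherent(dma_dev, size, &dma_handle, GFP_KERNEL);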
+ */ +static inline struct device *drm_dev_dma_dev(struct drm_device *dev) +{ + if (dev->dma_dev) + return dev->dma_dev; + return dev->dev; +} + #endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index eaac5e665892..b38409670868 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -437,7 +437,7 @@ bool drm_detect_monitor_audio(const struct edid *edid); enum hdmi_quantization_range drm_default_rgb_quant_range(const struct drm_display_mode *mode); int drm_add_modes_noedid(struct drm_connector *connector, - int hdisplay, int vdisplay); + unsigned int hdisplay, unsigned int vdisplay); int drm_edid_header_is_valid(const void *edid); bool drm_edid_is_valid(struct edid *edid); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index bcd54020d6ba..a3133a08267c 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -159,7 +159,8 @@ struct drm_gem_object_funcs { * @vmap: * * Returns a virtual address for the buffer. Used by the - * drm_gem_dmabuf_vmap() helper. + * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -169,7 +170,8 @@ struct drm_gem_object_funcs { * @vunmap: * * Releases the address previously returned by @vmap. Used by the - * drm_gem_dmabuf_vunmap() helper. + * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -192,7 +194,8 @@ struct drm_gem_object_funcs { * @evict: * * Evicts gem object out from memory. Used by the drm_gem_object_evict() - * helper. Returns 0 on success, -errno otherwise. + * helper. Returns 0 on success, -errno otherwise. Called with a held + * GEM reservation lock. * * This callback is optional. */ @@ -537,8 +540,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, void drm_gem_lock(struct drm_gem_object *obj); void drm_gem_unlock(struct drm_gem_object *obj); -int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); -void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); @@ -561,7 +564,7 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)); -int drm_gem_evict(struct drm_gem_object *obj); +int drm_gem_evict_locked(struct drm_gem_object *obj); /** * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index cef5a6b5a4d6..b4f993da3cae 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -37,7 +37,18 @@ struct drm_gem_shmem_object { * Reference count on the pages table. * The pages are put when the count reaches zero. */ - unsigned int pages_use_count; + refcount_t pages_use_count; + + /** + * @pages_pin_count: + * + * Reference count on the pinned pages table. + * + * Pages are hard-pinned and reside in memory if count + * greater than zero. Otherwise, when count is zero, the pages are + * allowed to be evicted and purged by memory shrinker. + */ + refcount_t pages_pin_count; /** * @madv: State for madvise @@ -71,7 +82,7 @@ struct drm_gem_shmem_object { * Reference count on the virtual address. 
* The address are un-mapped when the count reaches zero. */ - unsigned int vmap_use_count; + refcount_t vmap_use_count; /** * @pages_mark_dirty_on_put: @@ -102,28 +113,28 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de struct vfsmount *gemfs); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); +int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); +int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) { return (shmem->madv > 0) && - !shmem->vmap_use_count && shmem->sgt && + !refcount_read(&shmem->pages_pin_count) && shmem->sgt && !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base); } -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); @@ -214,12 +225,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ } /* - * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap() + * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked() * @obj: GEM object * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store. * - * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vmap handler. + * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vmap handler. * * Returns: * 0 on success or a negative error code on failure. @@ -229,7 +240,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_vmap(shmem, map); + return drm_gem_shmem_vmap_locked(shmem, map); } /* @@ -237,15 +248,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, * @obj: GEM object * @map: Kernel virtual address where the SHMEM GEM object was mapped * - * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vunmap handler. + * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vunmap handler. 
*/ static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_vunmap(shmem, map); + drm_gem_shmem_vunmap_locked(shmem, map); } /** diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 1c62d1d4458c..4948379237e9 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -9,6 +9,7 @@ #include <kunit/test.h> +struct drm_connector; struct drm_crtc_funcs; struct drm_crtc_helper_funcs; struct drm_device; @@ -118,6 +119,13 @@ drm_kunit_helper_create_crtc(struct kunit *test, const struct drm_crtc_funcs *funcs, const struct drm_crtc_helper_funcs *helper_funcs); +int drm_kunit_helper_enable_crtc_connector(struct kunit *test, + struct drm_device *drm, + struct drm_crtc *crtc, + struct drm_connector *connector, + const struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx); + int drm_kunit_add_mode_destroy_action(struct kunit *test, struct drm_display_mode *mode); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index bd40a443385c..b37860f4a895 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -293,6 +293,7 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx, const void *payload, size_t size); ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, size_t num_params, void *data, size_t size); +u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format); #define mipi_dsi_msleep(ctx, delay) \ do { \ @@ -417,28 +418,6 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); } while (0) /** - * mipi_dsi_dcs_write_seq - transmit a DCS command with payload - * - * This macro will print errors for you and will RETURN FROM THE CALLING - * FUNCTION (yes this is non-intuitive) upon error. - * - * Because of the non-intuitive return behavior, THIS MACRO IS DEPRECATED. - * Please replace calls of it with mipi_dsi_dcs_write_seq_multi(). - * - * @dsi: DSI peripheral device - * @cmd: Command - * @seq: buffer containing data to be transmitted - */ -#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \ - do { \ - static const u8 d[] = { cmd, seq }; \ - int ret; \ - ret = mipi_dsi_dcs_write_buffer_chatty(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) \ - return ret; \ - } while (0) - -/** * mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload * * This macro will print errors for you and error handling is optimized for diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 271765e2e9f2..4b8f0370b79b 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -532,8 +532,8 @@ struct drm_mode_config { */ struct list_head privobj_list; - int min_width, min_height; - int max_width, max_height; + unsigned int min_width, min_height; + unsigned int max_width, max_height; const struct drm_mode_config_funcs *funcs; /* output poll support */ diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index a9c042c8dea1..843fb756a295 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/list.h> #include <linux/mutex.h> +#include <linux/kref.h> struct backlight_device; struct dentry; @@ -266,20 +267,60 @@ struct drm_panel { * If true then the panel has been enabled. */ bool enabled; + + /** + * @container: Pointer to the private driver struct embedding this + * @struct drm_panel. 
+ */ + void *container; + + /** + * @refcount: reference count of users referencing this panel. + */ + struct kref refcount; }; +void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset, + const struct drm_panel_funcs *funcs, + int connector_type); + +/** + * devm_drm_panel_alloc - Allocate and initialize a refcounted panel. + * + * @dev: struct device of the panel device + * @type: the type of the struct which contains struct &drm_panel + * @member: the name of the &drm_panel within @type + * @funcs: callbacks for this panel + * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to + * the panel interface + * + * The reference count of the returned panel is initialized to 1. This + * reference will be automatically dropped via devm (by calling + * drm_panel_put()) when @dev is removed. + * + * Returns: + * Pointer to container structure embedding the panel, ERR_PTR on failure. + */ +#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \ + ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \ + offsetof(type, member), funcs, \ + connector_type)) + void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type); +struct drm_panel *drm_panel_get(struct drm_panel *panel); +void drm_panel_put(struct drm_panel *panel); + void drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); -int drm_panel_prepare(struct drm_panel *panel); -int drm_panel_unprepare(struct drm_panel *panel); +void drm_panel_prepare(struct drm_panel *panel); +void drm_panel_unprepare(struct drm_panel *panel); -int drm_panel_enable(struct drm_panel *panel); -int drm_panel_disable(struct drm_panel *panel); +void drm_panel_enable(struct drm_panel *panel); +void drm_panel_disable(struct drm_panel *panel); int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector); diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h index ff78d00c3da5..310c88c4d336 100644 --- a/include/drm/drm_panic.h +++ b/include/drm/drm_panic.h @@ -40,6 +40,16 @@ struct drm_scanout_buffer { struct iosys_map map[DRM_FORMAT_MAX_PLANES]; /** + * @pages: Optional, if the scanout buffer is not mapped, set this field + * to the array of pages of the scanout buffer. The panic code will use + * kmap_local_page_try_from_panic() to map one page at a time to write + * all the pixels. This array shouldn't be allocated from the + * get_scanout_buffer() callback. + * The scanout buffer should be in linear format. + */ + struct page **pages; + + /** * @width: Width of the scanout buffer, in pixels. */ unsigned int width; @@ -57,7 +67,7 @@ struct drm_scanout_buffer { /** * @set_pixel: Optional function, to set a pixel color on the * framebuffer. It allows to handle special tiling format inside the - * driver. + * driver. It takes precedence over the @map and @pages fields. */ void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x, unsigned int y, u32 color); diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index f31eba1c7cab..ab017b05e175 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -345,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter) } /** + * drm_coredump_printer_is_full() - DRM coredump printer output is full + * @p: DRM coredump printer + * + * DRM printer output is full, useful to short-circuit coredump printing once + * the printer is full.
+ * + * RETURNS: + * True if DRM coredump printer output buffer is full, False otherwise + */ +static inline bool drm_coredump_printer_is_full(struct drm_printer *p) +{ + struct drm_print_iterator *iterator = p->arg; + + if (p->printfn != __drm_printfn_coredump) + return true; + + return !iterator->remain; +} + +/** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file * @f: the &struct seq_file to output to * diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index d6ce7b218b77..840ae5f798c2 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force); -int drmm_kms_helper_poll_init(struct drm_device *dev); +void drmm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_fini(struct drm_device *dev); bool drm_helper_hpd_irq_event(struct drm_device *dev); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 50928a7ae98e..1a7e377d4cbb 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -383,8 +383,15 @@ struct drm_sched_job { struct xarray dependencies; }; +/** + * enum drm_gpu_sched_stat - the scheduler's status + * + * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use. + * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded. + * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore. + */ enum drm_gpu_sched_stat { - DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ + DRM_GPU_SCHED_STAT_NONE, DRM_GPU_SCHED_STAT_NOMINAL, DRM_GPU_SCHED_STAT_ENODEV, }; @@ -410,10 +417,36 @@ struct drm_sched_backend_ops { struct drm_sched_entity *s_entity); /** - * @run_job: Called to execute the job once all of the dependencies - * have been resolved. This may be called multiple times, if - * timedout_job() has happened and drm_sched_job_recovery() - * decides to try it again. + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. + * + * @sched_job: the job to run + * + * The deprecated drm_sched_resubmit_jobs() (called by &struct + * drm_sched_backend_ops.timedout_job) can invoke this again with the + * same parameters. Using this is discouraged because it violates + * dma_fence rules, notably dma_fence_init() has to be called on + * already initialized fences for a second time. Moreover, this is + * dangerous because attempts to allocate memory might deadlock with + * memory management code waiting for the reset to complete. + * + * TODO: Document what drivers should do / use instead. + * + * This method is called in a workqueue context - either from the + * submit_wq the driver passed through drm_sched_init(), or, if the + * driver passed NULL, a separate, ordered workqueue the scheduler + * allocated. + * + * Note that the scheduler expects to 'inherit' its own reference to + * this fence from the callback. It does not invoke an extra + * dma_fence_get() on it. Consequently, this callback must take a + * reference for the scheduler, and additional ones for the driver's + * respective needs. + * + * Return: + * * On success: dma_fence the driver must signal once the hardware has + * completed the job ("hardware fence"). + * * On failure: NULL or an ERR_PTR. 
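+ *
+ * A minimal sketch of the reference-handling contract described above
+ * (the my_ring_*() helpers are hypothetical)::
+ *
+ *	struct dma_fence *fence = my_ring_submit(sched_job);
+ *
+ *	dma_fence_get(fence);   // extra reference kept by the driver
+ *	my_ring_track(sched_job, fence);
+ *	return fence;           // the scheduler inherits the initial reference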
@@ -421,43 +454,52 @@ struct drm_sched_backend_ops {
 	 * @timedout_job: Called when a job has taken too long to execute,
 	 * to trigger GPU recovery.
 	 *
-	 * This method is called in a workqueue context.
+	 * @sched_job: The job that has timed out
+	 *
+	 * Drivers typically issue a reset to recover from GPU hangs.
+	 * This procedure looks very different depending on whether a firmware
+	 * or a hardware scheduler is being used.
+	 *
+	 * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+	 * scheduler has one entity. Hence, the steps taken typically look as
+	 * follows:
 	 *
-	 * Drivers typically issue a reset to recover from GPU hangs, and this
-	 * procedure usually follows the following workflow:
+	 * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+	 *    scheduler workqueues and cancel the timeout work, guaranteeing
+	 *    that nothing is queued while the ring is being removed.
+	 * 2. Remove the ring. The firmware will make sure that the
+	 *    corresponding parts of the hardware are reset, and that other
+	 *    rings are not impacted.
+	 * 3. Kill the entity and the associated scheduler.
 	 *
-	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
-	 *    scheduler thread and cancel the timeout work, guaranteeing that
-	 *    nothing is queued while we reset the hardware queue
-	 * 2. Try to gracefully stop non-faulty jobs (optional)
-	 * 3. Issue a GPU reset (driver-specific)
-	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
-	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
-	 *    jobs can be queued, and the scheduler thread is unblocked
+	 *
+	 * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+	 * one or more entities to one ring. This implies that not all entities
+	 * associated with the affected scheduler can be torn down, because
+	 * this would effectively also affect innocent userspace processes
+	 * which did not submit faulty jobs.
+	 *
+	 * Consequently, the procedure to recover with a hardware scheduler
+	 * should look like this:
+	 *
+	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+	 * 2. Kill the entity the faulty job stems from.
+	 * 3. Issue a GPU reset on all faulty rings (driver-specific).
+	 * 4. Re-submit jobs on all schedulers impacted by the reset,
+	 *    re-submitting them to the entities which are still alive.
+	 * 5. Restart all schedulers that were stopped in step #1 using
+	 *    drm_sched_start().
 	 *
 	 * Note that some GPUs have distinct hardware queues but need to reset
 	 * the GPU globally, which requires extra synchronization between the
-	 * timeout handler of the different &drm_gpu_scheduler. One way to
-	 * achieve this synchronization is to create an ordered workqueue
-	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
-	 * queue to drm_sched_init(), to guarantee that timeout handlers are
-	 * executed sequentially. The above workflow needs to be slightly
-	 * adjusted in that case:
-	 *
-	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
-	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
-	 *    the reset (optional)
-	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
-	 * 4. Re-submit jobs on all schedulers impacted by the reset using
-	 *    drm_sched_resubmit_jobs()
-	 * 5. Restart all schedulers that were stopped in step #1 using
-	 *    drm_sched_start()
+	 * timeout handlers of different schedulers. One way to achieve this
+	 * synchronization is to create an ordered workqueue (using
+	 * alloc_ordered_workqueue()) at the driver level, and pass this queue
+	 * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+	 * that timeout handlers are executed sequentially.
 	 *
-	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
-	 * and the underlying driver has started or completed recovery.
+	 * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
 	 *
-	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
-	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
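A minimal sketch of the hardware-scheduler recovery sequence above, assuming a single affected scheduler. drm_sched_stop() and drm_sched_start() are the real entry points (their exact signatures have varied across kernel versions); mydev and the my_*() helpers are hypothetical.

	static enum drm_gpu_sched_stat
	mydrv_timedout_job(struct drm_sched_job *sched_job)
	{
		struct my_device *mydev = job_to_mydev(sched_job);	/* hypothetical */

		/* 1. Park the scheduler so nothing new runs during the reset. */
		drm_sched_stop(&mydev->sched, sched_job);
		/* 2. Kill the entity the faulty job stems from (driver policy). */
		my_kill_entity(mydev, sched_job);
		/* 3. Issue the GPU reset (driver-specific). */
		my_gpu_reset(mydev);
		/* 4. Re-submit surviving jobs to the entities still alive. */
		my_resubmit_jobs(mydev);
		/* 5. Unpark the scheduler. */
		drm_sched_start(&mydev->sched, 0);

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}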
diff --git a/include/drm/intel/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
 				      unsigned int pg_start,
 				      unsigned int flags);
 void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+				     bool *is_present, bool *is_local);

 /* Special gtt memory types */
 #define AGP_DCACHE_MEMORY	1
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 903cd1030110..cf027558b6db 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -172,7 +172,5 @@ struct ttm_bo_kmap_obj {
 * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
- * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
@@ -185,7 +183,6 @@ struct ttm_operation_ctx {
 	bool no_wait_gpu;
 	bool gfp_retry_mayfail;
 	bool allow_res_evict;
-	bool force_alloc;
 	struct dma_resv *resv;
 	uint64_t bytes_moved;
 };
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bd..544f8f8c3f44 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -35,15 +35,6 @@ struct dma_buf_attachment;
 */
 struct dma_buf_ops {
	/**
-	 * @cache_sgt_mapping:
-	 *
-	 * If true the framework will cache the first mapping made for each
-	 * attachment. This avoids creating mappings for attachments multiple
-	 * times.
-	 */
-	bool cache_sgt_mapping;
-
-	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
@@ -493,8 +484,6 @@ struct dma_buf_attach_ops {
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +503,6 @@ struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
-	struct sg_table *sgt;
-	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
@@ -583,20 +570,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
	return !!dmabuf->ops->pin;
}

-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
-	return !!attach->importer_ops;
-}
-
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					   struct device *dev);
 struct dma_buf_attachment *
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e7ad819962e3..b12776883d14 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -169,8 +169,8 @@ struct dma_fence_ops {
	 * implementation know that there is another driver waiting on the
	 * signal (ie. hw->sw case).
	 *
-	 * This function can be called from atomic context, but not
-	 * from irq context, so normal spinlocks can be used.
+	 * This is called with IRQs disabled, so only spinlocks which disable
+	 * IRQs can be used in the code outside of this callback.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
@@ -239,27 +239,6 @@ struct dma_fence_ops {
	void (*release)(struct dma_fence *fence);

	/**
-	 * @fence_value_str:
-	 *
-	 * Callback to fill in free-form debug info specific to this fence, like
-	 * the sequence number.
-	 *
-	 * This callback is optional.
-	 */
-	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
-	/**
-	 * @timeline_value_str:
-	 *
-	 * Fills in the current value of the timeline as a string, like the
-	 * sequence number. Note that the specific fence passed to this function
-	 * should not matter, drivers should only use it to look up the
-	 * corresponding timeline structures.
- */ - void (*timeline_value_str)(struct dma_fence *fence, - char *str, int size); - - /** * @set_deadline: * * Callback to allow a fence waiter to inform the fence signaler of diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index dd100e849f5e..9a7683d79a4b 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -73,6 +73,14 @@ static inline void *kmap_local_page(struct page *page) return __kmap_local_page_prot(page, kmap_prot); } +static inline void *kmap_local_page_try_from_panic(struct page *page) +{ + if (!PageHighMem(page)) + return page_address(page); + /* If the page is in HighMem, it's not safe to kmap it.*/ + return NULL; +} + static inline void *kmap_local_folio(struct folio *folio, size_t offset) { struct page *page = folio_page(folio, offset / PAGE_SIZE); @@ -180,6 +188,11 @@ static inline void *kmap_local_page(struct page *page) return page_address(page); } +static inline void *kmap_local_page_try_from_panic(struct page *page) +{ + return page_address(page); +} + static inline void *kmap_local_folio(struct folio *folio, size_t offset) { return page_address(&folio->page) + offset; diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 6a4a3cec4638..923d68e07679 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -126,8 +126,17 @@ static inline unsigned int screen_info_video_type(const struct screen_info *si) return VIDEO_TYPE_CGA; } +static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si) +{ + if (si->vesapm_seg < 0xc000) + return 0; + return (si->vesapm_seg << 4) + si->vesapm_off; +} + ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num); +u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si); + #if defined(CONFIG_PCI) void screen_info_apply_fixups(void); struct pci_dev *screen_info_pci_dev(const struct screen_info *si); diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h new file mode 100644 index 000000000000..de67f1c603af --- /dev/null +++ b/include/uapi/drm/asahi_drm.h @@ -0,0 +1,1194 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) The Asahi Linux Contributors + * Copyright (C) 2018-2023 Collabora Ltd. + * Copyright (C) 2014-2018 Broadcom + */ +#ifndef _ASAHI_DRM_H_ +#define _ASAHI_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: Introduction to the Asahi UAPI + * + * This documentation describes the Asahi IOCTLs. + * + * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed + * from Panthor): + * + * - Structures must be aligned on 64-bit/8-byte. If the object is not + * naturally aligned, a padding field must be added. + * - Fields must be explicitly aligned to their natural type alignment with + * pad[0..N] fields. + * - All padding fields will be checked by the driver to make sure they are + * zeroed. + * - Flags can be added, but not removed/replaced. + * - New fields can be added to the main structures (the structures + * directly passed to the ioctl). Those fields can be added at the end of + * the structure, or replace existing padding fields. Any new field being + * added must preserve the behavior that existed before those fields were + * added when a value of zero is passed. 
+ * - New fields can be added to indirect objects (objects pointed by the + * main structure), iff those objects are passed a size to reflect the + * size known by the userspace driver (see + * drm_asahi_cmd_header::size). + * - If the kernel driver is too old to know some fields, those will be + * ignored if zero, and otherwise rejected (and so will be zero on output). + * - If userspace is too old to know some fields, those will be zeroed + * (input) before the structure is parsed by the kernel driver. + * - Each new flag/field addition must come with a driver version update so + * the userspace driver doesn't have to guess which flags are supported. + * - Structures should not contain unions, as this would defeat the + * extensibility of such structures. + * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed + * at the end of the drm_asahi_ioctl_id enum. + */ + +/** + * enum drm_asahi_ioctl_id - IOCTL IDs + * + * Place new ioctls at the end, don't re-order, don't replace or remove entries. + * + * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx + * definitions instead. + */ +enum drm_asahi_ioctl_id { + /** @DRM_ASAHI_GET_PARAMS: Query device properties. */ + DRM_ASAHI_GET_PARAMS = 0, + + /** @DRM_ASAHI_GET_TIME: Query device time. */ + DRM_ASAHI_GET_TIME, + + /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */ + DRM_ASAHI_VM_CREATE, + + /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */ + DRM_ASAHI_VM_DESTROY, + + /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */ + DRM_ASAHI_VM_BIND, + + /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */ + DRM_ASAHI_GEM_CREATE, + + /** + * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a + * given GEM handle. + */ + DRM_ASAHI_GEM_MMAP_OFFSET, + + /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */ + DRM_ASAHI_GEM_BIND_OBJECT, + + /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */ + DRM_ASAHI_QUEUE_CREATE, + + /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */ + DRM_ASAHI_QUEUE_DESTROY, + + /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */ + DRM_ASAHI_SUBMIT, +}; + +#define DRM_ASAHI_MAX_CLUSTERS 64 + +/** + * struct drm_asahi_params_global - Global parameters. + * + * This struct may be queried by drm_asahi_get_params. + */ +struct drm_asahi_params_global { + /** @features: Feature bits from drm_asahi_feature */ + __u64 features; + + /** @gpu_generation: GPU generation, e.g. 13 for G13G */ + __u32 gpu_generation; + + /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */ + __u32 gpu_variant; + + /** + * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or + * 0x21 for 'C1' + */ + __u32 gpu_revision; + + /** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */ + __u32 chip_id; + + /** @num_dies: Number of dies in the SoC */ + __u32 num_dies; + + /** @num_clusters_total: Number of GPU clusters (across all dies) */ + __u32 num_clusters_total; + + /** + * @num_cores_per_cluster: Number of logical cores per cluster + * (including inactive/nonexistent) + */ + __u32 num_cores_per_cluster; + + /** @max_frequency_khz: Maximum GPU core clock frequency */ + __u32 max_frequency_khz; + + /** @core_masks: Bitmask of present/enabled cores per cluster */ + __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS]; + + /** + * @vm_start: VM range start VMA. Together with @vm_end, this defines + * the window of valid GPU VAs. Userspace is expected to subdivide VAs + * out of this window. 
+ * + * This window contains all virtual addresses that userspace needs to + * know about. There may be kernel-internal GPU VAs outside this range, + * but that detail is not relevant here. + */ + __u64 vm_start; + + /** @vm_end: VM range end VMA */ + __u64 vm_end; + + /** + * @vm_kernel_min_size: Minimum kernel VMA window size. + * + * When creating a VM, userspace is required to carve out a section of + * virtual addresses (within the range given by @vm_start and + * @vm_end). The kernel will allocate various internal structures + * within the specified VA range. + * + * Allowing userspace to choose the VA range for the kernel, rather than + * the kernel reserving VAs and requiring userspace to cope, can assist + * in implementing SVM. + */ + __u64 vm_kernel_min_size; + + /** + * @max_commands_per_submission: Maximum number of supported commands + * per submission. This mirrors firmware limits. Userspace must split up + * larger command buffers, which may require inserting additional + * synchronization. + */ + __u32 max_commands_per_submission; + + /** + * @max_attachments: Maximum number of drm_asahi_attachment's per + * command + */ + __u32 max_attachments; + + /** + * @command_timestamp_frequency_hz: Timebase frequency for timestamps + * written during command execution, specified via drm_asahi_timestamp + * structures. As this rate is controlled by the firmware, it is a + * queryable parameter. + * + * Userspace must divide by this frequency to convert timestamps to + * seconds, rather than hardcoding a particular firmware's rate. + */ + __u64 command_timestamp_frequency_hz; +}; + +/** + * enum drm_asahi_feature - Feature bits + * + * This covers only features that userspace cannot infer from the architecture + * version. Most features don't need to be here. + */ +enum drm_asahi_feature { + /** + * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader + * loads of unmapped memory will return zero. Shader stores to unmapped + * memory will be silently discarded. Note that only shader load/store + * is affected. Other hardware units are not affected, notably including + * texture sampling. + * + * Soft fault is set when initializing the GPU and cannot be runtime + * toggled. Therefore, it is exposed as a feature bit and not a + * userspace-settable flag on the VM. When soft fault is enabled, + * userspace can speculate memory accesses more aggressively. + */ + DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0, +}; + +/** + * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS + */ +struct drm_asahi_get_params { + /** @param_group: Parameter group to fetch (MBZ) */ + __u32 param_group; + + /** @pad: MBZ */ + __u32 pad; + + /** @pointer: User pointer to write parameter struct */ + __u64 pointer; + + /** + * @size: Size of the user buffer. In case of older userspace, this may + * be less than sizeof(struct drm_asahi_params_global). The kernel will + * not write past the length specified here, allowing extensibility. + */ + __u64 size; +}; + +/** + * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE + */ +struct drm_asahi_vm_create { + /** + * @kernel_start: Start of the kernel-reserved address range. See + * drm_asahi_params_global::vm_kernel_min_size. + * + * Both @kernel_start and @kernel_end must be within the range of + * valid VAs given by drm_asahi_params_global::vm_start and + * drm_asahi_params_global::vm_end. 
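A hypothetical userspace sketch of carving the kernel-reserved range out of the reported window when creating a VM; fd and params (a previously queried struct drm_asahi_params_global) are assumed:

	struct drm_asahi_vm_create vc = {
		/* Reserve the top of the usable window for the kernel. */
		.kernel_start = params.vm_end - params.vm_kernel_min_size,
		.kernel_end = params.vm_end,
	};

	if (ioctl(fd, DRM_IOCTL_ASAHI_VM_CREATE, &vc) == 0)
		vm_id = vc.vm_id;	/* the new address space */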
The size of the kernel range + * (@kernel_end - @kernel_start) must be at least + * drm_asahi_params_global::vm_kernel_min_size. + * + * Userspace must not bind any memory on this VM into this reserved + * range, it is for kernel use only. + */ + __u64 kernel_start; + + /** + * @kernel_end: End of the kernel-reserved address range. See + * @kernel_start. + */ + __u64 kernel_end; + + /** @vm_id: Returned VM ID */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY + */ +struct drm_asahi_vm_destroy { + /** @vm_id: VM ID to be destroyed */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_gem_flags - Flags for GEM creation + */ +enum drm_asahi_gem_flags { + /** + * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback. + * + * Map as writeback instead of write-combine. This optimizes for CPU + * reads. + */ + DRM_ASAHI_GEM_WRITEBACK = (1L << 0), + + /** + * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports). + */ + DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1), +}; + +/** + * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE + */ +struct drm_asahi_gem_create { + /** @size: Size of the BO */ + __u64 size; + + /** @flags: Combination of drm_asahi_gem_flags flags. */ + __u32 flags; + + /** + * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set + */ + __u32 vm_id; + + /** @handle: Returned GEM handle for the BO */ + __u32 handle; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_gem_mmap_offset - Arguments passed to + * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET + */ +struct drm_asahi_gem_mmap_offset { + /** @handle: Handle for the object being mapped. */ + __u32 handle; + + /** @flags: Must be zero */ + __u32 flags; + + /** @offset: The fake offset to use for subsequent mmap call */ + __u64 offset; +}; + +/** + * enum drm_asahi_bind_flags - Flags for GEM binding + */ +enum drm_asahi_bind_flags { + /** + * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range, + * simply unbind the GPU VMA range. + */ + DRM_ASAHI_BIND_UNBIND = (1L << 0), + + /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */ + DRM_ASAHI_BIND_READ = (1L << 1), + + /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */ + DRM_ASAHI_BIND_WRITE = (1L << 2), + + /** + * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly + * across the VA range. + * + * This is useful to fill a VA range with scratch pages or zero pages. + * It is intended as a mechanism to accelerate sparse. + */ + DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3), +}; + +/** + * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation. + */ +struct drm_asahi_gem_bind_op { + /** @flags: Combination of drm_asahi_bind_flags flags. */ + __u32 flags; + + /** @handle: GEM object to bind (except for UNBIND) */ + __u32 handle; + + /** + * @offset: Offset into the object (except for UNBIND). + * + * For a regular bind, this is the beginning of the region of the GEM + * object to bind. + * + * For a single-page bind, this is the offset to the single page that + * will be repeatedly bound. + * + * Must be page-size aligned. + */ + __u64 offset; + + /** + * @range: Number of bytes to bind/unbind to @addr. + * + * Must be page-size aligned. + */ + __u64 range; + + /** + * @addr: Address to bind to. + * + * Must be page-size aligned. 
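A minimal userspace sketch tying this bind descriptor to the VM_BIND ioctl defined next; bo_handle, bo_size, gpu_va and vm_id are assumed, and must be page-size aligned where noted:

	struct drm_asahi_gem_bind_op op = {
		.flags = DRM_ASAHI_BIND_READ | DRM_ASAHI_BIND_WRITE,
		.handle = bo_handle,
		.offset = 0,
		.range = bo_size,		/* page-size aligned */
		.addr = gpu_va,			/* page-size aligned */
	};
	struct drm_asahi_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.stride = sizeof(op),		/* allows future extension of the op struct */
		.userptr = (__u64)(uintptr_t)&op,
	};

	int ret = ioctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &bind);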
+ */ + __u64 addr; +}; + +/** + * struct drm_asahi_vm_bind - Arguments passed to + * DRM_IOCTL_ASAHI_VM_BIND + */ +struct drm_asahi_vm_bind { + /** @vm_id: The ID of the VM to bind to */ + __u32 vm_id; + + /** @num_binds: number of binds in this IOCTL. */ + __u32 num_binds; + + /** + * @stride: Stride in bytes between consecutive binds. This allows + * extensibility of drm_asahi_gem_bind_op. + */ + __u32 stride; + + /** @pad: MBZ */ + __u32 pad; + + /** + * @userptr: User pointer to an array of @num_binds structures of type + * @drm_asahi_gem_bind_op and size @stride bytes. + */ + __u64 userptr; +}; + +/** + * enum drm_asahi_bind_object_op - Special object bind operation + */ +enum drm_asahi_bind_object_op { + /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */ + DRM_ASAHI_BIND_OBJECT_OP_BIND = 0, + + /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */ + DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1, +}; + +/** + * enum drm_asahi_bind_object_flags - Special object bind flags + */ +enum drm_asahi_bind_object_flags { + /** + * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp + * buffer. + */ + DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0), +}; + +/** + * struct drm_asahi_gem_bind_object - Arguments passed to + * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT + */ +struct drm_asahi_gem_bind_object { + /** @op: Bind operation (enum drm_asahi_bind_object_op) */ + __u32 op; + + /** @flags: Combination of drm_asahi_bind_object_flags flags. */ + __u32 flags; + + /** @handle: GEM object to bind/unbind (BIND) */ + __u32 handle; + + /** @vm_id: The ID of the VM to operate on (MBZ currently) */ + __u32 vm_id; + + /** @offset: Offset into the object (BIND only) */ + __u64 offset; + + /** @range: Number of bytes to bind/unbind (BIND only) */ + __u64 range; + + /** @object_handle: Object handle (out for BIND, in for UNBIND) */ + __u32 object_handle; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_cmd_type - Command type + */ +enum drm_asahi_cmd_type { + /** + * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render + * subqueue. Combined vertex and fragment operation. + * + * Followed by a @drm_asahi_cmd_render payload. + */ + DRM_ASAHI_CMD_RENDER = 0, + + /** + * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue. + * + * Followed by a @drm_asahi_cmd_compute payload. + */ + DRM_ASAHI_CMD_COMPUTE = 1, + + /** + * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set + * attachments for subsequent vertex shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. + */ + DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2, + + /** + * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set + * attachments for subsequent fragment shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. + */ + DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3, + + /** + * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set + * attachments for subsequent compute shaders in the same submit. + * + * Followed by (possibly multiple) @drm_asahi_attachment payloads. + */ + DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4, +}; + +/** + * enum drm_asahi_priority - Scheduling queue priority. + * + * These priorities are forwarded to the firmware to influence firmware + * scheduling. The exact policy is ultimately decided by firmware, but + * these enums allow userspace to communicate the intentions. + */ +enum drm_asahi_priority { + /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. 
*/ + DRM_ASAHI_PRIORITY_LOW = 0, + + /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */ + DRM_ASAHI_PRIORITY_MEDIUM = 1, + + /** + * @DRM_ASAHI_PRIORITY_HIGH: High priority queue. + * + * Reserved for future extension. + */ + DRM_ASAHI_PRIORITY_HIGH = 2, + + /** + * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue. + * + * Reserved for future extension. + */ + DRM_ASAHI_PRIORITY_REALTIME = 3, +}; + +/** + * struct drm_asahi_queue_create - Arguments passed to + * DRM_IOCTL_ASAHI_QUEUE_CREATE + */ +struct drm_asahi_queue_create { + /** @flags: MBZ */ + __u32 flags; + + /** @vm_id: The ID of the VM this queue is bound to */ + __u32 vm_id; + + /** @priority: One of drm_asahi_priority */ + __u32 priority; + + /** @queue_id: The returned queue ID */ + __u32 queue_id; + + /** + * @usc_exec_base: GPU base address for all USC binaries (shaders) on + * this queue. USC addresses are 32-bit relative to this 64-bit base. + * + * This sets the following registers on all queue commands: + * + * USC_EXEC_BASE_TA (vertex) + * USC_EXEC_BASE_ISP (fragment) + * USC_EXEC_BASE_CP (compute) + * + * While the hardware lets us configure these independently per command, + * we do not have a use case for this. Instead, we expect userspace to + * fix a 4GiB VA carveout for USC memory and pass its base address here. + */ + __u64 usc_exec_base; +}; + +/** + * struct drm_asahi_queue_destroy - Arguments passed to + * DRM_IOCTL_ASAHI_QUEUE_DESTROY + */ +struct drm_asahi_queue_destroy { + /** @queue_id: The queue ID to be destroyed */ + __u32 queue_id; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * enum drm_asahi_sync_type - Sync item type + */ +enum drm_asahi_sync_type { + /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */ + DRM_ASAHI_SYNC_SYNCOBJ = 0, + + /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */ + DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1, +}; + +/** + * struct drm_asahi_sync - Sync item + */ +struct drm_asahi_sync { + /** @sync_type: One of drm_asahi_sync_type */ + __u32 sync_type; + + /** @handle: The sync object handle */ + __u32 handle; + + /** @timeline_value: Timeline value for timeline sync objects */ + __u64 timeline_value; +}; + +/** + * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier + * + * This special value may be passed in to drm_asahi_command::vdm_barrier or + * drm_asahi_command::cdm_barrier to indicate that the respective subqueue + * should not wait on any previous work. + */ +#define DRM_ASAHI_BARRIER_NONE (0xFFFFu) + +/** + * struct drm_asahi_cmd_header - Top level command structure + * + * This struct is core to the command buffer definition and therefore is not + * extensible. + */ +struct drm_asahi_cmd_header { + /** @cmd_type: One of drm_asahi_cmd_type */ + __u16 cmd_type; + + /** + * @size: Size of this command, not including this header. + * + * For hardware commands, this enables extensibility of commands without + * requiring extra command types. Passing a command that is shorter + * than expected is explicitly allowed for backwards-compatibility. + * Truncated fields will be zeroed. + * + * For the synthetic attachment setting commands, this implicitly + * encodes the number of attachments. These commands take multiple + * fixed-size @drm_asahi_attachment structures as their payload, so size + * equals number of attachments * sizeof(struct drm_asahi_attachment). + */ + __u16 size; + + /** + * @vdm_barrier: VDM (render) command index to wait on. + * + * Barriers are indices relative to the beginning of a given submit. 
A + * barrier of 0 waits on commands submitted to the respective subqueue + * in previous submit ioctls. A barrier of N waits on N previous + * commands on the subqueue within the current submit ioctl. As a + * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any + * commands in the subqueue. + * + * Examples: + * + * 0: This waits on all previous work. + * + * NONE: This does not wait for anything on this subqueue. + * + * 1: This waits on the first render command in the submit. + * This is valid only if there are multiple render commands in the + * same submit. + * + * Barriers are valid only for hardware commands. Synthetic software + * commands to set attachments must pass NONE here. + */ + __u16 vdm_barrier; + + /** + * @cdm_barrier: CDM (compute) command index to wait on. + * + * See @vdm_barrier, and replace VDM/render with CDM/compute. + */ + __u16 cdm_barrier; +}; + +/** + * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT + */ +struct drm_asahi_submit { + /** + * @syncs: An optional pointer to an array of drm_asahi_sync. The first + * @in_sync_count elements are in-syncs, then the remaining + * @out_sync_count elements are out-syncs. Using a single array with + * explicit partitioning simplifies handling. + */ + __u64 syncs; + + /** + * @cmdbuf: Pointer to the command buffer to submit. + * + * This is a flat command buffer. By design, it contains no CPU + * pointers, which makes it suitable for a virtgpu wire protocol without + * requiring any serializing/deserializing step. + * + * It consists of a series of commands. Each command begins with a + * fixed-size @drm_asahi_cmd_header header and is followed by a + * variable-length payload according to the type and size in the header. + * + * The combined count of "real" hardware commands must be nonzero and at + * most drm_asahi_params_global::max_commands_per_submission. + */ + __u64 cmdbuf; + + /** @flags: Flags for command submission (MBZ) */ + __u32 flags; + + /** @queue_id: The queue ID to be submitted to */ + __u32 queue_id; + + /** + * @in_sync_count: Number of sync objects to wait on before starting + * this job. + */ + __u32 in_sync_count; + + /** + * @out_sync_count: Number of sync objects to signal upon completion of + * this job. + */ + __u32 out_sync_count; + + /** @cmdbuf_size: Command buffer size in bytes */ + __u32 cmdbuf_size; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_asahi_attachment - Describe an "attachment". + * + * Attachments are any memory written by shaders, notably including render + * target attachments written by the end-of-tile program. This is purely a hint + * about the accessed memory regions. It is optional to specify, which is + * fortunate as it cannot be specified precisely with bindless access anyway. + * But where possible, it's probably a good idea for userspace to include these + * hints, forwarded to the firmware. + * + * This struct is implicitly sized and therefore is not extensible. + */ +struct drm_asahi_attachment { + /** @pointer: Base address of the attachment */ + __u64 pointer; + + /** @size: Size of the attachment in bytes */ + __u64 size; + + /** @pad: MBZ */ + __u32 pad; + + /** @flags: MBZ */ + __u32 flags; +}; + +enum drm_asahi_render_flags { + /** + * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch + * memory. + */ + DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0), + + /** + * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles. + * This must be set when clearing render targets. 
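Before the remaining render flags, a hedged sketch of how the submit pieces above fit together: one render command that waits on nothing, appended to the flat command buffer. p (a byte pointer into the cmdbuf storage) and render_cmd are assumed:

	struct drm_asahi_cmd_header hdr = {
		.cmd_type = DRM_ASAHI_CMD_RENDER,
		.size = sizeof(struct drm_asahi_cmd_render),
		.vdm_barrier = DRM_ASAHI_BARRIER_NONE,
		.cdm_barrier = DRM_ASAHI_BARRIER_NONE,
	};

	/* The flat cmdbuf is the fixed-size header immediately followed by
	 * the variable-length payload named by cmd_type. */
	memcpy(p, &hdr, sizeof(hdr));
	memcpy(p + sizeof(hdr), &render_cmd, hdr.size);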
+	 */
+	DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+	/**
+	 * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+	 * cluster (on multi-cluster GPUs)
+	 *
+	 * This harms performance but can work around certain sync/coherency
+	 * bugs, and therefore is useful for debugging.
+	 */
+	DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+	/**
+	 * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+	 *
+	 * Graphics specifications contain two alternate formulas for depth
+	 * bias, a float formula used with floating-point depth buffers and an
+	 * integer formula used with unorm depth buffers. This flag specifies
+	 * that the integer formula should be used. If omitted, the float
+	 * formula is used instead.
+	 *
+	 * This corresponds to bit 18 of the relevant hardware control register,
+	 * so we match that here for efficiency.
+	 */
+	DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field respectively for loads,
+ * stores, and partial renders. In practice, it makes sense to set all to the
+ * same values, except in exceptional cases not yet implemented in userspace, so
+ * we do not duplicate here for simplicity/efficiency.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+	/** @base: Base address of the buffer */
+	__u64 base;
+
+	/**
+	 * @comp_base: If the load buffer is compressed, address of the
+	 * compression metadata section.
+	 */
+	__u64 comp_base;
+
+	/**
+	 * @stride: If layered rendering is enabled, the number of bytes
+	 * between each layer of the buffer.
+	 */
+	__u32 stride;
+
+	/**
+	 * @comp_stride: If layered rendering is enabled, the number of bytes
+	 * between each layer of the compression metadata.
+	 */
+	__u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularities, but it needs to be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+	/**
+	 * @handle: Handle of the timestamp buffer, or 0 to skip this
+	 * timestamp. If nonzero, this must equal the value returned in
+	 * drm_asahi_gem_bind_object::object_handle.
+	 */
+	__u32 handle;
+
+	/** @offset: Offset to write into the timestamp buffer */
+	__u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped can be timestamped at the start and
+ * the end. Therefore, drm_asahi_timestamp structs always come in pairs, bundled
+ * together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+	/** @start: Timestamp recorded at the start of the operation */
+	struct drm_asahi_timestamp start;
+
+	/** @end: Timestamp recorded at the end of the operation */
+	struct drm_asahi_timestamp end;
+};
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality.
+ * Its most important role is dynamically allocating scratch/stack memory for
+ * individual subgroups, by partitioning a static allocation shared for the
+ * whole device. It is supplied by userspace via drm_asahi_helper_program and
+ * internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+	/**
+	 * @binary: USC address to the helper program binary. This is a tagged
+	 * pointer with configuration in the bottom bits.
+	 */
+	__u32 binary;
+
+	/** @cfg: Additional configuration bits for the helper program. */
+	__u32 cfg;
+
+	/**
+	 * @data: Data passed to the helper program. This value is not
+	 * interpreted by the kernel, firmware, or hardware in any way. It is
+	 * simply a sideband for userspace, set with the submit ioctl and read
+	 * via special registers inside the helper program.
+	 *
+	 * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+	 * actual arguments, which presumably don't fit in 64 bits.
+	 */
+	__u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+	/**
+	 * @usc: USC address of the hardware USC words binding resources
+	 * (including images and uniforms) and the program itself. Note this is
+	 * an additional layer of indirection compared to the helper program,
+	 * avoiding the need for a sideband for data. This is a tagged pointer
+	 * with additional configuration in the bottom bits.
+	 */
+	__u32 usc;
+
+	/**
+	 * @rsrc_spec: Resource specifier for the program. This is a packed
+	 * hardware data structure describing the required number of registers,
+	 * uniforms, bound textures, and bound samplers.
+	 */
+	__u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to set a large number of control
+ * registers before each 3D render command, setting up state at render pass
+ * granularity. The firmware bundles this state into data structures.
+ * Unfortunately, we cannot expose any of that directly to userspace, because
+ * the kernel-firmware ABI is not stable. Although we can guarantee the
+ * firmware updates in tandem with the kernel, we cannot break old userspace
+ * when upgrading the firmware and kernel. Therefore, we need to abstract the
+ * data structures well, to avoid tying our hands with future firmware
+ * versions.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here.
+ * In addition to being passed to the firmware/hardware, the kernel requires
+ * these dimensions to calculate various essential tiling-related data
+ * structures. It is unfortunate that our submits are heavier than on vendors
+ * with saner hardware-software interfaces. The upshot is that all of this
+ * information is readily available to userspace with all current APIs.
+ *
+ * It looks odd, but it's not overly burdensome and it ensures we can remain
+ * compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+	/** @flags: Combination of drm_asahi_render_flags flags. */
+	__u32 flags;
+
+	/**
+	 * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+	 * depth/stencil width/height, which may differ from the framebuffer
+	 * width/height.
+	 */
+	__u32 isp_zls_pixels;
+
+	/**
+	 * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+	 * address to the beginning of the VDM control stream.
+	 */
+	__u64 vdm_ctrl_stream_base;
+
+	/** @vertex_helper: Helper program used for the vertex shader */
+	struct drm_asahi_helper_program vertex_helper;
+
+	/** @fragment_helper: Helper program used for the fragment shader */
+	struct drm_asahi_helper_program fragment_helper;
+
+	/**
+	 * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+	 * array of scissor descriptors indexed in the render pass.
+	 */
+	__u64 isp_scissor_base;
+
+	/**
+	 * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+	 * array of depth bias values indexed in the render pass.
+	 */
+	__u64 isp_dbias_base;
+
+	/**
+	 * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+	 * array of occlusion query results written by the render pass.
+	 */
+	__u64 isp_oclqry_base;
+
+	/** @depth: Depth buffer */
+	struct drm_asahi_zls_buffer depth;
+
+	/** @stencil: Stencil buffer */
+	struct drm_asahi_zls_buffer stencil;
+
+	/** @zls_ctrl: ZLS_CTRL register value */
+	__u64 zls_ctrl;
+
+	/** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+	__u64 ppp_multisamplectl;
+
+	/**
+	 * @sampler_heap: Base address of the sampler heap. This heap is used
+	 * for both vertex shaders and fragment shaders. The registers are
+	 * per-stage, but there is no known use case for separate heaps.
+	 */
+	__u64 sampler_heap;
+
+	/** @ppp_ctrl: PPP_CTRL register value */
+	__u32 ppp_ctrl;
+
+	/** @width_px: Framebuffer width in pixels */
+	__u16 width_px;
+
+	/** @height_px: Framebuffer height in pixels */
+	__u16 height_px;
+
+	/** @layers: Number of layers in the framebuffer */
+	__u16 layers;
+
+	/** @sampler_count: Number of samplers in the sampler heap. */
+	__u16 sampler_count;
+
+	/** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+	__u8 utile_width_px;
+
+	/** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+	__u8 utile_height_px;
+
+	/** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+	__u8 samples;
+
+	/** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+	__u8 sample_size_B;
+
+	/**
+	 * @isp_merge_upper_x: 32-bit float used in the hardware triangle
+	 * merging. Calculate as: tan(60 deg) * width.
+	 *
+	 * Making these values UAPI avoids requiring floating-point calculations
+	 * in the kernel in the hot path.
+	 */
+	__u32 isp_merge_upper_x;
+
+	/**
+	 * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+	 * See @isp_merge_upper_x.
+	 */
+	__u32 isp_merge_upper_y;
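A hypothetical userspace helper for the two merge registers just described, doing the tan(60 deg) scaling on the CPU and storing the raw float bits; cmd, width_px and height_px are assumed:

	#include <math.h>
	#include <string.h>

	static __u32 f32_bits(float f)
	{
		__u32 u;

		memcpy(&u, &f, sizeof(u));	/* type-pun without undefined behaviour */
		return u;
	}

	/* tan(60 deg) == tan(pi/3) */
	cmd.isp_merge_upper_x = f32_bits(tanf((float)M_PI / 3) * width_px);
	cmd.isp_merge_upper_y = f32_bits(tanf((float)M_PI / 3) * height_px);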
+
+	/** @bg: Background program run for each tile at the start */
+	struct drm_asahi_bg_eot bg;
+
+	/** @eot: End-of-tile program run for each tile at the end */
+	struct drm_asahi_bg_eot eot;
+
+	/**
+	 * @partial_bg: Background program run at the start of each tile when
+	 * resuming the render pass during a partial render.
+	 */
+	struct drm_asahi_bg_eot partial_bg;
+
+	/**
+	 * @partial_eot: End-of-tile program run at the end of each tile when
+	 * pausing the render pass during a partial render.
+	 */
+	struct drm_asahi_bg_eot partial_eot;
+
+	/**
+	 * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+	 * buffer clear value, encoded in the depth buffer's format: either a
+	 * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+	 */
+	__u32 isp_bgobjdepth;
+
+	/**
+	 * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8 bits
+	 * contain the stencil buffer clear value.
+	 */
+	__u32 isp_bgobjvals;
+
+	/** @ts_vtx: Timestamps for the vertex portion of the render */
+	struct drm_asahi_timestamps ts_vtx;
+
+	/** @ts_frag: Timestamps for the fragment portion of the render */
+	struct drm_asahi_timestamps ts_frag;
+};
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.
+ */
+struct drm_asahi_cmd_compute {
+	/** @flags: MBZ */
+	__u32 flags;
+
+	/** @sampler_count: Number of samplers in the sampler heap. */
+	__u32 sampler_count;
+
+	/**
+	 * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU
+	 * address to the beginning of the CDM control stream.
+	 */
+	__u64 cdm_ctrl_stream_base;
+
+	/**
+	 * @cdm_ctrl_stream_end: GPU address of the end of the hardware
+	 * control stream. Note this only considers the first contiguous segment
+	 * of the control stream, as the stream might jump elsewhere.
+	 */
+	__u64 cdm_ctrl_stream_end;
+
+	/** @sampler_heap: Base address of the sampler heap. */
+	__u64 sampler_heap;
+
+	/** @helper: Helper program used for this compute command */
+	struct drm_asahi_helper_program helper;
+
+	/** @ts: Timestamps for the compute command */
+	struct drm_asahi_timestamps ts;
+};
+
+/**
+ * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME
+ */
+struct drm_asahi_get_time {
+	/** @flags: MBZ. */
+	__u64 flags;
+
+	/** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */
+	__u64 gpu_timestamp;
+};
+
+/**
+ * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_ASAHI_xxx id.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_ASAHI(__access, __id, __type) \
+	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \
+			   struct drm_asahi_ ## __type)
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+	DRM_IOCTL_ASAHI_GET_PARAMS      = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params),
+	DRM_IOCTL_ASAHI_GET_TIME        = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time),
+	DRM_IOCTL_ASAHI_VM_CREATE       = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create),
+	DRM_IOCTL_ASAHI_VM_DESTROY      = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy),
+	DRM_IOCTL_ASAHI_VM_BIND         = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind),
+	DRM_IOCTL_ASAHI_GEM_CREATE      = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create),
+	DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset),
+	DRM_IOCTL_ASAHI_GEM_BIND_OBJECT = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object),
+	DRM_IOCTL_ASAHI_QUEUE_CREATE    = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create),
+	DRM_IOCTL_ASAHI_QUEUE_DESTROY   = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy),
+	DRM_IOCTL_ASAHI_SUBMIT          = DRM_IOCTL_ASAHI(W, SUBMIT, submit),
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _ASAHI_DRM_H_ */
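A minimal sketch of the time query this header just defined; only the struct and ioctl number come from the header, fd is assumed:

	struct drm_asahi_get_time gt = { .flags = 0 };

	if (ioctl(fd, DRM_IOCTL_ASAHI_GET_TIME, &gt) == 0)
		printf("GPU timestamp: %llu ns\n",
		       (unsigned long long)gt.gpu_timestamp);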
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 7fba37b94401..e63a71d3c607 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -905,13 +905,17 @@ struct drm_syncobj_destroy {
 };

 #define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
 #define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
 struct drm_syncobj_handle {
 	__u32 handle;
 	__u32 flags;

 	__s32 fd;
 	__u32 pad;
+
+	__u64 point;
 };

 struct drm_syncobj_transfer {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e41a3cec6a9e..81202a50dc9e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -422,6 +422,7 @@ extern "C" {
 #define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
 #define DRM_FORMAT_MOD_VENDOR_AMLOGIC   0x0a
 #define DRM_FORMAT_MOD_VENDOR_MTK       0x0b
+#define DRM_FORMAT_MOD_VENDOR_APPLE     0x0c

 /* add more to the end as needed */

@@ -1495,6 +1496,50 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
 #define DRM_FORMAT_MOD_MTK_16L_32S_TILE	DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)

 /*
+ * Apple GPU-tiled layouts.
+ *
+ * Apple GPUs support nonlinear tilings with optional lossless compression.
+ *
+ * GPU-tiled images are divided into 16KiB tiles:
+ *
+ *     Bytes per pixel   Tile size
+ *     ---------------   ---------
+ *     1                 128x128
+ *     2                 128x64
+ *     4                 64x64
+ *     8                 64x32
+ *     16                32x32
+ *
+ * Tiles are in raster order. Pixels within a tile are interleaved
+ * (Morton order).
+ *
+ * Compressed images pad the body to 128 bytes and are immediately followed by
+ * a metadata section. The metadata section rounds the image dimensions to
+ * powers-of-two and contains 8 bytes for each 16x16 compression subtile.
+ * Subtiles are interleaved (Morton order).
+ *
+ * All images are 128-byte aligned.
+ *
+ * These layouts fundamentally do not have meaningful strides. No matter how we
+ * specify strides for these layouts, userspace unaware of Apple image layouts
+ * will be unable to correctly use the specified stride for any purpose.
+ * Userspace aware of the image layouts does not use strides. The most
+ * "correct" convention would be setting the image stride to 0. Unfortunately,
+ * some software assumes the stride is at least (width * bytes per pixel). We
+ * therefore require that the stride equals (width * bytes per pixel). Since
+ * the stride is arbitrary here, we pick the simplest convention.
+ *
+ * Although containing two sections, compressed image layouts are treated in
+ * software as a single plane. This is modelled after AFBC, a similar scheme.
+ * Attempting to separate the sections to be "explicit" in DRM would only
+ * generate more confusion, as software does not treat the image this way.
+ *
+ * For detailed information on the hardware image layouts, see
+ * https://docs.mesa3d.org/drivers/asahi.html#image-layouts
+ */
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
+
+/*
 * AMD modifiers
 *
 * Memory layout:
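The Panthor hunk that follows adds a BO-labelling ioctl; a hypothetical userspace call, with bo_handle assumed, might look like this:

	static const char label[] = "debug:vertex-buffer";
	struct drm_panthor_bo_set_label args = {
		.handle = bo_handle,
		.label = (__u64)(uintptr_t)label,	/* NUL-terminated, at most 4096 bytes */
	};

	int ret = ioctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);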
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 97e2c4510e69..ad9a70afea6c 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -127,6 +127,9 @@ enum drm_panthor_ioctl_id {

 	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
 	DRM_PANTHOR_TILER_HEAP_DESTROY,
+
+	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
+	DRM_PANTHOR_BO_SET_LABEL,
 };

 /**
@@ -978,6 +981,24 @@ struct drm_panthor_tiler_heap_destroy {
 };

 /**
+ * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
+ */
+struct drm_panthor_bo_set_label {
+	/** @handle: Handle of the buffer object to label. */
+	__u32 handle;

+	/** @pad: MBZ. */
+	__u32 pad;
+
+	/**
+	 * @label: User pointer to a NUL-terminated string.
+	 *
+	 * Length cannot be greater than 4096.
+	 */
+	__u64 label;
+};
+
+/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx id.
@@ -1019,6 +1040,8 @@ enum {
 		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
 	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
 		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
+	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
+		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
 };

 #if defined(__cplusplus)
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c2ce71987e9b..9debb320c34b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -163,6 +163,12 @@ struct drm_virtgpu_3d_wait {
 	__u32 flags;
 };

+#define VIRTGPU_DRM_CAPSET_VIRGL 1
+#define VIRTGPU_DRM_CAPSET_VIRGL2 2
+#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTGPU_DRM_CAPSET_VENUS 4
+#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
+#define VIRTGPU_DRM_CAPSET_DRM 6
 struct drm_virtgpu_get_caps {
 	__u32 cap_set_id;
 	__u32 cap_set_ver;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 616916985e3f..9c08738c3b91 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -917,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
- * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
+ *   space of the VM to a scratch page. A vm_bind would overwrite the scratch
+ *   page mapping. This flag is mutually exclusive with the
+ *   %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, except on the xe2 and xe3
+ *   platforms.
 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 *   exec submissions to its exec_queues that don't have an upper time
 *   limit on the job execution time.
But exec submissions to these diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index bf2c9cabd207..be109777d10d 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -309,8 +309,9 @@ struct virtio_gpu_cmd_submit { #define VIRTIO_GPU_CAPSET_VIRGL 1 #define VIRTIO_GPU_CAPSET_VIRGL2 2 -/* 3 is reserved for gfxstream */ +#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3 #define VIRTIO_GPU_CAPSET_VENUS 4 +#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5 #define VIRTIO_GPU_CAPSET_DRM 6 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ diff --git a/include/video/pixel_format.h b/include/video/pixel_format.h new file mode 100644 index 000000000000..b5104b2a3a13 --- /dev/null +++ b/include/video/pixel_format.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef VIDEO_PIXEL_FORMAT_H +#define VIDEO_PIXEL_FORMAT_H + +struct pixel_format { + unsigned char bits_per_pixel; + bool indexed; + union { + struct { + struct { + unsigned char offset; + unsigned char length; + } alpha, red, green, blue; + }; + struct { + unsigned char offset; + unsigned char length; + } index; + }; +}; + +#define PIXEL_FORMAT_XRGB1555 \ + { 16, false, { .alpha = {0, 0}, .red = {10, 5}, .green = {5, 5}, .blue = {0, 5} } } + +#define PIXEL_FORMAT_RGB565 \ + { 16, false, { .alpha = {0, 0}, .red = {11, 5}, .green = {5, 6}, .blue = {0, 5} } } + +#define PIXEL_FORMAT_RGB888 \ + { 24, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } } + +#define PIXEL_FORMAT_XRGB8888 \ + { 32, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } } + +#define PIXEL_FORMAT_XBGR8888 \ + { 32, false, { .alpha = {0, 0}, .red = {0, 8}, .green = {8, 8}, .blue = {16, 8} } } + +#define PIXEL_FORMAT_XRGB2101010 \ + { 32, false, { .alpha = {0, 0}, .red = {20, 10}, .green = {10, 10}, .blue = {0, 10} } } + +#endif diff --git a/lib/tests/printf_kunit.c b/lib/tests/printf_kunit.c index 2c9f6170bacd..b1fa0dcea52f 100644 --- a/lib/tests/printf_kunit.c +++ b/lib/tests/printf_kunit.c @@ -701,21 +701,46 @@ static void fwnode_pointer(struct kunit *kunittest) software_node_unregister_node_group(group); } +struct fourcc_struct { + u32 code; + const char *str; +}; + +static void fourcc_pointer_test(struct kunit *kunittest, const struct fourcc_struct *fc, + size_t n, const char *fmt) +{ + size_t i; + + for (i = 0; i < n; i++) + test(fc[i].str, fmt, &fc[i].code); +} + static void fourcc_pointer(struct kunit *kunittest) { - struct { - u32 code; - char *str; - } const try[] = { + static const struct fourcc_struct try_cc[] = { { 0x3231564e, "NV12 little-endian (0x3231564e)", }, { 0xb231564e, "NV12 big-endian (0xb231564e)", }, { 0x10111213, ".... 
little-endian (0x10111213)", }, { 0x20303159, "Y10 little-endian (0x20303159)", }, }; - unsigned int i; + static const struct fourcc_struct try_ch[] = { + { 0x41424344, "ABCD (0x41424344)", }, + }; + static const struct fourcc_struct try_cn[] = { + { 0x41424344, "DCBA (0x44434241)", }, + }; + static const struct fourcc_struct try_cl[] = { + { (__force u32)cpu_to_le32(0x41424344), "ABCD (0x41424344)", }, + }; + static const struct fourcc_struct try_cb[] = { + { (__force u32)cpu_to_be32(0x41424344), "ABCD (0x41424344)", }, + }; - for (i = 0; i < ARRAY_SIZE(try); i++) - test(try[i].str, "%p4cc", &try[i].code); + fourcc_pointer_test(kunittest, try_cc, ARRAY_SIZE(try_cc), "%p4cc"); + fourcc_pointer_test(kunittest, try_ch, ARRAY_SIZE(try_ch), "%p4ch"); + fourcc_pointer_test(kunittest, try_cn, ARRAY_SIZE(try_cn), "%p4cn"); + fourcc_pointer_test(kunittest, try_cl, ARRAY_SIZE(try_cl), "%p4cl"); + fourcc_pointer_test(kunittest, try_cb, ARRAY_SIZE(try_cb), "%p4cb"); } static void diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 01699852f30c..6bc64ae52166 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1793,27 +1793,50 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc, char output[sizeof("0123 little-endian (0x01234567)")]; char *p = output; unsigned int i; + bool pixel_fmt = false; u32 orig, val; - if (fmt[1] != 'c' || fmt[2] != 'c') + if (fmt[1] != 'c') return error_string(buf, end, "(%p4?)", spec); if (check_pointer(&buf, end, fourcc, spec)) return buf; orig = get_unaligned(fourcc); - val = orig & ~BIT(31); + switch (fmt[2]) { + case 'h': + break; + case 'n': + orig = swab32(orig); + break; + case 'l': + orig = (__force u32)cpu_to_le32(orig); + break; + case 'b': + orig = (__force u32)cpu_to_be32(orig); + break; + case 'c': + /* Pixel formats are printed LSB-first */ + pixel_fmt = true; + break; + default: + return error_string(buf, end, "(%p4?)", spec); + } + + val = pixel_fmt ? swab32(orig & ~BIT(31)) : orig; for (i = 0; i < sizeof(u32); i++) { - unsigned char c = val >> (i * 8); + unsigned char c = val >> ((3 - i) * 8); /* Print non-control ASCII characters as-is, dot otherwise */ *p++ = isascii(c) && isprint(c) ? c : '.'; } - *p++ = ' '; - strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian"); - p += strlen(p); + if (pixel_fmt) { + *p++ = ' '; + strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian"); + p += strlen(p); + } *p++ = ' '; *p++ = '('; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 3d22bf863eec..44e233b6f428 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -6891,7 +6891,7 @@ sub process { ($extension eq "f" && defined $qualifier && $qualifier !~ /^w/) || ($extension eq "4" && - defined $qualifier && $qualifier !~ /^cc/)) { + defined $qualifier && $qualifier !~ /^c[hnlbc]/)) { $bad_specifier = $specifier; last; } |
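Finally, a small kernel-side sketch of the generic FourCC specifiers these hunks wire up, printing the same value in host and network byte order; per the semantics above, the first line prints "gP00 (0x67503030)" on either host endianness:

	u32 fmt = 0x67503030;

	pr_info("host order:    %p4ch\n", &fmt);
	pr_info("network order: %p4cn\n", &fmt);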