Diffstat (limited to 'drivers/gpu/drm')
140 files changed, 8058 insertions, 1707 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index bd943a71756c..5e1bc630b885 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER help Choose this if you need the KMS CMA helper functions +config DRM_GEM_SHMEM_HELPER + bool + depends on DRM + help + Choose this if you need the GEM shmem helper functions + config DRM_VM bool depends on DRM && MMU @@ -329,6 +335,8 @@ source "drivers/gpu/drm/tve200/Kconfig" source "drivers/gpu/drm/xen/Kconfig" +source "drivers/gpu/drm/vboxvideo/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1ac55c65eac0..e630eccb951c 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -25,6 +25,7 @@ drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o +drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o drm-$(CONFIG_PCI) += ati_pcigart.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o @@ -109,3 +110,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ obj-$(CONFIG_DRM_XEN) += xen/ +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7419ea8a388b..8a0732088640 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -974,6 +974,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) DRM_ERROR("Device removal is currently not supported outside of fbcon\n"); drm_dev_unplug(dev); + drm_dev_put(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9fc3296592fe..98fd9208877f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -886,7 +886,7 @@ static int gmc_v6_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v6_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 761dcfb2fec0..3e9c5034febe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1030,7 +1030,7 @@ static int gmc_v7_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); pr_warn("amdgpu: No coherent DMA available\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v7_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 34440672f938..29dde64bf2e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1155,7 +1155,7 @@ static int gmc_v8_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); pr_warn("amdgpu: No coherent DMA available\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v8_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2fe8397241ea..53327498efbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1011,7 +1011,7 @@ static int gmc_v9_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); if (adev->gmc.xgmi.supported) { r = gfxhub_v1_1_get_xgmi_info(adev); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index ab50ad06e271..21725c9b9f5e 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -264,37 +264,17 @@ static bool malidp_verify_afbc_framebuffer_caps(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info; - - if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { - DRM_DEBUG_KMS("Unknown modifier (not Arm)\n"); - return false; - } - - if (mode_cmd->modifier[0] & - ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) { - DRM_DEBUG_KMS("Unsupported modifiers\n"); - return false; - } - - info = drm_get_format_info(dev, mode_cmd); - if (!info) { - DRM_DEBUG_KMS("Unable to get the format information\n"); + if (malidp_format_mod_supported(dev, mode_cmd->pixel_format, + mode_cmd->modifier[0]) == false) return false; - } - - if (info->num_planes != 1) { - DRM_DEBUG_KMS("AFBC buffers expect one plane\n"); - return false; - } if (mode_cmd->offsets[0] != 0) { DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n"); return false; } - switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { - case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) { DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n"); return false; @@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, struct drm_gem_object *objs = NULL; u32 afbc_superblock_size = 0, afbc_superblock_height = 0; u32 afbc_superblock_width = 0, afbc_size = 0; + int bpp = 0; - switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { - case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: afbc_superblock_height = 16; afbc_superblock_width = 16; break; @@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, n_superblocks = (mode_cmd->width / afbc_superblock_width) * (mode_cmd->height / afbc_superblock_height); - afbc_superblock_size = info->cpp[0] * afbc_superblock_width * - afbc_superblock_height; + bpp = malidp_format_get_bpp(info->format); + + afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height) + / BITS_PER_BYTE; afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT); afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT); - if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { - DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n", - mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]); + if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) { + DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) " + "should be same as width (=%u) * bpp (=%u)\n", + (mode_cmd->pitches[0] * BITS_PER_BYTE), + mode_cmd->width, bpp); return false; } @@ -406,6 +391,7 @@ static 
int malidp_init(struct drm_device *drm) drm->mode_config.max_height = hwdev->max_line_size; drm->mode_config.funcs = &malidp_mode_config_funcs; drm->mode_config.helper_private = &malidp_mode_config_helpers; + drm->mode_config.allow_fb_modifiers = true; ret = malidp_crtc_init(drm); if (ret) diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index b76c86f18a56..019a682b2716 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -90,6 +90,12 @@ struct malidp_crtc_state { int malidp_de_planes_init(struct drm_device *drm); int malidp_crtc_init(struct drm_device *drm); +bool malidp_hw_format_is_linear_only(u32 format); +bool malidp_hw_format_is_afbc_only(u32 format); + +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier); + #ifdef CONFIG_DEBUG_FS void malidp_error(struct malidp_drm *malidp, struct malidp_error_stats *error_stats, u32 status, diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index b9bed1138fa3..8df12e9a33bb 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = { { DRM_FORMAT_YUYV, DE_VIDEO1, 13 }, { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 }, { DRM_FORMAT_YUV420, DE_VIDEO1, 15 }, + { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 }, + /* These are supported with AFBC only */ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 }, + { DRM_FORMAT_VUY888, DE_VIDEO1, 16 }, + { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 }, + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 } }; #define MALIDP_ID(__group, __format) \ ((((__group) & 0x7) << 3) | ((__format) & 0x7)) +#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1) + #define MALIDP_COMMON_FORMATS \ /* fourcc, layers supporting the format, internal id */ \ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \ @@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = { { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \ + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \ { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \ + /* This is only supported with linear modifier */ \ { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \ - { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)} + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \ + { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)} static const struct malidp_format_id 
malidp550_de_formats[] = { MALIDP_COMMON_FORMATS, @@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = { * yuv2rgb matrix offset, mmu control register offset, rotation_features */ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP500_DE_LV_AD_CTRL }, { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG1_AD_CTRL }, { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG2_AD_CTRL }, }; static const struct malidp_layer malidp550_layers[] = { @@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = { * yuv2rgb matrix offset, mmu control register offset, rotation_features */ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP550_DE_LG_AD_CTRL }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, - MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE }, + MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 }, }; static const struct malidp_layer malidp650_layers[] = { @@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = { */ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, - MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY }, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL, - ROTATE_COMPRESSED }, + ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, - MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY }, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL, - ROTATE_NONE }, + ROTATE_NONE, 0 }, +}; + +const u64 malidp_format_modifiers[] = { + /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR), + + /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT), + + /* All 8 or 10 bit YUV 444 formats. 
*/ + /* In DP550, 10 bit YUV 420 format also supported */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT), + + /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16), + + /* YUV 420, 422 P1 8, 10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR), + + /* All formats */ + DRM_FORMAT_MOD_LINEAR, + + DRM_FORMAT_MOD_INVALID }; #define SE_N_SCALING_COEFFS 96 @@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); } -static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +int malidp_format_get_bpp(u32 fmt) +{ + int bpp = drm_format_plane_cpp(fmt, 0) * 8; + + if (bpp == 0) { + switch (fmt) { + case DRM_FORMAT_VUY101010: + bpp = 30; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + default: + bpp = 0; + } + } + + return bpp; +} + +static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) { /* * Each layer needs enough rotation memory to fit 8 lines * worth of pixel data. Required size is then: * size = rotated_width * (bpp / 8) * 8; */ - return w * drm_format_plane_cpp(fmt, 0) * 8; + int bpp = malidp_format_get_bpp(fmt); + + return w * bpp; } static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev, @@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); } -static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +static int malidpx50_get_bytes_per_column(u32 fmt) { - u32 bytes_per_col; + u32 bytes_per_column; switch (fmt) { /* 8 lines at 4 bytes per pixel */ @@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 case DRM_FORMAT_UYVY: case DRM_FORMAT_YUYV: case DRM_FORMAT_X0L0: - case DRM_FORMAT_X0L2: - bytes_per_col = 32; + bytes_per_column = 32; break; /* 16 lines at 1.5 bytes per pixel */ case DRM_FORMAT_NV12: case DRM_FORMAT_YUV420: - bytes_per_col = 24; + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_VUY888: + /* 16 lines at 12 bits per pixel */ + case DRM_FORMAT_YUV420_8BIT: + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_P010: + bytes_per_column = 24; + break; + /* 8 lines at 30 bits per pixel */ + case DRM_FORMAT_VUY101010: + /* 16 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 30; break; default: return -EINVAL; } - return w * bytes_per_col; + return bytes_per_column; +} + +static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 8 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 15; + break; + /* Uncompressed YUV 420 10 bit single plane cannot be rotated */ + case DRM_FORMAT_X0L2: + if (has_modifier) + bytes_per_column = 8; + else + return -EINVAL; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; +} + +static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, 
u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_X0L2: + bytes_per_column = 32; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; } static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, @@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, - .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, .n_layers = ARRAY_SIZE(malidp550_layers), .layers = malidp550_layers, .de_irq_map = { @@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, - .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, .n_layers = ARRAY_SIZE(malidp650_layers), .layers = malidp650_layers, .de_irq_map = { @@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .in_config_mode = malidp550_in_config_mode, .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, - .rotmem_required = malidp550_rotmem_required, + .rotmem_required = malidp650_rotmem_required, .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, .se_calc_mclk = malidp550_se_calc_mclk, .enable_memwrite = malidp550_enable_memwrite, @@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { }; u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, - u8 layer_id, u32 format) + u8 layer_id, u32 format, bool has_modifier) { unsigned int i; for (i = 0; i < map->n_pixel_formats; i++) { if (((map->pixel_formats[i].layer & layer_id) == layer_id) && - (map->pixel_formats[i].format == format)) - return map->pixel_formats[i].id; + (map->pixel_formats[i].format == format)) { + /* + * In some DP550 and DP650, DRM_FORMAT_YUYV + AFBC modifier + * is supported by a different h/w format id than + * DRM_FORMAT_YUYV (only). 
+ */ + if (format == DRM_FORMAT_YUYV && + (has_modifier) && + (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2)) + return AFBC_YUV_422_FORMAT_ID; + else + return map->pixel_formats[i].id; + } } return MALIDP_INVALID_FORMAT_ID; } +bool malidp_hw_format_is_linear_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_X0L2: + case DRM_FORMAT_X0L0: + return true; + default: + return false; + } +} + +bool malidp_hw_format_is_afbc_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_VUY888: + case DRM_FORMAT_VUY101010: + case DRM_FORMAT_YUV420_8BIT: + case DRM_FORMAT_YUV420_10BIT: + return true; + default: + return false; + } +} + static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq) { u32 base = malidp_get_block_base(hwdev, block); diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 40155e2ea9d9..207c3ce52f1a 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -70,6 +70,8 @@ struct malidp_layer { s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */ u16 mmu_ctrl_offset; /* offset to the MMU control register */ enum rotation_features rot; /* type of rotation supported */ + /* address offset for the AFBC decoder registers */ + u16 afbc_decoder_offset; }; enum malidp_scaling_coeff_set { @@ -93,7 +95,10 @@ struct malidp_se_config { }; /* regmap features */ -#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) +#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0) +#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1) +#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2) +#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3) struct malidp_hw_regmap { /* address offset of the DE register bank */ @@ -179,7 +184,8 @@ struct malidp_hw { * Calculate the required rotation memory given the active area * and the buffer format. 
*/ - int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); + int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, + u32 fmt, bool has_modifier); int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev, struct malidp_se_config *se_config, @@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq); void malidp_se_irq_fini(struct malidp_hw_device *hwdev); u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, - u8 layer_id, u32 format); + u8 layer_id, u32 format, bool has_modifier); + +int malidp_format_get_bpp(u32 fmt); static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated) { @@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev) #define MALIDP_GAMMA_LUT_SIZE 4096 -#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \ - AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \ - AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \ - AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC) +#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK +#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 +#define AFBC_YTR AFBC_FORMAT_MOD_YTR +#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE +#define AFBC_CBR AFBC_FORMAT_MOD_CBR +#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT +#define AFBC_TILED AFBC_FORMAT_MOD_TILED +#define AFBC_SC AFBC_FORMAT_MOD_SC + +#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \ + AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC) + +extern const u64 malidp_format_modifiers[]; #endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 87627219ce3b..5f102bdaf841 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, return -EINVAL; } + if (fb->modifier) { + DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n"); + return -EINVAL; + } + mw_state->format = malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE, - fb->format->format); + fb->format->format, !!fb->modifier); if (mw_state->format == MALIDP_INVALID_FORMAT_ID) { struct drm_format_name_buf format_name; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index c9a6d3e0cada..d42e0ea9a303 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -52,6 +52,8 @@ #define MALIDP550_LS_ENABLE 0x01c #define MALIDP550_LS_R1_IN_SIZE 0x020 +#define MODIFIERS_COUNT_MAX 15 + /* * This 4-entry look-up-table is used to determine the full 8-bit alpha value * for formats with 1- or 2-bit alpha channels. 
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p, drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize); } +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier) +{ + const struct drm_format_info *info; + const u64 *modifiers; + struct malidp_drm *malidp = drm->dev_private; + const struct malidp_hw_regmap *map = &malidp->dev->hw->map; + + if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) + return false; + + /* Some pixel formats are supported without any modifier */ + if (modifier == DRM_FORMAT_MOD_LINEAR) { + /* + * However, these pixel formats need to be supported with + * modifiers only + */ + return !malidp_hw_format_is_afbc_only(format); + } + + if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { + DRM_ERROR("Unknown modifier (not Arm)\n"); + return false; + } + + if (modifier & + ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) { + DRM_DEBUG_KMS("Unsupported modifiers\n"); + return false; + } + + modifiers = malidp_format_modifiers; + + /* SPLIT buffers must use SPARSE layout */ + if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE))) + return false; + + /* CBR only applies to YUV formats, where YTR should be always 0 */ + if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR))) + return false; + + while (*modifiers != DRM_FORMAT_MOD_INVALID) { + if (*modifiers == modifier) + break; + + modifiers++; + } + + /* return false if the modifier was not found */ + if (*modifiers == DRM_FORMAT_MOD_INVALID) { + DRM_DEBUG_KMS("Unsupported modifier\n"); + return false; + } + + info = drm_format_info(format); + + if (info->num_planes != 1) { + DRM_DEBUG_KMS("AFBC buffers expect one plane\n"); + return false; + } + + if (malidp_hw_format_is_linear_only(format) == true) { + DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n", + format); + return false; + } + + /* + * RGB formats need to provide YTR modifier and YUV formats should not + * provide YTR modifier. + */ + if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) { + DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n", + info->is_yuv ? "disallowed" : "mandatory", + info->is_yuv ? 
"YUV" : "RGB"); + return false; + } + + if (modifier & AFBC_SPLIT) { + if (!info->is_yuv) { + if (drm_format_plane_cpp(format, 0) <= 2) { + DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n"); + return false; + } + } + + if ((drm_format_horz_chroma_subsampling(format) != 1) || + (drm_format_vert_chroma_subsampling(format) != 1)) { + if (!(format == DRM_FORMAT_YUV420_10BIT && + (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) { + DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n"); + return false; + } + } + } + + if (modifier & AFBC_CBR) { + if ((drm_format_horz_chroma_subsampling(format) == 1) || + (drm_format_vert_chroma_subsampling(format) == 1)) { + DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n"); + return false; + } + } + + return true; +} + +static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane, + u32 format, u64 modifier) +{ + return malidp_format_mod_supported(plane->dev, format, modifier); +} + static const struct drm_plane_funcs malidp_de_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = { .atomic_duplicate_state = malidp_duplicate_plane_state, .atomic_destroy_state = malidp_destroy_plane_state, .atomic_print_state = malidp_plane_atomic_print_state, + .format_mod_supported = malidp_format_mod_supported_per_plane, }; static int malidp_se_check_scaling(struct malidp_plane *mp, @@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, fb = state->fb; ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, - mp->layer->id, - fb->format->format); + mp->layer->id, fb->format->format, + !!fb->modifier); if (ms->format == MALIDP_INVALID_FORMAT_ID) return -EINVAL; @@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, for (i = 0; i < ms->n_planes; i++) { u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated); - if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i)) - & (alignment - 1)) { + if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i)) + & (alignment - 1)) && !(fb->modifier)) { DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n", fb->pitches[i], i); return -EINVAL; @@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane, return -EINVAL; } + /* SMART layer does not support AFBC */ + if (mp->layer->id == DE_SMART && fb->modifier) { + DRM_ERROR("AFBC framebuffer not supported in SMART layer"); + return -EINVAL; + } + ms->rotmem_size = 0; if (state->rotation & MALIDP_ROTATED_MASK) { int val; val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, state->crtc_h, - fb->format->format); + fb->format->format, + !!(fb->modifier)); if (val < 0) return val; @@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp, mp->layer->base + mp->layer->mmu_ctrl_offset); } +static void malidp_set_plane_base_addr(struct drm_framebuffer *fb, + struct malidp_plane *mp, + int plane_index) +{ + dma_addr_t paddr; + u16 ptr; + struct drm_plane *plane = &mp->base; + bool afbc = fb->modifier ? true : false; + + ptr = mp->layer->ptr + (plane_index << 4); + + /* + * drm_fb_cma_get_gem_addr() alters the physical base address of the + * framebuffer as per the plane's src_x, src_y co-ordinates (ie to + * take care of source cropping). + * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H + * and _AD_CROP_V registers. 
+ */ + if (!afbc) { + paddr = drm_fb_cma_get_gem_addr(fb, plane->state, + plane_index); + } else { + struct drm_gem_cma_object *obj; + + obj = drm_fb_cma_get_gem_obj(fb, plane_index); + + if (WARN_ON(!obj)) + return; + paddr = obj->paddr; + } + + malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr); + malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4); +} + +static void malidp_de_set_plane_afbc(struct drm_plane *plane) +{ + struct malidp_plane *mp; + u32 src_w, src_h, val = 0, src_x, src_y; + struct drm_framebuffer *fb = plane->state->fb; + + mp = to_malidp_plane(plane); + + /* no afbc_decoder_offset means AFBC is not supported on this plane */ + if (!mp->layer->afbc_decoder_offset) + return; + + if (!fb->modifier) { + malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset); + return; + } + + /* convert src values from Q16 fixed point to integer */ + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + + val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) | + src_x; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H); + + val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) | + src_y; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V); + + val = MALIDP_AD_EN; + if (fb->modifier & AFBC_FORMAT_MOD_SPLIT) + val |= MALIDP_AD_BS; + if (fb->modifier & AFBC_FORMAT_MOD_YTR) + val |= MALIDP_AD_YTR; + + malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset); +} + static void malidp_de_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane, u8 plane_alpha = state->alpha >> 8; u32 src_w, src_h, dest_w, dest_h, val; int i; + struct drm_framebuffer *fb = plane->state->fb; mp = to_malidp_plane(plane); - /* convert src values from Q16 fixed point to integer */ - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + /* + * For AFBC framebuffer, use the framebuffer width and height for + * configuring layer input size register. 
+ */ + if (fb->modifier) { + src_w = fb->width; + src_h = fb->height; + } else { + /* convert src values from Q16 fixed point to integer */ + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + } + dest_w = state->crtc_w; dest_h = state->crtc_h; @@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, val = (val & ~LAYER_FORMAT_MASK) | ms->format; malidp_hw_write(mp->hwdev, val, mp->layer->base); - for (i = 0; i < ms->n_planes; i++) { - /* calculate the offset for the layer's plane registers */ - u16 ptr = mp->layer->ptr + (i << 4); - dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb, - state, i); - - malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr); - malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4); - } + for (i = 0; i < ms->n_planes; i++) + malidp_set_plane_base_addr(fb, mp, i); malidp_de_set_mmu_control(mp, ms); @@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, mp->layer->base + MALIDP550_LS_R1_IN_SIZE); } + malidp_de_set_plane_afbc(plane); + /* first clear the rotation bits */ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); val &= ~LAYER_ROT_MASK; @@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm) BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE); u32 *formats; - int ret, i, j, n; + int ret, i = 0, j = 0, n; + u64 supported_modifiers[MODIFIERS_COUNT_MAX]; + const u64 *modifiers; + + modifiers = malidp_format_modifiers; + + if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) { + /* + * Since the hardware does not support SPLIT, build the list + * of supported modifiers excluding the SPLIT ones. + */ + while (*modifiers != DRM_FORMAT_MOD_INVALID) { + if (!(*modifiers & AFBC_SPLIT)) + supported_modifiers[j++] = *modifiers; + + modifiers++; + } + supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID; + modifiers = supported_modifiers; + } formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL); if (!formats) { @@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm) plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + + /* + * All layers except the smart layer support AFBC modifiers. + */ ret = drm_universal_plane_init(drm, &plane->base, crtcs, - &malidp_de_plane_funcs, formats, - n, NULL, plane_type, NULL); + &malidp_de_plane_funcs, formats, n, + (id == DE_SMART) ? 
NULL : modifiers, plane_type, + NULL); + if (ret < 0) goto cleanup; diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 7ce3e141464d..a0dd6e1676a8 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -198,10 +198,13 @@ #define MALIDP500_LV_YUV2RGB ((s16)(-0xB8)) #define MALIDP500_DE_LV_BASE 0x00100 #define MALIDP500_DE_LV_PTR_BASE 0x00124 +#define MALIDP500_DE_LV_AD_CTRL 0x00400 #define MALIDP500_DE_LG1_BASE 0x00200 #define MALIDP500_DE_LG1_PTR_BASE 0x0021c +#define MALIDP500_DE_LG1_AD_CTRL 0x0040c #define MALIDP500_DE_LG2_BASE 0x00300 #define MALIDP500_DE_LG2_PTR_BASE 0x0031c +#define MALIDP500_DE_LG2_AD_CTRL 0x00418 #define MALIDP500_SE_BASE 0x00c00 #define MALIDP500_SE_CONTROL 0x00c0c #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c @@ -228,10 +231,13 @@ #define MALIDP550_LV_YUV2RGB 0x00084 #define MALIDP550_DE_LV1_BASE 0x00100 #define MALIDP550_DE_LV1_PTR_BASE 0x00124 +#define MALIDP550_DE_LV1_AD_CTRL 0x001B8 #define MALIDP550_DE_LV2_BASE 0x00200 #define MALIDP550_DE_LV2_PTR_BASE 0x00224 +#define MALIDP550_DE_LV2_AD_CTRL 0x002B8 #define MALIDP550_DE_LG_BASE 0x00300 #define MALIDP550_DE_LG_PTR_BASE 0x0031c +#define MALIDP550_DE_LG_AD_CTRL 0x00330 #define MALIDP550_DE_LS_BASE 0x00400 #define MALIDP550_DE_LS_PTR_BASE 0x0042c #define MALIDP550_DE_PERF_BASE 0x00500 @@ -258,6 +264,20 @@ #define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x))) #define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12) +/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */ +/* The following register offsets are common for DP500, DP550 and DP650 */ +#define MALIDP_AD_CROP_H 0x4 +#define MALIDP_AD_CROP_V 0x8 +#define MALIDP_AD_END_PTR_LOW 0xc +#define MALIDP_AD_END_PTR_HIGH 0x10 + +/* AFBC decoder Registers */ +#define MALIDP_AD_EN BIT(0) +#define MALIDP_AD_YTR BIT(4) +#define MALIDP_AD_BS BIT(8) +#define MALIDP_AD_CROP_RIGHT_OFFSET 16 +#define MALIDP_AD_CROP_BOTTOM_OFFSET 16 + /* * Starting with DP550 the register map blocks has been standardised to the * following layout: diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 9cd82e3631fb..9e7cd6b34106 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -214,20 +214,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con return MODE_OK; } -static struct drm_encoder * -bochs_connector_best_encoder(struct drm_connector *connector) -{ - int enc_id = connector->encoder_ids[0]; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); - return NULL; -} - static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { .get_modes = bochs_connector_get_modes, .mode_valid = bochs_connector_mode_valid, - .best_encoder = bochs_connector_best_encoder, }; static const struct drm_connector_funcs bochs_connector_connector_funcs = { diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index dc8ae98071b4..86efd2da37f9 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1752,7 +1752,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * * NOTE: Commit work has multiple phases, first hardware commit, then * cleanup. We want them to overlap, hence need system_unbound_wq to - * make sure work items don't artifically stall on each another. + * make sure work items don't artificially stall on each another. 
*/ drm_atomic_state_get(state); @@ -1786,7 +1786,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * * Asynchronous workers need to have sufficient parallelism to be able to run * different atomic commits on different CRTCs in parallel. The simplest way to - * achive this is by running them on the &system_unbound_wq work queue. Note + * achieve this is by running them on the &system_unbound_wq work queue. Note * that drivers are not required to split up atomic commits and run an * individual commit in parallel - userspace is supposed to do that if it cares. * But it might be beneficial to do that for modesets, since those necessarily diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 8fa77def577f..ea797d4c82ee 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -733,6 +733,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, return -EINVAL; } state->content_protection = val; + } else if (property == connector->colorspace_property) { + state->colorspace = val; } else if (property == config->writeback_fb_id_property) { struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); @@ -801,6 +803,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->picture_aspect_ratio; } else if (property == config->content_type_property) { *val = state->content_type; + } else if (property == connector->colorspace_property) { + *val = state->colorspace; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; } else if (property == connector->content_protection_property) { diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index dd40eff0911c..2355124849db 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->modes); mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; + connector->tile_blob_ptr = NULL; connector->status = connector_status_unknown; connector->display_info.panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; @@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev, drm_object_attach_property(&connector->base, config->non_desktop_property, 0); + drm_object_attach_property(&connector->base, + config->tile_property, + 0); if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); @@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = { }; DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) +static const struct drm_prop_enum_list hdmi_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + /* Standard Definition Colorimetry based on CEA 861 */ + { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" }, + { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ + { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 [33] */ + { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 */ + { 
DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, + /* Added as part of Additional Colorimetry Extension in 861.G */ + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, +}; + /** * DOC: standard connector properties * @@ -1548,6 +1579,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev) EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); /** + * DOC: standard connector properties + * + * Colorspace: + * drm_mode_create_colorspace_property - create colorspace property + * This property helps select a suitable colorspace based on the sink + * capability. Modern sink devices support wider gamut like BT2020. + * This helps switch to BT2020 mode if the BT2020 encoded video stream + * is being played by the user, same for any other colorspace. Thereby + * giving a good visual experience to users. + * + * The expectation from userspace is that it should parse the EDID + * and get supported colorspaces. Use this property and switch to the + * one supported. Sink supported colorspaces should be retrieved by + * userspace from EDID and driver will not explicitly expose them. + * + * Basically the expectation from userspace is: + * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink + * colorspace + * - Set this new property to let the sink know what it + * converted the CRTC output to. + * - This property is just to inform sink what colorspace + * source is trying to drive. + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + */ +int drm_mode_create_colorspace_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + "Colorspace", + hdmi_colorspaces, + ARRAY_SIZE(hdmi_colorspaces)); + if (!prop) + return -ENOMEM; + } else { + DRM_DEBUG_KMS("Colorspace property not supported\n"); + return 0; + } + + connector->colorspace_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_colorspace_property); + +/** * drm_mode_create_content_type_property - create content type property * @dev: DRM device * @@ -1634,6 +1716,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property); * This looks up the tile information for a connector, and creates a * property for userspace to parse if it exists. The property is of * the form of 8 integers using ':' as a separator. + * This is used for dual port tiled displays with DisplayPort SST + * or DisplayPort MST connectors. * * Returns: * Zero on success, errno on failure. @@ -1677,6 +1761,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property); * * This function creates a new blob modeset object and assigns its id to the * connector's edid property. + * Since we also parse tile information from EDID's displayID block, we also + * set the connector's tile property here. See drm_connector_set_tile_property() + * for more details. * * Returns: * Zero on success, negative errno on failure. 
@@ -1718,7 +1805,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector, edid, &connector->base, dev->mode_config.edid_property); - return ret; + if (ret) + return ret; + return drm_connector_set_tile_property(connector); } EXPORT_SYMBOL(drm_connector_update_edid_property); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index dc7ac0c60547..c630ed157994 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ edid = drm_edid_duplicate(port->cached_edid); else { edid = drm_get_edid(connector, &port->aux.ddc); - drm_connector_set_tile_property(connector); } port->has_audio = drm_detect_monitor_audio(edid); drm_dp_mst_topology_put_port(port); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 381581b01d48..50d849d1bc6e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor) * Note that the lifetime rules for &drm_device instance has still a lot of * historical baggage. Hence use the reference counting provided by * drm_dev_get() and drm_dev_put() only carefully. + * + * Display driver example + * ~~~~~~~~~~~~~~~~~~~~~~ + * + * The following example shows a typical structure of a DRM display driver. + * The example focuses on the probe() function and the other functions that are + * almost always present, and serves as a demonstration of devm_drm_dev_init() + * usage with its accompanying drm_driver->release callback. + * + * .. code-block:: c + * + * struct driver_device { + * struct drm_device drm; + * void *userspace_facing; + * struct clk *pclk; + * }; + * + * static void driver_drm_release(struct drm_device *drm) + * { + * struct driver_device *priv = container_of(...); + * + * drm_mode_config_cleanup(drm); + * drm_dev_fini(drm); + * kfree(priv->userspace_facing); + * kfree(priv); + * } + * + * static struct drm_driver driver_drm_driver = { + * [...] + * .release = driver_drm_release, + * }; + * + * static int driver_probe(struct platform_device *pdev) + * { + * struct driver_device *priv; + * struct drm_device *drm; + * int ret; + * + * [ + * devm_kzalloc() can't be used here because the drm_device + * lifetime can exceed the device lifetime if driver unbind + * happens when userspace still has open file descriptors. 
+ * ] + * priv = kzalloc(sizeof(*priv), GFP_KERNEL); + * if (!priv) + * return -ENOMEM; + * + * drm = &priv->drm; + * + * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); + * if (ret) { + * kfree(drm); + * return ret; + * } + * + * drm_mode_config_init(drm); + * + * priv->userspace_facing = kzalloc(..., GFP_KERNEL); + * if (!priv->userspace_facing) + * return -ENOMEM; + * + * priv->pclk = devm_clk_get(dev, "PCLK"); + * if (IS_ERR(priv->pclk)) + * return PTR_ERR(priv->pclk); + * + * [ Further setup, display pipeline etc ] + * + * platform_set_drvdata(pdev, drm); + * + * drm_mode_config_reset(drm); + * + * ret = drm_dev_register(drm); + * if (ret) + * return ret; + * + * drm_fbdev_generic_setup(drm, 32); + * + * return 0; + * } + * + * [ This function is called before the devm_ resources are released ] + * static int driver_remove(struct platform_device *pdev) + * { + * struct drm_device *drm = platform_get_drvdata(pdev); + * + * drm_dev_unregister(drm); + * drm_atomic_helper_shutdown(drm); + * + * return 0; + * } + * + * [ This function is called on kernel restart and shutdown ] + * static void driver_shutdown(struct platform_device *pdev) + * { + * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); + * } + * + * static int __maybe_unused driver_pm_suspend(struct device *dev) + * { + * return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); + * } + * + * static int __maybe_unused driver_pm_resume(struct device *dev) + * { + * drm_mode_config_helper_resume(dev_get_drvdata(dev)); + * + * return 0; + * } + * + * static const struct dev_pm_ops driver_pm_ops = { + * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume) + * }; + * + * static struct platform_driver driver_driver = { + * .driver = { + * [...] + * .pm = &driver_pm_ops, + * }, + * .probe = driver_probe, + * .remove = driver_remove, + * .shutdown = driver_shutdown, + * }; + * module_platform_driver(driver_driver); + * + * Drivers that want to support device unplugging (USB, DT overlay unload) should + * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect + * regions that access device resources to prevent use after they're + * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one + * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before + * drm_atomic_helper_shutdown() is called. This means that if the disable code + * paths are protected, they will not run on regular driver module unload, + * possibly leaving the hardware enabled. */ /** @@ -376,11 +508,6 @@ void drm_dev_unplug(struct drm_device *dev) synchronize_srcu(&drm_unplug_srcu); drm_dev_unregister(dev); - - mutex_lock(&drm_global_mutex); - if (dev->open_count == 0) - drm_dev_put(dev); - mutex_unlock(&drm_global_mutex); } EXPORT_SYMBOL(drm_dev_unplug); @@ -457,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode) } /** + * DOC: component helper usage recommendations + * + * DRM drivers that drive hardware where a logical device consists of a pile of + * independent hardware blocks are recommended to use the :ref:`component helper + * library<component>`. For consistency and better options for code reuse the + * following guidelines apply: + * + * - The entire device initialization procedure should be run from the + * &component_master_ops.master_bind callback, starting with drm_dev_init(), + * then binding all components with component_bind_all() and finishing with + * drm_dev_register(). 
+ * + * - The opaque pointer passed to all components through component_bind_all() + * should point at &struct drm_device of the device instance, not some driver + * specific private structure. + * + * - The component helper fills the niche where further standardization of + * interfaces is not practical. When there already is, or will be, a + * standardized interface like &drm_bridge or &drm_panel, providing its own + * functions to find such components at driver load time, like + * drm_of_find_panel_or_bridge(), then the component helper should not be + * used. + */ + +/** * drm_dev_init - Initialise new DRM device * @dev: DRM device * @driver: DRM driver @@ -501,7 +653,7 @@ int drm_dev_init(struct drm_device *dev, BUG_ON(!parent); kref_init(&dev->ref); - dev->dev = parent; + dev->dev = get_device(parent); dev->driver = driver; /* no per-device feature limits by default */ @@ -571,6 +723,7 @@ err_minors: drm_minor_free(dev, DRM_MINOR_RENDER); drm_fs_inode_free(dev->anon_inode); err_free: + put_device(dev->dev); mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); @@ -580,6 +733,45 @@ err_free: } EXPORT_SYMBOL(drm_dev_init); +static void devm_drm_dev_init_release(void *data) +{ + drm_dev_put(data); +} + +/** + * devm_drm_dev_init - Resource managed drm_dev_init() + * @parent: Parent device object + * @dev: DRM device + * @driver: DRM driver + * + * Managed drm_dev_init(). The DRM device initialized with this function is + * automatically put on driver detach using drm_dev_put(). You must supply a + * &drm_driver.release callback to control the finalization explicitly. + * + * RETURNS: + * 0 on success, or error code on failure. + */ +int devm_drm_dev_init(struct device *parent, + struct drm_device *dev, + struct drm_driver *driver) +{ + int ret; + + if (WARN_ON(!parent || !driver->release)) + return -EINVAL; + + ret = drm_dev_init(dev, driver, parent); + if (ret) + return ret; + + ret = devm_add_action(parent, devm_drm_dev_init_release, dev); + if (ret) + devm_drm_dev_init_release(dev); + + return ret; +} +EXPORT_SYMBOL(devm_drm_dev_init); + /** * drm_dev_fini - Finalize a dead DRM device * @dev: DRM device @@ -606,6 +798,8 @@ void drm_dev_fini(struct drm_device *dev) drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); + put_device(dev->dev); + mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index bce99f95c1a3..77f4e5ae4197 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/byteorder/generic.h> +#include <drm/drm_print.h> #include <drm/drm_dp_helper.h> #include <drm/drm_dsc.h> @@ -31,75 +32,74 @@ /** * drm_dsc_dp_pps_header_init() - Initializes the PPS Header * for DisplayPort as per the DP 1.4 spec. - * @pps_sdp: Secondary data packet for DSC Picture Parameter Set - * as defined in &struct drm_dsc_pps_infoframe + * @pps_header: Secondary data packet header for DSC Picture + * Parameter Set as defined in &struct dp_sdp_header * * DP 1.4 spec defines the secondary data packet for sending the * picture parameter infoframes from the source to the sink. - * This function populates the pps header defined in - * &struct drm_dsc_pps_infoframe as per the header bytes defined - * in &struct dp_sdp_header. + * This function populates the SDP header defined in + * &struct dp_sdp_header. 
*/ -void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp) +void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header) { - memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header)); + memset(pps_header, 0, sizeof(*pps_header)); - pps_sdp->pps_header.HB1 = DP_SDP_PPS; - pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1; + pps_header->HB1 = DP_SDP_PPS; + pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1; } EXPORT_SYMBOL(drm_dsc_dp_pps_header_init); /** - * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe + * drm_dsc_pps_payload_pack() - Populates the DSC PPS * - * @pps_sdp: - * Secondary data packet for DSC Picture Parameter Set. This is defined - * by &struct drm_dsc_pps_infoframe + * @pps_payload: + * Bitwise struct for DSC Picture Parameter Set. This is defined + * by &struct drm_dsc_picture_parameter_set * @dsc_cfg: * DSC Configuration data filled by driver as defined by * &struct drm_dsc_config * - * DSC source device sends a secondary data packet filled with all the - * picture parameter set (PPS) information required by the sink to decode - * the compressed frame. Driver populates the dsC PPS infoframe using the DSC - * configuration parameters in the order expected by the DSC Display Sink - * device. For the DSC, the sink device expects the PPS payload in the big - * endian format for the fields that span more than 1 byte. + * DSC source device sends a picture parameter set (PPS) containing the + * information required by the sink to decode the compressed frame. Driver + * populates the DSC PPS struct using the DSC configuration parameters in + * the order expected by the DSC Display Sink device. For the DSC, the sink + * device expects the PPS payload in big endian format for fields + * that span more than 1 byte. 
*/ -void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, +void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, const struct drm_dsc_config *dsc_cfg) { int i; /* Protect against someone accidently changing struct size */ - BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) != + BUILD_BUG_ON(sizeof(*pps_payload) != DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1); - memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload)); + memset(pps_payload, 0, sizeof(*pps_payload)); /* PPS 0 */ - pps_sdp->pps_payload.dsc_version = + pps_payload->dsc_version = dsc_cfg->dsc_version_minor | dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT; /* PPS 1, 2 is 0 */ /* PPS 3 */ - pps_sdp->pps_payload.pps_3 = + pps_payload->pps_3 = dsc_cfg->line_buf_depth | dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT; /* PPS 4 */ - pps_sdp->pps_payload.pps_4 = + pps_payload->pps_4 = ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >> DSC_PPS_MSB_SHIFT) | dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT | - dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT | + dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT | dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT | dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT; /* PPS 5 */ - pps_sdp->pps_payload.bits_per_pixel_low = + pps_payload->bits_per_pixel_low = (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK); /* @@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, */ /* PPS 6, 7 */ - pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height); + pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height); /* PPS 8, 9 */ - pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width); + pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width); /* PPS 10, 11 */ - pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height); + pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height); /* PPS 12, 13 */ - pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width); + pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width); /* PPS 14, 15 */ - pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size); + pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size); /* PPS 16 */ - pps_sdp->pps_payload.initial_xmit_delay_high = + pps_payload->initial_xmit_delay_high = ((dsc_cfg->initial_xmit_delay & DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >> DSC_PPS_MSB_SHIFT); /* PPS 17 */ - pps_sdp->pps_payload.initial_xmit_delay_low = + pps_payload->initial_xmit_delay_low = (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK); /* PPS 18, 19 */ - pps_sdp->pps_payload.initial_dec_delay = + pps_payload->initial_dec_delay = cpu_to_be16(dsc_cfg->initial_dec_delay); /* PPS 20 is 0 */ /* PPS 21 */ - pps_sdp->pps_payload.initial_scale_value = + pps_payload->initial_scale_value = dsc_cfg->initial_scale_value; /* PPS 22, 23 */ - pps_sdp->pps_payload.scale_increment_interval = + pps_payload->scale_increment_interval = cpu_to_be16(dsc_cfg->scale_increment_interval); /* PPS 24 */ - pps_sdp->pps_payload.scale_decrement_interval_high = + pps_payload->scale_decrement_interval_high = ((dsc_cfg->scale_decrement_interval & DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >> DSC_PPS_MSB_SHIFT); /* PPS 25 */ - pps_sdp->pps_payload.scale_decrement_interval_low = + pps_payload->scale_decrement_interval_low = (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK); /* PPS 26[7:0], PPS 27[7:5] RESERVED */ /* PPS 27 */ - pps_sdp->pps_payload.first_line_bpg_offset = + pps_payload->first_line_bpg_offset = 
dsc_cfg->first_line_bpg_offset; /* PPS 28, 29 */ - pps_sdp->pps_payload.nfl_bpg_offset = + pps_payload->nfl_bpg_offset = cpu_to_be16(dsc_cfg->nfl_bpg_offset); /* PPS 30, 31 */ - pps_sdp->pps_payload.slice_bpg_offset = + pps_payload->slice_bpg_offset = cpu_to_be16(dsc_cfg->slice_bpg_offset); /* PPS 32, 33 */ - pps_sdp->pps_payload.initial_offset = + pps_payload->initial_offset = cpu_to_be16(dsc_cfg->initial_offset); /* PPS 34, 35 */ - pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset); + pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset); /* PPS 36 */ - pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp; + pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp; /* PPS 37 */ - pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp; + pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp; /* PPS 38, 39 */ - pps_sdp->pps_payload.rc_model_size = + pps_payload->rc_model_size = cpu_to_be16(DSC_RC_MODEL_SIZE_CONST); /* PPS 40 */ - pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; + pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; /* PPS 41 */ - pps_sdp->pps_payload.rc_quant_incr_limit0 = + pps_payload->rc_quant_incr_limit0 = dsc_cfg->rc_quant_incr_limit0; /* PPS 42 */ - pps_sdp->pps_payload.rc_quant_incr_limit1 = + pps_payload->rc_quant_incr_limit1 = dsc_cfg->rc_quant_incr_limit1; /* PPS 43 */ - pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST | + pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST | DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT; /* PPS 44 - 57 */ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) - pps_sdp->pps_payload.rc_buf_thresh[i] = + pps_payload->rc_buf_thresh[i] = dsc_cfg->rc_buf_thresh[i]; /* PPS 58 - 87 */ @@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0] */ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { - pps_sdp->pps_payload.rc_range_parameters[i] = + pps_payload->rc_range_parameters[i] = ((dsc_cfg->rc_range_params[i].range_min_qp << DSC_PPS_RC_RANGE_MINQP_SHIFT) | (dsc_cfg->rc_range_params[i].range_max_qp << DSC_PPS_RC_RANGE_MAXQP_SHIFT) | (dsc_cfg->rc_range_params[i].range_bpg_offset)); - pps_sdp->pps_payload.rc_range_parameters[i] = - cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]); + pps_payload->rc_range_parameters[i] = + cpu_to_be16(pps_payload->rc_range_parameters[i]); } /* PPS 88 */ - pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 | + pps_payload->native_422_420 = dsc_cfg->native_422 | dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT; /* PPS 89 */ - pps_sdp->pps_payload.second_line_bpg_offset = + pps_payload->second_line_bpg_offset = dsc_cfg->second_line_bpg_offset; /* PPS 90, 91 */ - pps_sdp->pps_payload.nsl_bpg_offset = + pps_payload->nsl_bpg_offset = cpu_to_be16(dsc_cfg->nsl_bpg_offset); /* PPS 92, 93 */ - pps_sdp->pps_payload.second_line_offset_adj = + pps_payload->second_line_offset_adj = cpu_to_be16(dsc_cfg->second_line_offset_adj); /* PPS 94 - 127 are O */ } -EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack); +EXPORT_SYMBOL(drm_dsc_pps_payload_pack); + +/** + * drm_dsc_compute_rc_parameters() - Write rate control + * parameters to the dsc configuration defined in + * &struct drm_dsc_config in accordance with the DSC 1.2 + * specification. Some configuration fields must be present + * beforehand. 
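+ *
+ * In particular, slice_width, slice_height, bits_per_pixel,
+ * bits_per_component, convert_rgb/native_422/native_420, mux_word_size,
+ * rc_model_size, initial_xmit_delay, initial_offset, initial_scale_value
+ * and first_line_bpg_offset are read below, while slice_chunk_size,
+ * scale_decrement_interval, final_offset, nfl_bpg_offset, slice_bpg_offset,
+ * scale_increment_interval, rc_bits and initial_dec_delay are computed and
+ * written back (derived from the computation below; not an exhaustive list).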
+ * + * @vdsc_cfg: + * DSC Configuration data partially filled by driver + */ +int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) +{ + unsigned long groups_per_line = 0; + unsigned long groups_total = 0; + unsigned long num_extra_mux_bits = 0; + unsigned long slice_bits = 0; + unsigned long hrd_delay = 0; + unsigned long final_scale = 0; + unsigned long rbs_min = 0; + + if (vdsc_cfg->native_420 || vdsc_cfg->native_422) { + /* Number of groups used to code each line of a slice */ + groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2, + DSC_RC_PIXELS_PER_GROUP); + + /* chunksize in Bytes */ + vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 * + vdsc_cfg->bits_per_pixel, + (8 * 16)); + } else { + /* Number of groups used to code each line of a slice */ + groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, + DSC_RC_PIXELS_PER_GROUP); + + /* chunksize in Bytes */ + vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * + vdsc_cfg->bits_per_pixel, + (8 * 16)); + } + + if (vdsc_cfg->convert_rgb) + num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + - 2); + else if (vdsc_cfg->native_422) + num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + + 3 * (4 * vdsc_cfg->bits_per_component) - 2; + else + num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + + 2 * (4 * vdsc_cfg->bits_per_component) - 2; + /* Number of bits in one Slice */ + slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; + + while ((num_extra_mux_bits > 0) && + ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) + num_extra_mux_bits--; + + if (groups_per_line < vdsc_cfg->initial_scale_value - 8) + vdsc_cfg->initial_scale_value = groups_per_line + 8; + + /* scale_decrement_interval calculation according to DSC spec 1.11 */ + if (vdsc_cfg->initial_scale_value > 8) + vdsc_cfg->scale_decrement_interval = groups_per_line / + (vdsc_cfg->initial_scale_value - 8); + else + vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; + + vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - + (vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; + + if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { + DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); + return -ERANGE; + } + + final_scale = (vdsc_cfg->rc_model_size * 8) / + (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); + if (vdsc_cfg->slice_height > 1) + /* + * NflBpgOffset is 16 bit value with 11 fractional bits + * hence we multiply by 2^11 for preserving the + * fractional part + */ + vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), + (vdsc_cfg->slice_height - 1)); + else + vdsc_cfg->nfl_bpg_offset = 0; + + /* 2^16 - 1 */ + if (vdsc_cfg->nfl_bpg_offset > 65535) { + DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); + return -ERANGE; + } + + /* Number of groups used to code the entire slice */ + groups_total = groups_per_line * vdsc_cfg->slice_height; + + /* slice_bpg_offset is 16 bit value with 11 fractional bits */ + vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - + vdsc_cfg->initial_offset + + num_extra_mux_bits) << 11), + groups_total); + + if (final_scale > 9) { + /* + * ScaleIncrementInterval = + * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125)) + * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value, + * we need 
divide by 2^11 from pstDscCfg values + */ + vdsc_cfg->scale_increment_interval = + (vdsc_cfg->final_offset * (1 << 11)) / + ((vdsc_cfg->nfl_bpg_offset + + vdsc_cfg->slice_bpg_offset) * + (final_scale - 9)); + } else { + /* + * If finalScaleValue is less than or equal to 9, a value of 0 should + * be used to disable the scale increment at the end of the slice + */ + vdsc_cfg->scale_increment_interval = 0; + } + + if (vdsc_cfg->scale_increment_interval > 65535) { + DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); + return -ERANGE; + } + + /* + * DSC spec mentions that bits_per_pixel specifies the target + * bits/pixel (bpp) rate that is used by the encoder, + * in steps of 1/16 of a bit per pixel + */ + rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel, 16) + + groups_per_line * vdsc_cfg->first_line_bpg_offset; + + hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); + vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; + vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + + return 0; +} +EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 990b1909f9d7..fa39592ebc0a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -193,6 +193,12 @@ static const struct edid_quirk { /* Sony PlayStation VR Headset */ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, + + /* Sensics VR Headsets */ + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP }, + + /* OSVR HDK and HDK2 VR Headsets */ + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP }, }; /* @@ -4924,6 +4930,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); +/* HDMI Colorspace Spec Definitions */ +#define FULL_COLORIMETRY_MASK 0x1FF +#define NORMAL_COLORIMETRY_MASK 0x3 +#define EXTENDED_COLORIMETRY_MASK 0x7 +#define EXTENDED_ACE_COLORIMETRY_MASK 0xF + +#define C(x) ((x) << 0) +#define EC(x) ((x) << 2) +#define ACE(x) ((x) << 5) + +#define HDMI_COLORIMETRY_NO_DATA 0x0 +#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0)) +#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0)) +#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0)) +#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0)) +#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0)) +#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1)) + +static const u32 hdmi_colorimetry_val[] = { + [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA, + [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC, + [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC, + [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601, + [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709, + [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601, + [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601, + [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB, + [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC, + [DRM_MODE_COLORIMETRY_BT2020_RGB] 
= HDMI_COLORIMETRY_BT2020_RGB, + [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC, +}; + +#undef C +#undef EC +#undef ACE + +/** + * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe + * colorspace information + * @frame: HDMI AVI infoframe + * @conn_state: connector state + */ +void +drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state) +{ + u32 colorimetry_val; + u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK; + + if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val)) + colorimetry_val = HDMI_COLORIMETRY_NO_DATA; + else + colorimetry_val = hdmi_colorimetry_val[colorimetry_index]; + + frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK; + /* + * ToDo: Extend it for ACE formats as well. Modify the infoframe + * structure and extend it in drivers/video/hdmi + */ + frame->extended_colorimetry = (colorimetry_val >> 2) & + EXTENDED_COLORIMETRY_MASK; +} +EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace); + /** * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe * quantization range information diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0e9349ff2d16..04d23cb430bf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -3024,7 +3024,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - if (!try_module_get(fb_helper->dev->driver->fops->owner)) + /* No need to take a ref for fbcon because it unbinds on unregister */ + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) return -ENODEV; return 0; @@ -3034,7 +3035,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - module_put(fb_helper->dev->driver->fops->owner); + if (user) + module_put(fb_helper->dev->driver->fops->owner); return 0; } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 83a5bbca6e7e..9701469a6e93 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp) drm_close_helper(filp); - if (!--dev->open_count) { + if (!--dev->open_count) drm_lastclose(dev); - if (drm_dev_is_unplugged(dev)) - drm_put_dev(dev); - } + mutex_unlock(&drm_global_mutex); drm_minor_release(minor); @@ -579,6 +577,7 @@ put_back_event: file_priv->event_space -= length; list_add(&e->link, &file_priv->event_list); spin_unlock_irq(&dev->event_lock); + wake_up_interruptible(&file_priv->event_wait); break; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index ba7e19d4336c..6ea55fb4526d 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -198,6 +198,10 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_ARGB16161616F, .depth = 
0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, @@ -225,7 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true }, @@ -247,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true}, + { .format = DRM_FORMAT_P210, .depth = 0, + .num_planes = 2, .char_per_block = { 2, 4, 0 }, + .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY101010, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, }; unsigned int i; diff --git a/drivers/gpu/drm/drm_gem.c 
b/drivers/gpu/drm/drm_gem.c index d0b9f6a9953f..388b3742e562 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -171,6 +171,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
 	kref_init(&obj->refcount);
 	obj->handle_count = 0;
 	obj->size = size;
+	reservation_object_init(&obj->_resv);
+	if (!obj->resv)
+		obj->resv = &obj->_resv;
+
 	drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -688,6 +692,44 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
+ * drm_gem_reservation_object_wait - Wait on a GEM object's reservation
+ * object's shared and/or exclusive fences.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than 0 on success.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+				     bool wait_all, unsigned long timeout)
+{
+	long ret;
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(filep, handle);
+	if (!obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+		return -EINVAL;
+	}
+
+	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+						  true, timeout);
+	if (ret == 0)
+		ret = -ETIME;
+	else if (ret > 0)
+		ret = 0;
+
+	drm_gem_object_put_unlocked(obj);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+
+/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
@@ -851,6 +893,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
 	if (obj->filp)
 		fput(obj->filp);
 
+	reservation_object_fini(&obj->_resv);
 	drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1233,81 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
 		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
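+ *
+ * A hedged sketch of the intended call pattern (hypothetical driver
+ * submission path, error handling elided; @objs and @count come from the
+ * driver's job)::
+ *
+ *	struct ww_acquire_ctx ctx;
+ *	int ret;
+ *
+ *	ret = drm_gem_lock_reservations(objs, count, &ctx);
+ *	if (ret)
+ *		return ret;
+ *	... reserve fence slots, attach fences, submit the job ...
+ *	drm_gem_unlock_reservations(objs, count, &ctx);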
+ */ +int +drm_gem_lock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int contended = -1; + int i, ret; + + ww_acquire_init(acquire_ctx, &reservation_ww_class); + +retry: + if (contended != -1) { + struct drm_gem_object *obj = objs[contended]; + + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, + acquire_ctx); + if (ret) { + ww_acquire_done(acquire_ctx); + return ret; + } + } + + for (i = 0; i < count; i++) { + if (i == contended) + continue; + + ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, + acquire_ctx); + if (ret) { + int j; + + for (j = 0; j < i; j++) + ww_mutex_unlock(&objs[j]->resv->lock); + + if (contended != -1 && contended >= i) + ww_mutex_unlock(&objs[contended]->resv->lock); + + if (ret == -EDEADLK) { + contended = i; + goto retry; + } + + ww_acquire_done(acquire_ctx); + return ret; + } + } + + ww_acquire_done(acquire_ctx); + + return 0; +} +EXPORT_SYMBOL(drm_gem_lock_reservations); + +void +drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int i; + + for (i = 0; i < count; i++) + ww_mutex_unlock(&objs[i]->resv->lock); + + ww_acquire_fini(acquire_ctx); +} +EXPORT_SYMBOL(drm_gem_unlock_reservations); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c new file mode 100644 index 000000000000..3750a982aaf6 --- /dev/null +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/dma-buf.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/shmem_fs.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_prime.h> +#include <drm/drm_print.h> + +/** + * DOC: overview + * + * This library provides helpers for GEM objects backed by shmem buffers + * allocated using anonymous pageable memory. + */ + +static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { + .free = drm_gem_shmem_free_object, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/** + * drm_gem_shmem_create - Allocate an object with the given size + * @dev: DRM device + * @size: Size of the object to allocate + * + * This function creates a shmem GEM object. + * + * Returns: + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative + * error code on failure. + */ +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) +{ + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + int ret; + + size = PAGE_ALIGN(size); + + if (dev->driver->gem_create_object) + obj = dev->driver->gem_create_object(dev, size); + else + obj = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + if (!obj->funcs) + obj->funcs = &drm_gem_shmem_funcs; + + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto err_free; + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto err_release; + + shmem = to_drm_gem_shmem_obj(obj); + mutex_init(&shmem->pages_lock); + mutex_init(&shmem->vmap_lock); + + /* + * Our buffers are kept pinned, so allocating them + * from the MOVABLE zone is a really bad idea, and + * conflicts with CMA. 
See comments above new_inode() + * why this is required _and_ expected if you're + * going to pin these pages. + */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + + return shmem; + +err_release: + drm_gem_object_release(obj); +err_free: + kfree(obj); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_create); + +/** + * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object + * @obj: GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. + */ +void drm_gem_shmem_free_object(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + WARN_ON(shmem->vmap_use_count); + + if (obj->import_attach) { + shmem->pages_use_count--; + drm_prime_gem_destroy(obj, shmem->sgt); + kvfree(shmem->pages); + } else { + if (shmem->sgt) { + dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, + shmem->sgt->nents, DMA_BIDIRECTIONAL); + + drm_gem_shmem_put_pages(shmem); + sg_free_table(shmem->sgt); + kfree(shmem->sgt); + } + } + + WARN_ON(shmem->pages_use_count); + + drm_gem_object_release(obj); + mutex_destroy(&shmem->pages_lock); + mutex_destroy(&shmem->vmap_lock); + kfree(shmem); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object); + +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + struct page **pages; + + if (shmem->pages_use_count++ > 0) + return 0; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) { + DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages)); + shmem->pages_use_count = 0; + return PTR_ERR(pages); + } + + shmem->pages = pages; + + return 0; +} + +/* + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object + * @shmem: shmem GEM object + * + * This function makes sure that backing pages exists for the shmem GEM object + * and increases the use count. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +{ + int ret; + + ret = mutex_lock_interruptible(&shmem->pages_lock); + if (ret) + return ret; + ret = drm_gem_shmem_get_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); + + return ret; +} +EXPORT_SYMBOL(drm_gem_shmem_get_pages); + +static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + + if (WARN_ON_ONCE(!shmem->pages_use_count)) + return; + + if (--shmem->pages_use_count > 0) + return; + + drm_gem_put_pages(obj, shmem->pages, + shmem->pages_mark_dirty_on_put, + shmem->pages_mark_accessed_on_put); + shmem->pages = NULL; +} + +/* + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * @shmem: shmem GEM object + * + * This function decreases the use count and puts the backing pages when use drops to zero. + */ +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +{ + mutex_lock(&shmem->pages_lock); + drm_gem_shmem_put_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); +} +EXPORT_SYMBOL(drm_gem_shmem_put_pages); + +/** + * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object + * @obj: GEM object + * + * This function makes sure the backing pages are pinned in memory while the + * buffer is exported. + * + * Returns: + * 0 on success or a negative error code on failure. 
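+ *
+ * Drivers normally do not call this directly; it is exported so it can be
+ * wired up as the &drm_gem_object_funcs.pin hook, as drm_gem_shmem_funcs
+ * does above.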
+ */
+int drm_gem_shmem_pin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_pin);
+
+/**
+ * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory.
+ */
+void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_unpin);
+
+static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	int ret;
+
+	if (shmem->vmap_use_count++ > 0)
+		return shmem->vaddr;
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_zero_use;
+
+	if (obj->import_attach)
+		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	else
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+
+	if (!shmem->vaddr) {
+		DRM_DEBUG_KMS("Failed to vmap pages\n");
+		ret = -ENOMEM;
+		goto err_put_pages;
+	}
+
+	return shmem->vaddr;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+	shmem->vmap_use_count = 0;
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * The kernel virtual address on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	void *vaddr;
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->vmap_lock);
+	if (ret)
+		return ERR_PTR(ret);
+	vaddr = drm_gem_shmem_vmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+
+	return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+
+	if (WARN_ON_ONCE(!shmem->vmap_use_count))
+		return;
+
+	if (--shmem->vmap_use_count > 0)
+		return;
+
+	if (obj->import_attach)
+		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+	else
+		vunmap(shmem->vaddr);
+
+	shmem->vaddr = NULL;
+	drm_gem_shmem_put_pages(shmem);
+}
+
+/*
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the virtual address when use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	mutex_lock(&shmem->vmap_lock);
+	drm_gem_shmem_vunmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+
+struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+				 struct drm_device *dev, size_t size,
+				 uint32_t *handle)
+{
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return shmem;
+
+	/*
+	 * Allocate an id in the idr table where the obj is registered;
+	 * the handle is the id that userspace can use to refer to the object.
+	 */
+	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+	/* drop reference from allocate - handle holds it now.
*/ + drm_gem_object_put_unlocked(&shmem->base); + if (ret) + return ERR_PTR(ret); + + return shmem; +} +EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); + +/** + * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object + * @file: DRM file structure to create the dumb buffer for + * @dev: DRM device + * @args: IOCTL data + * + * This function computes the pitch of the dumb buffer and rounds it up to an + * integer number of bytes per pixel. Drivers for hardware that doesn't have + * any additional restrictions on the pitch can directly use this function as + * their &drm_driver.dumb_create callback. + * + * For hardware with additional restrictions, drivers can adjust the fields + * set up by userspace before calling into this function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct drm_gem_shmem_object *shmem; + + if (!args->pitch || !args->size) { + args->pitch = min_pitch; + args->size = args->pitch * args->height; + } else { + /* ensure sane minimum values */ + if (args->pitch < min_pitch) + args->pitch = min_pitch; + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + } + + shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); + + return PTR_ERR_OR_ZERO(shmem); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create); + +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + loff_t num_pages = obj->size >> PAGE_SHIFT; + struct page *page; + + if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages)) + return VM_FAULT_SIGBUS; + + page = shmem->pages[vmf->pgoff]; + + return vmf_insert_page(vma, vmf->address, page); +} + +static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + int ret; + + ret = drm_gem_shmem_get_pages(shmem); + WARN_ON_ONCE(ret != 0); + + drm_gem_vm_open(vma); +} + +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_put_pages(shmem); + drm_gem_vm_close(vma); +} + +const struct vm_operations_struct drm_gem_shmem_vm_ops = { + .fault = drm_gem_shmem_fault, + .open = drm_gem_shmem_vm_open, + .close = drm_gem_shmem_vm_close, +}; +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops); + +/** + * drm_gem_shmem_mmap - Memory-map a shmem GEM object + * @filp: File object + * @vma: VMA for the area to be mapped + * + * This function implements an augmented version of the GEM DRM file mmap + * operation for shmem objects. Drivers which employ the shmem helpers should + * use this function as their &file_operations.mmap handler in the DRM device file's + * file_operations structure. + * + * Instead of directly referencing this function, drivers should use the + * DEFINE_DRM_GEM_SHMEM_FOPS() macro. + * + * Returns: + * 0 on success or a negative error code on failure. 
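+ *
+ * A minimal wiring sketch (hypothetical driver, using the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro mentioned above, assumed to expand to a
+ * static &struct file_operations definition with the given name)::
+ *
+ *	DEFINE_DRM_GEM_SHMEM_FOPS(mydrv_fops);
+ *
+ *	static struct drm_driver mydrv_driver = {
+ *		.fops = &mydrv_fops,
+ *		...
+ *	};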
+ */ +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_shmem_object *shmem; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + shmem = to_drm_gem_shmem_obj(vma->vm_private_data); + + ret = drm_gem_shmem_get_pages(shmem); + if (ret) { + drm_gem_vm_close(vma); + return ret; + } + + /* VM_PFNMAP was set by drm_gem_mmap() */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + /* Remove the fake offset */ + vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); + +/** + * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object + */ +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); + drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); + drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); +} +EXPORT_SYMBOL(drm_gem_shmem_print_info); + +/** + * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned + * pages for a shmem GEM object + * @obj: GEM object + * + * This function exports a scatter/gather table suitable for PRIME usage by + * calling the standard DMA mapping API. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); + +/** + * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a + * scatter/gather table for a shmem GEM object. + * @obj: GEM object + * + * This function returns a scatter/gather table suitable for driver usage. If + * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg + * table created. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or errno on failure. + */ +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) +{ + int ret; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct sg_table *sgt; + + if (shmem->sgt) + return shmem->sgt; + + WARN_ON(obj->import_attach); + + ret = drm_gem_shmem_get_pages(shmem); + if (ret) + return ERR_PTR(ret); + + sgt = drm_gem_shmem_get_sg_table(&shmem->base); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err_put_pages; + } + /* Map the pages for use by the h/w. */ + dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + + shmem->sgt = sgt; + + return sgt; + +err_put_pages: + drm_gem_shmem_put_pages(shmem); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt); + +/** + * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from + * another driver's scatter/gather table of pinned pages + * @dev: Device to import into + * @attach: DMA-BUF attachment + * @sgt: Scatter/gather table of pinned pages + * + * This function imports a scatter/gather table exported via DMA-BUF by + * another driver. Drivers that use the shmem helpers should set this as their + * &drm_driver.gem_prime_import_sg_table callback. 
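+ *
+ * Note that the imported pages are treated as permanently pinned:
+ * pages_use_count starts at 1 below, and drm_gem_shmem_free_object() drops
+ * that reference again when the object is destroyed.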
+ * + * Returns: + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative + * error code on failure. + */ +struct drm_gem_object * +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + size_t size = PAGE_ALIGN(attach->dmabuf->size); + size_t npages = size >> PAGE_SHIFT; + struct drm_gem_shmem_object *shmem; + int ret; + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return ERR_CAST(shmem); + + shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (!shmem->pages) { + ret = -ENOMEM; + goto err_free_gem; + } + + ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages); + if (ret < 0) + goto err_free_array; + + shmem->sgt = sgt; + shmem->pages_use_count = 1; /* Permanently pinned from our point of view */ + + DRM_DEBUG_PRIME("size = %zu\n", size); + + return &shmem->base; + +err_free_array: + kvfree(shmem->pages); +err_free_gem: + drm_gem_object_put_unlocked(&shmem->base); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 93e2b30fe1a5..9c5ae825c507 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights"); /* Backward compatibility for drm_kms_helper.edid_firmware */ static int edid_firmware_set(const char *val, const struct kernel_param *kp) { - DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n"); + DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); return __drm_set_edid_firmware_path(val); } diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 40c4349cb939..8dbcdc77f6bf 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -35,6 +35,7 @@ #include <linux/highmem.h> #include <linux/export.h> +#include <xen/xen.h> #include <drm/drmP.h> #include "drm_legacy.h" @@ -150,15 +151,27 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) } EXPORT_SYMBOL(drm_legacy_ioremapfree); -u64 drm_get_max_iomem(void) +bool drm_need_swiotlb(int dma_bits) { struct resource *tmp; resource_size_t max_iomem = 0; + /* + * Xen paravirtual hosts require swiotlb regardless of requested dma + * transfer size. + * + * NOTE: Really, what it requires is use of the dma_alloc_coherent + * allocator used in ttm_dma_populate() instead of + * ttm_populate_and_map_pages(), which bounce buffers so much in + * Xen it leads to swiotlb buffer exhaustion. 
+ */ + if (xen_pv_domain()) + return true; + for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { max_iomem = max(max_iomem, tmp->end); } - return max_iomem; + return max_iomem > ((u64)1 << dma_bits); } -EXPORT_SYMBOL(drm_get_max_iomem); +EXPORT_SYMBOL(drm_need_swiotlb); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 52e445bb1aa5..521aff99b08a 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { + .width = 1200, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Lenovo Ideapad D330 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 231e3f6d5f41..dc079efb3b0f 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, .size = obj->size, .flags = flags, .priv = obj, + .resv = obj->resv, }; if (dev->driver->gem_prime_res_obj) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index e19525af0cce..5329e66598c6 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -731,7 +731,7 @@ cleanup_entries: * * Calculate the timeout in jiffies from an absolute time in sec/nsec. */ -static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) +signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) { ktime_t abs_timeout, now; u64 timeout_ns, timeout_jiffies64; @@ -755,6 +755,7 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) return timeout_jiffies64 + 1; } +EXPORT_SYMBOL(drm_timeout_abs_to_jiffies); static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index c3301046dfaa..8987501f53b2 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &drm_vm_ops; break; } - /* fall through to _DRM_FRAME_BUFFER... */ #endif + /* fall through - to _DRM_FRAME_BUFFER... 
*/ case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: offset = drm_core_get_reg_ofs(dev); @@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; vma->vm_page_prot = drm_dma_prot(map->type, vma); - /* fall through to _DRM_SHM */ + /* fall through - to _DRM_SHM */ case _DRM_SHM: vma->vm_ops = &drm_vm_shm_ops; vma->vm_private_data = (void *)map; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 18c27f795cf6..9f42f7538236 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_res_obj = etnaviv_gem_prime_res_obj, .gem_prime_pin = etnaviv_gem_prime_pin, .gem_prime_unpin = etnaviv_gem_prime_unpin, .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index a6a7ded37ef1..6044ace6bb3e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -60,7 +60,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5c48915f492d..c60752ef7324 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, + if (!reservation_object_test_signaled_rcu(obj->resv, write)) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, + ret = reservation_object_wait_timeout_rcu(obj->resv, write, true, remain); if (ret <= 0) return ret == 0 ? 
-ETIMEDOUT : ret; @@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence, static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object *robj = obj->resv; struct reservation_object_list *fobj; struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); @@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) drm_gem_free_mmap_offset(obj); etnaviv_obj->ops->release(etnaviv_obj); - if (etnaviv_obj->resv == &etnaviv_obj->_resv) - reservation_object_fini(&etnaviv_obj->_resv); drm_gem_object_release(obj); kfree(etnaviv_obj); @@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, etnaviv_obj->flags = flags; etnaviv_obj->ops = ops; - if (robj) { - etnaviv_obj->resv = robj; - } else { - etnaviv_obj->resv = &etnaviv_obj->_resv; - reservation_object_init(&etnaviv_obj->_resv); - } + if (robj) + etnaviv_obj->base.resv = robj; mutex_init(&etnaviv_obj->lock); INIT_LIST_HEAD(&etnaviv_obj->vram_list); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index f0abb744ef95..753c458497d0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -47,10 +47,6 @@ struct etnaviv_gem_object { struct sg_table *sgt; void *vaddr; - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; - struct list_head vram_list; /* cache maintenance */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index f21529e635e3..00e8b6a817e3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -139,10 +139,3 @@ fail: return ERR_PTR(ret); } - -struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj) -{ - struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - - return etnaviv_obj->resv; -} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index b2fe3446bfbc..e054f09ac828 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -108,9 +108,9 @@ out_unlock: static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) { if (submit->bos[i].flags & BO_LOCKED) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; - ww_mutex_unlock(&etnaviv_obj->resv->lock); + ww_mutex_unlock(&obj->resv->lock); submit->bos[i].flags &= ~BO_LOCKED; } } @@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit, retry: for (i = 0; i < submit->nr_bos; i++) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; if (slow_locked == i) slow_locked = -1; @@ -130,7 +130,7 @@ retry: contended = i; if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock, + ret = ww_mutex_lock_interruptible(&obj->resv->lock, ticket); if (ret == -EALREADY) DRM_ERROR("BO at index %u already on submit list\n", @@ -153,12 +153,12 @@ fail: submit_unlock_object(submit, slow_locked); if (ret == -EDEADLK) { - struct etnaviv_gem_object *etnaviv_obj; + struct drm_gem_object *obj; - etnaviv_obj = 
submit->bos[contended].obj; + obj = &submit->bos[contended].obj->base; /* we lost out in a seqno race, lock and retry.. */ - ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock, + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, ticket); if (!ret) { submit->bos[contended].flags |= BO_LOCKED; @@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; - struct reservation_object *robj = bo->obj->resv; + struct reservation_object *robj = bo->obj->base.resv; if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { ret = reservation_object_reserve_shared(robj, 1); @@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit) int i; for (i = 0; i < submit->nr_bos; i++) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) - reservation_object_add_excl_fence(etnaviv_obj->resv, + reservation_object_add_excl_fence(obj->resv, submit->out_fence); else - reservation_object_add_shared_fence(etnaviv_obj->resv, + reservation_object_add_shared_fence(obj->resv, submit->out_fence); submit_unlock_object(submit, i); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 638a586469f9..cf80c5d8af34 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6557,13 +6557,22 @@ enum { #define PLANE_CTL_FORMAT_YUV422 (0 << 24) #define PLANE_CTL_FORMAT_NV12 (1 << 24) #define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) +#define PLANE_CTL_FORMAT_P010 (3 << 24) #define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) +#define PLANE_CTL_FORMAT_P012 (5 << 24) #define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) +#define PLANE_CTL_FORMAT_P016 (7 << 24) #define PLANE_CTL_FORMAT_AYUV (8 << 24) #define PLANE_CTL_FORMAT_INDEXED (12 << 24) #define PLANE_CTL_FORMAT_RGB_565 (14 << 24) #define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) #define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ +#define PLANE_CTL_FORMAT_Y210 (1 << 23) +#define PLANE_CTL_FORMAT_Y212 (3 << 23) +#define PLANE_CTL_FORMAT_Y216 (5 << 23) +#define PLANE_CTL_FORMAT_Y410 (7 << 23) +#define PLANE_CTL_FORMAT_Y412 (9 << 23) +#define PLANE_CTL_FORMAT_Y416 (0xb << 23) #define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) #define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) #define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 7cf9290ea34a..b844e8840c6f 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -126,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, */ if (new_conn_state->force_audio != old_conn_state->force_audio || new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || + new_conn_state->base.colorspace != old_conn_state->base.colorspace || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) @@ -234,10 +235,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta if (plane_state && plane_state->base.fb && plane_state->base.fb->format->is_yuv && plane_state->base.fb->format->num_planes > 1) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); if (IS_GEN(dev_priv, 
9) && !IS_GEMINILAKE(dev_priv)) { mode = SKL_PS_SCALER_MODE_NV12; - } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) { + } else if (icl_is_hdr_plane(dev_priv, plane->id)) { /* * On gen11+'s HDR planes we only use the scaler for * scaling. They have a dedicated chroma upsampler, so diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index db0965904439..dd6c09699237 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -135,7 +135,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes |= BIT(plane->id); if (new_plane_state->base.visible && - new_plane_state->base.fb->format->format == DRM_FORMAT_NV12) + is_planar_yuv_format(new_plane_state->base.fb->format->format)) new_crtc_state->nv12_planes |= BIT(plane->id); if (new_plane_state->base.visible || old_plane_state->base.visible) diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c index ee16758747c5..8352d0bd8813 100644 --- a/drivers/gpu/drm/i915/intel_connector.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -265,3 +265,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) connector->dev->mode_config.aspect_ratio_property, DRM_MODE_PICTURE_ASPECT_NONE); } + +void +intel_attach_colorspace_property(struct drm_connector *connector) +{ + if (!drm_mode_create_colorspace_property(connector)) + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ccb616351bba..94496488641c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2677,6 +2677,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_RGB565; case PLANE_CTL_FORMAT_NV12: return DRM_FORMAT_NV12; + case PLANE_CTL_FORMAT_P010: + return DRM_FORMAT_P010; + case PLANE_CTL_FORMAT_P012: + return DRM_FORMAT_P012; + case PLANE_CTL_FORMAT_P016: + return DRM_FORMAT_P016; + case PLANE_CTL_FORMAT_Y210: + return DRM_FORMAT_Y210; + case PLANE_CTL_FORMAT_Y212: + return DRM_FORMAT_Y212; + case PLANE_CTL_FORMAT_Y216: + return DRM_FORMAT_Y216; + case PLANE_CTL_FORMAT_Y410: + return DRM_FORMAT_XVYU2101010; + case PLANE_CTL_FORMAT_Y412: + return DRM_FORMAT_XVYU12_16161616; + case PLANE_CTL_FORMAT_Y416: + return DRM_FORMAT_XVYU16161616; default: case PLANE_CTL_FORMAT_XRGB_8888: if (rgb_order) { @@ -2695,6 +2713,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_XBGR2101010; else return DRM_FORMAT_XRGB2101010; + case PLANE_CTL_FORMAT_XRGB_16161616F: + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR16161616F; + else + return DRM_FORMAT_XBGR16161616F; + } else { + if (alpha) + return DRM_FORMAT_ARGB16161616F; + else + return DRM_FORMAT_XRGB16161616F; + } } } @@ -3176,7 +3206,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since * the main surface setup depends on it. 
*/ - if (fb->format->format == DRM_FORMAT_NV12) { + if (is_planar_yuv_format(fb->format->format)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3590,6 +3620,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format) return PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR2101010: return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F; case DRM_FORMAT_YUYV: return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; case DRM_FORMAT_YVYU: @@ -3600,6 +3636,24 @@ static u32 skl_plane_ctl_format(u32 pixel_format) return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; + case DRM_FORMAT_P010: + return PLANE_CTL_FORMAT_P010; + case DRM_FORMAT_P012: + return PLANE_CTL_FORMAT_P012; + case DRM_FORMAT_P016: + return PLANE_CTL_FORMAT_P016; + case DRM_FORMAT_Y210: + return PLANE_CTL_FORMAT_Y210; + case DRM_FORMAT_Y212: + return PLANE_CTL_FORMAT_Y212; + case DRM_FORMAT_Y216: + return PLANE_CTL_FORMAT_Y216; + case DRM_FORMAT_XVYU2101010: + return PLANE_CTL_FORMAT_Y410; + case DRM_FORMAT_XVYU12_16161616: + return PLANE_CTL_FORMAT_Y412; + case DRM_FORMAT_XVYU16161616: + return PLANE_CTL_FORMAT_Y416; default: MISSING_CASE(pixel_format); } @@ -3772,6 +3826,8 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); const struct drm_framebuffer *fb = plane_state->base.fb; struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 plane_color_ctl = 0; @@ -3779,7 +3835,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); - if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) { + if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else @@ -5036,9 +5092,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (format && format->format == DRM_FORMAT_NV12 && + if (format && is_planar_yuv_format(format->format) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { - DRM_DEBUG_KMS("NV12: src dimensions not met\n"); + DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); return -EINVAL; } @@ -5105,14 +5161,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, { struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); struct drm_framebuffer *fb = plane_state->base.fb; int ret; bool force_detach = !fb || !plane_state->base.visible; bool need_scaler = false; /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
*/ - if (!icl_is_hdr_plane(intel_plane) && - fb && fb->format->format == DRM_FORMAT_NV12) + if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && + fb && is_planar_yuv_format(fb->format->format)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, @@ -5144,11 +5201,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: break; default: DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", @@ -11134,7 +11204,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) } if (!linked_state) { - DRM_DEBUG_KMS("Need %d free Y planes for NV12\n", + DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", hweight8(crtc_state->nv12_planes)); return -EINVAL; @@ -13767,7 +13837,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, * or * cdclk/crtc_clock */ - mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3; + mult = is_planar_yuv_format(pixel_format) ? 2 : 3; tmpclk1 = (1 << 16) * mult - 1; tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); max_scale = min(tmpclk1, tmpclk2); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 15db41394b9e..375f51d14dda 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1796,6 +1796,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); void intel_attach_force_audio_property(struct drm_connector *connector); void intel_attach_broadcast_rgb_property(struct drm_connector *connector); void intel_attach_aspect_ratio_property(struct drm_connector *connector); +void intel_attach_colorspace_property(struct drm_connector *connector); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); @@ -2300,6 +2301,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, /* intel_sprite.c */ +bool is_planar_yuv_format(u32 pixelformat); int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, @@ -2324,12 +2326,13 @@ static inline bool icl_is_nv12_y_plane(enum plane_id id) return false; } -static inline bool icl_is_hdr_plane(struct intel_plane *plane) +static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { - if (INTEL_GEN(to_i915(plane->base.dev)) < 11) + if (INTEL_GEN(dev_priv) < 11) return false; - return plane->id < PLANE_SPRITE2; + return plane_id < PLANE_SPRITE2; } /* intel_tv.c */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f125a62eba8c..765718b606d8 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -498,6 +498,8 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder, else frame.avi.colorspace = HDMI_COLORSPACE_RGB; + drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state); + drm_hdmi_avi_infoframe_quant_range(&frame.avi, conn_state->connector, adjusted_mode, @@ -2143,10 
+2145,21 @@ static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); + + /* + * Attach the colorspace property for non-LSPCON based devices. + * ToDo: This needs to be extended for the LSPCON implementation + * as well; it will be implemented separately. + */ + if (!intel_dig_port->lspcon.active) + intel_attach_colorspace_property(connector); + drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 54307f1df6cf..14ac31888c67 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3970,7 +3970,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); - if (fourcc == DRM_FORMAT_NV12) + if (is_planar_yuv_format(fourcc)) swap(val, val2); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); @@ -4180,7 +4180,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, if (intel_plane->id == PLANE_CURSOR) return 0; - if (plane == 1 && format != DRM_FORMAT_NV12) + if (plane == 1 && !is_planar_yuv_format(format)) return 0; /* @@ -4192,7 +4192,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, height = drm_rect_height(&intel_pstate->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ - if (plane == 1 && format == DRM_FORMAT_NV12) { + if (plane == 1 && is_planar_yuv_format(format)) { width /= 2; height /= 2; } @@ -4578,9 +4578,9 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, const struct drm_framebuffer *fb = pstate->fb; u32 interm_pbpl; - /* only NV12 format has two planes */ - if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) { - DRM_DEBUG_KMS("Non NV12 format have single plane\n"); + /* only planar formats have two planes */ + if (color_plane == 1 && !is_planar_yuv_format(fb->format->format)) { + DRM_DEBUG_KMS("Non-planar format has a single plane\n"); return -EINVAL; } @@ -4591,7 +4591,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = fb->format->format == DRM_FORMAT_NV12; + wp->is_planar = is_planar_yuv_format(fb->format->format); if (plane->id == PLANE_CURSOR) { wp->width = intel_pstate->base.crtc_w; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index b56a1a9ad01d..53174d579574 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,6 +41,19 @@ #include "i915_drv.h" #include <drm/drm_color_mgmt.h> +bool is_planar_yuv_format(u32 pixelformat) +{ + switch (pixelformat) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return true; + default: + return false; + } +} + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -335,8 +348,8 @@ skl_program_scaler(struct intel_plane *plane, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ - if
(plane_state->base.fb->format->format == DRM_FORMAT_NV12 && - !icl_is_hdr_plane(plane)) { + if (is_planar_yuv_format(plane_state->base.fb->format->format) && + !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -518,7 +531,7 @@ skl_program_plane(struct intel_plane *plane, I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), (plane_state->color_plane[1].offset - surf_addr) | aux_stride); - if (icl_is_hdr_plane(plane)) { + if (icl_is_hdr_plane(dev_priv, plane_id)) { u32 cus_ctl = 0; if (linked) { @@ -542,7 +555,7 @@ skl_program_plane(struct intel_plane *plane, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); - if (fb->format->is_yuv && icl_is_hdr_plane(plane)) + if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); skl_write_plane_wm(plane, crtc_state); @@ -1482,8 +1495,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, /* * 90/270 is not allowed with RGB64 16:16:16:16 and * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. - * TBD: Add RGB64 case once its added in supported format - * list. */ switch (fb->format->format) { case DRM_FORMAT_RGB565: @@ -1491,6 +1502,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, break; /* fall through */ case DRM_FORMAT_C8: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", drm_get_format_name(fb->format->format, &format_name)); @@ -1551,10 +1566,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s int src_w = drm_rect_width(&plane_state->base.src) >> 16; /* Display WA #1106 */ - if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 && + if (is_planar_yuv_format(fb->format->format) && src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { - DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n"); + DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); return -EINVAL; } @@ -1790,6 +1805,52 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_VYUY, }; +static const uint32_t icl_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const uint32_t icl_hdr_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_ARGB16161616F, + DRM_FORMAT_ABGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + static const u32 skl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, @@ -1806,6 +1867,79 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_NV12, }; 
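The i915 hunks above replace open-coded DRM_FORMAT_NV12 checks with the new is_planar_yuv_format() helper, so the two-plane code paths (DDB allocation, watermarks, scaler setup) also cover the P010/P012/P016 variants. Below is a minimal standalone sketch of the same pattern; the fourcc packing and format codes are re-declared here purely for illustration (the kernel gets them from drm/drm_fourcc.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing rule as the kernel's fourcc_code() macro. */
#define fourcc_code(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 8-bit 2-plane YCbCr */
#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 10-bit 2-plane YCbCr */
#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 12-bit 2-plane YCbCr */
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 16-bit 2-plane YCbCr */

/* Mirrors the helper added to intel_sprite.c: one predicate for all
 * two-plane (luma + chroma) YUV formats. */
static bool is_planar_yuv_format(uint32_t pixelformat)
{
	switch (pixelformat) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("NV12 planar: %d\n", is_planar_yuv_format(DRM_FORMAT_NV12)); /* 1 */
	printf("P010 planar: %d\n", is_planar_yuv_format(DRM_FORMAT_P010)); /* 1 */
	printf("YUYV planar: %d\n",
	       is_planar_yuv_format(fourcc_code('Y', 'U', 'Y', 'V')));     /* 0 */
	return 0;
}

Centralizing the predicate keeps callers such as skl_check_plane_surface() and skl_update_scaler() from drifting out of sync as further planar formats are wired up.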
+static const uint32_t glk_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, +}; + +static const uint32_t icl_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const uint32_t icl_hdr_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_ARGB16161616F, + DRM_FORMAT_ABGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + static const u64 skl_plane_format_modifiers_noccs[] = { I915_FORMAT_MOD_Yf_TILED, I915_FORMAT_MOD_Y_TILED, @@ -1945,10 +2079,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; /* fall through */ case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) @@ -2085,8 +2232,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->update_slave = icl_update_slave; if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - formats = skl_planar_formats; - num_formats = ARRAY_SIZE(skl_planar_formats); + if (icl_is_hdr_plane(dev_priv, plane_id)) { + formats = icl_hdr_planar_formats; + num_formats = ARRAY_SIZE(icl_hdr_planar_formats); + } else if (INTEL_GEN(dev_priv) >= 11) { + formats = icl_planar_formats; + num_formats = ARRAY_SIZE(icl_planar_formats); + } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) { + formats = glk_planar_formats; + num_formats = ARRAY_SIZE(glk_planar_formats); + } else { + formats = skl_planar_formats; + num_formats = ARRAY_SIZE(skl_planar_formats); + } + } else if (icl_is_hdr_plane(dev_priv, plane_id)) { + formats = icl_hdr_plane_formats; + num_formats = ARRAY_SIZE(icl_hdr_plane_formats); + } else if (INTEL_GEN(dev_priv) >= 11) { + formats = icl_plane_formats; + num_formats = ARRAY_SIZE(icl_plane_formats); } else { formats = skl_plane_formats; num_formats = ARRAY_SIZE(skl_plane_formats); diff --git 
a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c index 23abf03736e7..3f9921ba4a76 100644 --- a/drivers/gpu/drm/i915/intel_vdsc.c +++ b/drivers/gpu/drm/i915/intel_vdsc.c @@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component) } } -static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) -{ - unsigned long groups_per_line = 0; - unsigned long groups_total = 0; - unsigned long num_extra_mux_bits = 0; - unsigned long slice_bits = 0; - unsigned long hrd_delay = 0; - unsigned long final_scale = 0; - unsigned long rbs_min = 0; - - /* Number of groups used to code each line of a slice */ - groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, - DSC_RC_PIXELS_PER_GROUP); - - /* chunksize in Bytes */ - vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * - vdsc_cfg->bits_per_pixel, - (8 * 16)); - - if (vdsc_cfg->convert_rgb) - num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) - - 2); - else - num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) + - 2 * (4 * vdsc_cfg->bits_per_component) - 2; - /* Number of bits in one Slice */ - slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; - - while ((num_extra_mux_bits > 0) && - ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) - num_extra_mux_bits--; - - if (groups_per_line < vdsc_cfg->initial_scale_value - 8) - vdsc_cfg->initial_scale_value = groups_per_line + 8; - - /* scale_decrement_interval calculation according to DSC spec 1.11 */ - if (vdsc_cfg->initial_scale_value > 8) - vdsc_cfg->scale_decrement_interval = groups_per_line / - (vdsc_cfg->initial_scale_value - 8); - else - vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; - - vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - - (vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; - - if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { - DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); - return -ERANGE; - } - - final_scale = (vdsc_cfg->rc_model_size * 8) / - (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); - if (vdsc_cfg->slice_height > 1) - /* - * NflBpgOffset is 16 bit value with 11 fractional bits - * hence we multiply by 2^11 for preserving the - * fractional part - */ - vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), - (vdsc_cfg->slice_height - 1)); - else - vdsc_cfg->nfl_bpg_offset = 0; - - /* 2^16 - 1 */ - if (vdsc_cfg->nfl_bpg_offset > 65535) { - DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); - return -ERANGE; - } - - /* Number of groups used to code the entire slice */ - groups_total = groups_per_line * vdsc_cfg->slice_height; - - /* slice_bpg_offset is 16 bit value with 11 fractional bits */ - vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - - vdsc_cfg->initial_offset + - num_extra_mux_bits) << 11), - groups_total); - - if (final_scale > 9) { - /* - * ScaleIncrementInterval = - * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125)) - * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value, - * we need divide by 2^11 from pstDscCfg values - */ - vdsc_cfg->scale_increment_interval = - (vdsc_cfg->final_offset * (1 << 11)) / - ((vdsc_cfg->nfl_bpg_offset + - vdsc_cfg->slice_bpg_offset) * - (final_scale - 9)); - } else { - /* - * If finalScaleValue is less than or equal to 9, a value of 0 should - * be used to disable the 
scale increment at the end of the slice - */ - vdsc_cfg->scale_increment_interval = 0; - } - - if (vdsc_cfg->scale_increment_interval > 65535) { - DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); - return -ERANGE; - } - - /* - * DSC spec mentions that bits_per_pixel specifies the target - * bits/pixel (bpp) rate that is used by the encoder, - * in steps of 1/16 of a bit per pixel - */ - rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + - DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel, 16) + - groups_per_line * vdsc_cfg->first_line_bpg_offset; - - hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); - vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; - vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; - - return 0; -} - int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { @@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; /* Gen 11 does not support YCbCr */ - vdsc_cfg->enable422 = false; + vdsc_cfg->simple_422 = false; /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; vdsc_cfg->block_pred_enable = @@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); - return intel_compute_rc_parameters(vdsc_cfg); + return drm_dsc_compute_rc_parameters(vdsc_cfg); } enum intel_display_power_domain @@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) pps_val |= DSC_COLOR_SPACE_CONVERSION; - if (vdsc_cfg->enable422) + if (vdsc_cfg->simple_422) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; @@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ - drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); + drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ - drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); + drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg); intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_PPS, &dp_dsc_pps_sdp, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index c935cbe059a7..3e8bece620df 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data) } /* Special case for LDB, one device for two channels */ - if (of_node_cmp(np->name, "lvds-channel") == 0) { + if (of_node_name_eq(np, "lvds-channel")) { np = of_get_parent(np); of_node_put(np); } diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 7709f2fbb9f7..d4ea82fc493b 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -1,5 +1,5 @@ meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o -meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o +meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git 
a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c deleted file mode 100644 index 5de11aa7c775..000000000000 --- a/drivers/gpu/drm/meson/meson_canvas.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2015 Amlogic, Inc. All rights reserved. - * Copyright (C) 2014 Endless Mobile - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include "meson_drv.h" -#include "meson_canvas.h" -#include "meson_registers.h" - -/** - * DOC: Canvas - * - * CANVAS is a memory zone where physical memory frames information - * are stored for the VIU to scanout. - */ - -/* DMC Registers */ -#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */ -#define CANVAS_WIDTH_LBIT 29 -#define CANVAS_WIDTH_LWID 3 -#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */ -#define CANVAS_WIDTH_HBIT 0 -#define CANVAS_HEIGHT_BIT 9 -#define CANVAS_BLKMODE_BIT 24 -#define CANVAS_ENDIAN_BIT 26 -#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */ -#define CANVAS_LUT_WR_EN (0x2 << 8) -#define CANVAS_LUT_RD_EN (0x1 << 8) - -void meson_canvas_setup(struct meson_drm *priv, - uint32_t canvas_index, uint32_t addr, - uint32_t stride, uint32_t height, - unsigned int wrap, - unsigned int blkmode, - unsigned int endian) -{ - unsigned int val; - - regmap_write(priv->dmc, DMC_CAV_LUT_DATAL, - (((addr + 7) >> 3)) | - (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT)); - - regmap_write(priv->dmc, DMC_CAV_LUT_DATAH, - ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) << - CANVAS_WIDTH_HBIT) | - (height << CANVAS_HEIGHT_BIT) | - (wrap << 22) | - (blkmode << CANVAS_BLKMODE_BIT) | - (endian << CANVAS_ENDIAN_BIT)); - - regmap_write(priv->dmc, DMC_CAV_LUT_ADDR, - CANVAS_LUT_WR_EN | canvas_index); - - /* Force a read-back to make sure everything is flushed. */ - regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val); -} diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h deleted file mode 100644 index 85dbf26e2826..000000000000 --- a/drivers/gpu/drm/meson/meson_canvas.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2014 Endless Mobile - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/* Canvas LUT Memory */ - -#ifndef __MESON_CANVAS_H -#define __MESON_CANVAS_H - -#define MESON_CANVAS_ID_OSD1 0x4e -#define MESON_CANVAS_ID_VD1_0 0x60 -#define MESON_CANVAS_ID_VD1_1 0x61 -#define MESON_CANVAS_ID_VD1_2 0x62 - -/* Canvas configuration. */ -#define MESON_CANVAS_WRAP_NONE 0x00 -#define MESON_CANVAS_WRAP_X 0x01 -#define MESON_CANVAS_WRAP_Y 0x02 - -#define MESON_CANVAS_BLKMODE_LINEAR 0x00 -#define MESON_CANVAS_BLKMODE_32x32 0x01 -#define MESON_CANVAS_BLKMODE_64x64 0x02 - -#define MESON_CANVAS_ENDIAN_SWAP16 0x1 -#define MESON_CANVAS_ENDIAN_SWAP32 0x3 -#define MESON_CANVAS_ENDIAN_SWAP64 0x7 -#define MESON_CANVAS_ENDIAN_SWAP128 0xf - -void meson_canvas_setup(struct meson_drm *priv, - uint32_t canvas_index, uint32_t addr, - uint32_t stride, uint32_t height, - unsigned int wrap, - unsigned int blkmode, - unsigned int endian); - -#endif /* __MESON_CANVAS_H */ diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 43e29984f8b1..6d9311e254ef 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -37,7 +37,6 @@ #include "meson_venc.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* CRTC definition */ @@ -214,13 +213,7 @@ void meson_crtc_irq(struct meson_drm *priv) writel_relaxed(priv->viu.osd_sc_v_ctrl0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); - if (priv->canvas) - meson_canvas_config(priv->canvas, priv->canvas_id_osd1, - priv->viu.osd1_addr, priv->viu.osd1_stride, - priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, 0); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, + meson_canvas_config(priv->canvas, priv->canvas_id_osd1, priv->viu.osd1_addr, priv->viu.osd1_stride, priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, MESON_CANVAS_BLKMODE_LINEAR, 0); @@ -237,61 +230,34 @@ void meson_crtc_irq(struct meson_drm *priv) switch (priv->viu.vd1_planes) { case 3: - if (priv->canvas) - meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_2, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_2, + priv->viu.vd1_addr2, + priv->viu.vd1_stride2, + priv->viu.vd1_height2, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); /* fallthrough */ case 2: - if (priv->canvas) - meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_1, - priv->viu.vd1_addr1, - priv->viu.vd1_stride1, - priv->viu.vd1_height1, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_1, + priv->viu.vd1_addr1, + priv->viu.vd1_stride1, + priv->viu.vd1_height1, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); /* fallthrough */ case 1: - if (priv->canvas) - 
meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_0, - priv->viu.vd1_addr0, - priv->viu.vd1_stride0, - priv->viu.vd1_height0, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_0, + priv->viu.vd1_addr0, + priv->viu.vd1_stride0, + priv->viu.vd1_height0, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); }; writel_relaxed(priv->viu.vd1_if0_gen_reg, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2281ed3eb774..70f9d7b85e8e 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -48,7 +48,6 @@ #include "meson_vpp.h" #include "meson_viu.h" #include "meson_venc.h" -#include "meson_canvas.h" #include "meson_registers.h" #define DRIVER_NAME "meson" @@ -231,50 +230,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } priv->canvas = meson_canvas_get(dev); - if (!IS_ERR(priv->canvas)) { - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); - if (ret) - goto free_drm; - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - goto free_drm; - } - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - goto free_drm; - } - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); - goto free_drm; - } - } else { - priv->canvas = NULL; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); - if (!res) { - ret = -EINVAL; - goto free_drm; - } - /* Simply ioremap since it may be a shared register zone */ - regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) { - ret = -EADDRNOTAVAIL; - goto free_drm; - } + if (IS_ERR(priv->canvas)) { + ret = PTR_ERR(priv->canvas); + goto free_drm; + } - priv->dmc = devm_regmap_init_mmio(dev, regs, - &meson_regmap_config); - if (IS_ERR(priv->dmc)) { - dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); - ret = PTR_ERR(priv->dmc); - goto free_drm; - } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); + if (ret) + goto free_drm; + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + goto free_drm; + } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); + goto free_drm; + } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); + goto free_drm; } priv->vsync_irq = platform_get_irq(pdev, 0); diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 4dccf4cd042a..214a7cb18ce2 100644 --- a/drivers/gpu/drm/meson/meson_drv.h 
+++ b/drivers/gpu/drm/meson/meson_drv.h @@ -29,7 +29,6 @@ struct meson_drm { struct device *dev; void __iomem *io_base; struct regmap *hhi; - struct regmap *dmc; int vsync_irq; struct meson_canvas *canvas; diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index 691a9fd16b36..b54a22e483b9 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -22,7 +22,6 @@ #include "meson_overlay.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* VD1_IF0_GEN_REG */ @@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, DRM_DEBUG_DRIVER("\n"); - /* Fallback is canvas provider is not available */ - if (!priv->canvas) { - priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0; - priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1; - priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2; - } - interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; spin_lock_irqsave(&priv->drm->event_lock, flags); diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 6119a0224278..b7786218cb10 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -38,7 +38,6 @@ #include "meson_plane.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* OSD_SCI_WH_M1 */ @@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; - if (priv->canvas) - canvas_id_osd1 = priv->canvas_id_osd1; - else - canvas_id_osd1 = MESON_CANVAS_ID_OSD1; + canvas_id_osd1 = priv->canvas_id_osd1; /* Set up BLK0 to point to the right canvas */ priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) | diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index e46e05f50bad..ac0f3687e09a 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -25,7 +25,6 @@ #include "meson_viu.h" #include "meson_vpp.h" #include "meson_venc.h" -#include "meson_canvas.h" #include "meson_registers.h" /** diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 0bdd93648761..4697d854b827 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1027,7 +1027,6 @@ static struct drm_driver msm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_res_obj = msm_gem_prime_res_obj, .gem_prime_pin = msm_gem_prime_pin, .gem_prime_unpin = msm_gem_prime_unpin, .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c56dade2c1dc..163e24d2ab99 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -292,7 +292,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); void *msm_gem_prime_vmap(struct drm_gem_object *obj); void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_gem.c 
b/drivers/gpu/drm/msm/msm_gem.c index 18ca651ab942..a72c648ba6e7 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass) int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object_list *fobj; struct dma_fence *fence; int i, ret; - fobj = reservation_object_get_list(msm_obj->resv); + fobj = reservation_object_get_list(obj->resv); if (!fobj || (fobj->shared_count == 0)) { - fence = reservation_object_get_excl(msm_obj->resv); + fence = reservation_object_get_excl(obj->resv); /* don't need to wait on our own fences, since ring is fifo */ if (fence && (fence->context != fctx->context)) { ret = dma_fence_wait(fence, true); @@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, for (i = 0; i < fobj->shared_count; i++) { fence = rcu_dereference_protected(fobj->shared[i], - reservation_object_held(msm_obj->resv)); + reservation_object_held(obj->resv)); if (fence->context != fctx->context) { ret = dma_fence_wait(fence, true); if (ret) @@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); msm_obj->gpu = gpu; if (exclusive) - reservation_object_add_excl_fence(msm_obj->resv, fence); + reservation_object_add_excl_fence(obj->resv, fence); else - reservation_object_add_shared_fence(msm_obj->resv, fence); + reservation_object_add_shared_fence(obj->resv, fence); list_del_init(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &gpu->active_list); } @@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj) int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); bool write = !!(op & MSM_PREP_WRITE); unsigned long remain = op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); long ret; - ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write, + ret = reservation_object_wait_timeout_rcu(obj->resv, write, true, remain); if (ret == 0) return remain == 0 ? 
-EBUSY : -ETIMEDOUT; @@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type, void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct reservation_object *robj = msm_obj->resv; + struct reservation_object *robj = obj->resv; struct reservation_object_list *fobj; struct dma_fence *fence; struct msm_gem_vma *vma; @@ -883,9 +881,6 @@ void msm_gem_free_object(struct drm_gem_object *obj) put_pages(obj); } - if (msm_obj->resv == &msm_obj->_resv) - reservation_object_fini(msm_obj->resv); - drm_gem_object_release(obj); mutex_unlock(&msm_obj->lock); @@ -945,12 +940,8 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; - if (resv) { - msm_obj->resv = resv; - } else { - msm_obj->resv = &msm_obj->_resv; - reservation_object_init(msm_obj->resv); - } + if (resv) + msm_obj->base.resv = resv; INIT_LIST_HEAD(&msm_obj->submit_entry); INIT_LIST_HEAD(&msm_obj->vmas); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 13403c6da6c7..60bb290700ce 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) if (!obj->import_attach) msm_gem_put_pages(obj); } - -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - - return msm_obj->resv; -} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 12b983fc0b56..df302521ec74 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -173,7 +173,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace); if (submit->bos[i].flags & BO_LOCKED) - ww_mutex_unlock(&msm_obj->resv->lock); + ww_mutex_unlock(&msm_obj->base.resv->lock); if (backoff && !(submit->bos[i].flags & BO_VALID)) submit->bos[i].iova = 0; @@ -196,7 +196,7 @@ retry: contended = i; if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock, + ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock, &submit->ticket); if (ret) goto fail; @@ -218,7 +218,7 @@ fail: if (ret == -EDEADLK) { struct msm_gem_object *msm_obj = submit->bos[contended].obj; /* we lost out in a seqno race, lock and retry.. */ - ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock, + ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock, &submit->ticket); if (!ret) { submit->bos[contended].flags |= BO_LOCKED; @@ -244,7 +244,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) * strange place to call it. OTOH this is a * convenient can-fail point to hook it in. */ - ret = reservation_object_reserve_shared(msm_obj->resv, + ret = reservation_object_reserve_shared(msm_obj->base.resv, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 3e070153ef21..f53f817356db 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -149,6 +149,15 @@ config DRM_PANEL_RAYDIUM_RM68200 Say Y here if you want to enable support for Raydium RM68200 720x1280 DSI video mode panel. 
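The hunks below add the Kconfig entry, Makefile rule, and driver for the Ronbo RB070D30 DSI panel. A quick consistency check on the fixed mode it declares: the dot clock in kHz should equal htotal * vtotal * vrefresh / 1000. Here is a small sketch with the timing values copied from the driver's default_mode (the check itself is an editorial illustration, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Timings from the RB070D30 default_mode added below. */
	long htotal = 1024 + 160 + 80 + 80;	/* 1344 pixels */
	long vtotal = 600 + 12 + 10 + 13;	/* 635 lines */
	long vrefresh = 60;			/* Hz */

	/* 1344 * 635 * 60 = 51,206,400 Hz, matching the driver's
	 * .clock = 51206 (in kHz). */
	printf("dot clock = %ld kHz\n", htotal * vtotal * vrefresh / 1000);
	return 0;
}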
+config DRM_PANEL_RONBO_RB070D30 + tristate "Ronbo Electronics RB070D30 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Ronbo Electronics + RB070D30 1024x600 DSI panel. + config DRM_PANEL_SAMSUNG_S6D16D0 tristate "Samsung S6D16D0 DSI video mode panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index e7ab71968bbf..7834947a53b0 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o +obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c new file mode 100644 index 000000000000..3c15764f0c03 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018-2019, Bridge Systems BV + * Copyright (C) 2018-2019, Bootlin + * Copyright (C) 2017, Free Electrons + * + * This file is based on panel-ilitek-ili9881c.c + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/fb.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> + +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +struct rb070d30_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct backlight_device *backlight; + struct regulator *supply; + + struct { + struct gpio_desc *power; + struct gpio_desc *reset; + struct gpio_desc *updn; + struct gpio_desc *shlr; + } gpios; +}; + +static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel) +{ + return container_of(panel, struct rb070d30_panel, panel); +} + +static int rb070d30_panel_prepare(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + int ret; + + ret = regulator_enable(ctx->supply); + if (ret < 0) { + DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret); + return ret; + } + + msleep(20); + gpiod_set_value(ctx->gpios.power, 1); + msleep(20); + gpiod_set_value(ctx->gpios.reset, 1); + msleep(20); + return 0; +} + +static int rb070d30_panel_unprepare(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + + gpiod_set_value(ctx->gpios.reset, 0); + gpiod_set_value(ctx->gpios.power, 0); + regulator_disable(ctx->supply); + + return 0; +} + +static int rb070d30_panel_enable(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + int ret; + + ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); + if (ret) + return ret; + + ret = backlight_enable(ctx->backlight); + if (ret) + goto out; + + return 0; + +out: + mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); + return
ret; +} + +static int rb070d30_panel_disable(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + + backlight_disable(ctx->backlight); + return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); +} + +/* Default timings */ +static const struct drm_display_mode default_mode = { + .clock = 51206, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 80, + .htotal = 1024 + 160 + 80 + 80, + .vdisplay = 600, + .vsync_start = 600 + 12, + .vsync_end = 600 + 12 + 10, + .vtotal = 600 + 12 + 10 + 13, + .vrefresh = 60, + + .width_mm = 154, + .height_mm = 85, +}; + +static int rb070d30_panel_get_modes(struct drm_panel *panel) +{ + struct drm_connector *connector = panel->connector; + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + mode = drm_mode_duplicate(panel->drm, &default_mode); + if (!mode) { + DRM_DEV_ERROR(&ctx->dsi->dev, + "Failed to add mode " DRM_MODE_FMT "\n", + DRM_MODE_ARG(&default_mode)); + return -EINVAL; + } + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + panel->connector->display_info.bpc = 8; + panel->connector->display_info.width_mm = mode->width_mm; + panel->connector->display_info.height_mm = mode->height_mm; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + return 1; +} + +static const struct drm_panel_funcs rb070d30_panel_funcs = { + .get_modes = rb070d30_panel_get_modes, + .prepare = rb070d30_panel_prepare, + .enable = rb070d30_panel_enable, + .disable = rb070d30_panel_disable, + .unprepare = rb070d30_panel_unprepare, +}; + +static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) +{ + struct rb070d30_panel *ctx; + int ret; + + ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd"); + if (IS_ERR(ctx->supply)) + return PTR_ERR(ctx->supply); + + mipi_dsi_set_drvdata(dsi, ctx); + ctx->dsi = dsi; + + drm_panel_init(&ctx->panel); + ctx->panel.dev = &dsi->dev; + ctx->panel.funcs = &rb070d30_panel_funcs; + + ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.reset)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n"); + return PTR_ERR(ctx->gpios.reset); + } + + ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.power)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n"); + return PTR_ERR(ctx->gpios.power); + } + + /* + * We don't change the state of that GPIO later on but we need + * to force it into a low state. + */ + ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.updn)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n"); + return PTR_ERR(ctx->gpios.updn); + } + + /* + * We don't change the state of that GPIO later on but we need + * to force it into a low state. 
+ */ + ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.shlr)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n"); + return PTR_ERR(ctx->gpios.shlr); + } + + ctx->backlight = devm_of_find_backlight(&dsi->dev); + if (IS_ERR(ctx->backlight)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n"); + return PTR_ERR(ctx->backlight); + } + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) + return ret; + + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = 4; + + return mipi_dsi_attach(dsi); +} + +static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi) +{ + struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi); + + mipi_dsi_detach(dsi); + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id rb070d30_panel_of_match[] = { + { .compatible = "ronbo,rb070d30" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match); + +static struct mipi_dsi_driver rb070d30_panel_driver = { + .probe = rb070d30_panel_dsi_probe, + .remove = rb070d30_panel_dsi_remove, + .driver = { + .name = "panel-ronbo-rb070d30", + .of_match_table = rb070d30_panel_of_match, + }, +}; +module_mipi_dsi_driver(rb070d30_panel_driver); + +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>"); +MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>"); +MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 08c725544a2f..8b319ebbb0fb 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, { struct qxl_device *qdev = plane->dev->dev_private; struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]); - struct qxl_bo *bo_old, *primary; + struct qxl_bo *primary; struct drm_clip_rect norect = { .x1 = 0, .y1 = 0, @@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, }; uint32_t dumb_shadow_offset = 0; - if (old_state->fb) { - bo_old = gem_to_qxl_bo(old_state->fb->obj[0]); - } else { - bo_old = NULL; - } - primary = bo->shadow ? 
bo->shadow : bo; if (!primary->is_primary) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 53f29a115104..0a9312ea250a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev, pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); pr_warn("radeon: No coherent DMA available\n"); } - rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + rdev->need_swiotlb = drm_need_swiotlb(dma_bits); /* Registers mapping */ /* TODO: block userspace mapping of io register */ diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index fbed2c90fd51..286a0eeefcb6 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored) DRM_RND_STATE(prng, random_seed); const unsigned int count = 8192; unsigned int size; - unsigned long *bitmap = NULL; + unsigned long *bitmap; struct drm_mm mm; struct drm_mm_node *nodes, *node, *next; unsigned int *order, n, m, o = 0; @@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored) if (!nodes) goto err; - bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), - GFP_KERNEL); + bitmap = bitmap_zalloc(count, GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1717,7 +1716,7 @@ out: drm_mm_takedown(&mm); kfree(order); err_bitmap: - kfree(bitmap); + bitmap_free(bitmap); err_nodes: vfree(nodes); err: @@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored) if (!nodes) goto err; - bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), - GFP_KERNEL); + bitmap = bitmap_zalloc(count, GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1818,7 +1816,7 @@ out: drm_mm_takedown(&mm); kfree(order); err_bitmap: - kfree(bitmap); + bitmap_free(bitmap); err_nodes: vfree(nodes); err: diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 35367ada3bc1..d15b10de1da6 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -6,7 +6,7 @@ config DRM_STM select DRM_KMS_CMA_HELPER select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS - select FB_PROVIDE_GET_FB_UNMAPPED_AREA + select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB help Enable support for the on-chip display controller on diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 4c0d51f73237..ee59da4a0172 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -720,33 +720,22 @@ static int sun4i_backend_free_sat(struct device *dev) { */ static int sun4i_backend_of_get_id(struct device_node *node) { - struct device_node *port, *ep; - int ret = -EINVAL; + struct device_node *ep, *remote; + struct of_endpoint of_ep; - /* input is port 0 */ - port = of_graph_get_port_by_id(node, 0); - if (!port) + /* Input port is 0, and we want the first endpoint. 
*/ + ep = of_graph_get_endpoint_by_regs(node, 0, -1); + if (!ep) return -EINVAL; - /* try finding an upstream endpoint */ - for_each_available_child_of_node(port, ep) { - struct device_node *remote; - u32 reg; - - remote = of_graph_get_remote_endpoint(ep); - if (!remote) - continue; - - ret = of_property_read_u32(remote, "reg", ®); - if (ret) - continue; - - ret = reg; - } - - of_node_put(port); + remote = of_graph_get_remote_endpoint(ep); + of_node_put(ep); + if (!remote) + return -EINVAL; - return ret; + of_graph_parse_endpoint(remote, &of_ep); + of_node_put(remote); + return of_ep.id; } /* TODO: This needs to take multiple pipelines into account */ diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index 147b97ed1a09..3a3ba99fed22 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -20,7 +20,7 @@ struct sun4i_lvds { struct drm_connector connector; struct drm_encoder encoder; - struct sun4i_tcon *tcon; + struct drm_panel *panel; }; static inline struct sun4i_lvds * @@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector) { struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector); - struct sun4i_tcon *tcon = lvds->tcon; - return drm_panel_get_modes(tcon->panel); + return drm_panel_get_modes(lvds->panel); } static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = { @@ -54,9 +53,8 @@ static void sun4i_lvds_connector_destroy(struct drm_connector *connector) { struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector); - struct sun4i_tcon *tcon = lvds->tcon; - drm_panel_detach(tcon->panel); + drm_panel_detach(lvds->panel); drm_connector_cleanup(connector); } @@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = { static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder) { struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); - struct sun4i_tcon *tcon = lvds->tcon; DRM_DEBUG_DRIVER("Enabling LVDS output\n"); - if (tcon->panel) { - drm_panel_prepare(tcon->panel); - drm_panel_enable(tcon->panel); + if (lvds->panel) { + drm_panel_prepare(lvds->panel); + drm_panel_enable(lvds->panel); } } static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) { struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); - struct sun4i_tcon *tcon = lvds->tcon; DRM_DEBUG_DRIVER("Disabling LVDS output\n"); - if (tcon->panel) { - drm_panel_disable(tcon->panel); - drm_panel_unprepare(tcon->panel); + if (lvds->panel) { + drm_panel_disable(lvds->panel); + drm_panel_unprepare(lvds->panel); } } @@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL); if (!lvds) return -ENOMEM; - lvds->tcon = tcon; encoder = &lvds->encoder; ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, - &tcon->panel, &bridge); + &lvds->panel, &bridge); if (ret) { dev_info(drm->dev, "No panel or bridge found... 
LVDS output disabled\n"); return 0; @@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) /* The LVDS encoder can only work with the TCON channel 0 */ lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); - if (tcon->panel) { + if (lvds->panel) { drm_connector_helper_add(&lvds->connector, &sun4i_lvds_con_helper_funcs); ret = drm_connector_init(drm, &lvds->connector, @@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&lvds->connector, &lvds->encoder); - ret = drm_panel_attach(tcon->panel, &lvds->connector); + ret = drm_panel_attach(lvds->panel, &lvds->connector); if (ret) { dev_err(drm->dev, "Couldn't attach our panel\n"); goto err_cleanup_connector; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index cae19e7bbeaa..d9e2502b49fa 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -27,6 +27,8 @@ struct sun4i_rgb { struct drm_encoder encoder; struct sun4i_tcon *tcon; + struct drm_panel *panel; + struct drm_bridge *bridge; }; static inline struct sun4i_rgb * @@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_tcon *tcon = rgb->tcon; - return drm_panel_get_modes(tcon->panel); + return drm_panel_get_modes(rgb->panel); } +/* + * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the + * CVT spec reuses that tolerance in its examples, so it looks to be a + * good default tolerance for the EDID-based modes. Define it to 5 per + * mille to avoid floating point operations. + */ +#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5 + static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { @@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, struct sun4i_tcon *tcon = rgb->tcon; u32 hsync = mode->hsync_end - mode->hsync_start; u32 vsync = mode->vsync_end - mode->vsync_start; - unsigned long rate = mode->clock * 1000; - long rounded_rate; + unsigned long long rate = mode->clock * 1000; + unsigned long long lowest, highest; + unsigned long long rounded_rate; DRM_DEBUG_DRIVER("Validating modes...\n"); @@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, DRM_DEBUG_DRIVER("Vertical parameters OK\n"); + /* + * TODO: We should use the struct display_timing if available + * and/or try to stretch the timings within that + * tolerance to take care of panels that we wouldn't be able + * to have an exact match for. + */ + if (rgb->panel) { + DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks"); + goto out; + } + + /* + * That shouldn't ever happen unless something is really wrong, but it + * doesn't hurt to check. 
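+ *
+ * (For scale, using the 5 per mille tolerance defined above with
+ * purely illustrative numbers: a nominal 65 MHz dot clock accepts
+ * any rounded rate between 65000000 * 995 / 1000 = 64675000 Hz
+ * and 65000000 * 1005 / 1000 = 65325000 Hz.)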
+ */ + if (!rgb->bridge) + goto out; + tcon->dclk_min_div = 6; tcon->dclk_max_div = 127; rounded_rate = clk_round_rate(tcon->dclk, rate); - if (rounded_rate < rate) + + lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE); + do_div(lowest, 1000); + if (rounded_rate < lowest) return MODE_CLOCK_LOW; - if (rounded_rate > rate) + highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE); + do_div(highest, 1000); + if (rounded_rate > highest) return MODE_CLOCK_HIGH; +out: DRM_DEBUG_DRIVER("Clock rate OK\n"); return MODE_OK; @@ -114,9 +148,8 @@ static void sun4i_rgb_connector_destroy(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_tcon *tcon = rgb->tcon; - drm_panel_detach(tcon->panel); + drm_panel_detach(rgb->panel); drm_connector_cleanup(connector); } @@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = { static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Enabling RGB output\n"); - if (tcon->panel) { - drm_panel_prepare(tcon->panel); - drm_panel_enable(tcon->panel); + if (rgb->panel) { + drm_panel_prepare(rgb->panel); + drm_panel_enable(rgb->panel); } } static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Disabling RGB output\n"); - if (tcon->panel) { - drm_panel_disable(tcon->panel); - drm_panel_unprepare(tcon->panel); + if (rgb->panel) { + drm_panel_disable(rgb->panel); + drm_panel_unprepare(rgb->panel); } } @@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = { int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) { struct drm_encoder *encoder; - struct drm_bridge *bridge; struct sun4i_rgb *rgb; int ret; @@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) encoder = &rgb->encoder; ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, - &tcon->panel, &bridge); + &rgb->panel, &rgb->bridge); if (ret) { dev_info(drm->dev, "No panel or bridge found... 
RGB output disabled\n"); return 0; @@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) /* The RGB encoder can only work with the TCON channel 0 */ rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); - if (tcon->panel) { + if (rgb->panel) { drm_connector_helper_add(&rgb->connector, &sun4i_rgb_con_helper_funcs); ret = drm_connector_init(drm, &rgb->connector, @@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&rgb->connector, &rgb->encoder); - ret = drm_panel_attach(tcon->panel, &rgb->connector); + ret = drm_panel_attach(rgb->panel, &rgb->connector); if (ret) { dev_err(drm->dev, "Couldn't attach our panel\n"); goto err_cleanup_connector; } } - if (bridge) { - ret = drm_bridge_attach(encoder, bridge, NULL); + if (rgb->bridge) { + ret = drm_bridge_attach(encoder, rgb->bridge, NULL); if (ret) { dev_err(drm->dev, "Couldn't attach our bridge\n"); goto err_cleanup_connector; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index ca713d200280..fa92e992a282 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, u32 block_space, start_delay; u32 tcon_div; - tcon->dclk_min_div = 4; - tcon->dclk_max_div = 127; + tcon->dclk_min_div = SUN6I_DSI_TCON_DIV; + tcon->dclk_max_div = SUN6I_DSI_TCON_DIV; sun4i_tcon0_mode_set_common(tcon, mode); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index b5214d71610f..84cfb1952ff7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -257,8 +257,6 @@ struct sun4i_tcon { struct reset_control *lcd_rst; struct reset_control *lvds_rst; - struct drm_panel *panel; - /* Platform adjustments */ const struct sun4i_tcon_quirks *quirks; diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 318994cd1b85..6ff585055a07 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -24,7 +24,9 @@ #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include "sun4i_crtc.h" #include "sun4i_drv.h" +#include "sun4i_tcon.h" #include "sun6i_mipi_dsi.h" #include <video/mipi_display.h> @@ -33,6 +35,8 @@ #define SUN6I_DSI_CTL_EN BIT(0) #define SUN6I_DSI_BASIC_CTL_REG 0x00c +#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4) +#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3) #define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2) #define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1) #define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0) @@ -153,6 +157,8 @@ #define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04) +#define SUN6I_DSI_SYNC_POINT 40 + enum sun6i_dsi_start_inst { DSI_START_LPRX, DSI_START_LPTX, @@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi, static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { - return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1; + u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); + u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; + + if (delay > mode->vtotal) + delay = delay % mode->vtotal; + + return max_t(u16, delay, 1); +} + +static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi, + struct drm_display_mode *mode) +{ + struct mipi_dsi_device *device = dsi->device; + unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; + + return 
mode->htotal * Bpp / device->lanes; +} + +static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi, + struct drm_display_mode *mode, + u16 line_num, u16 edge1) +{ + u16 edge0 = edge1; + + edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8; + + if (edge0 > line_num) + return edge0 - line_num; + + return 1; +} + +static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi, + struct drm_display_mode *mode, + u16 line_num) +{ + struct mipi_dsi_device *device = dsi->device; + unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; + unsigned int hbp = mode->htotal - mode->hsync_end; + u16 edge1; + + edge1 = SUN6I_DSI_SYNC_POINT; + edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes; + + if (edge1 > line_num) + return line_num; + + return edge1; } static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, @@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, struct mipi_dsi_device *device = dsi->device; u32 val = 0; - if ((mode->hsync_end - mode->hdisplay) > 20) { + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + u16 line_num = sun6i_dsi_get_line_num(dsi, mode); + u16 edge0, edge1; + + edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num); + edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1); + + regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG, + SUN6I_DSI_BURST_DRQ_EDGE0(edge0) | + SUN6I_DSI_BURST_DRQ_EDGE1(edge1)); + + regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG, + SUN6I_DSI_BURST_LINE_NUM(line_num) | + SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT)); + + val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE; + } else if ((mode->hsync_end - mode->hdisplay) > 20) { /* Maaaaaagic */ u16 drq = (mode->hsync_end - mode->hdisplay) - 20; @@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { + struct mipi_dsi_device *device = dsi->device; u16 delay = 50 - 1; + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + delay = (mode->htotal - mode->hdisplay) * 150; + delay /= (mode->clock / 1000) * 8; + delay -= 50; + } + + regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG, + 2 << (4 * DSI_INST_ID_LP11) | + 3 << (4 * DSI_INST_ID_DLY)); + regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0), SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) | SUN6I_DSI_INST_LOOP_NUM_N1(delay)); @@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, { struct mipi_dsi_device *device = dsi->device; unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; - u16 hbp, hfp, hsa, hblk, vblk; + u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0; + u32 basic_ctl = 0; size_t bytes; u8 *buffer; /* Do all timing calculations up front to allocate buffer space */ - /* - * A sync period is composed of a blanking packet (4 bytes + - * payload + 2 bytes) and a sync event packet (4 bytes). Its - * minimal size is therefore 10 bytes - */ -#define HSA_PACKET_OVERHEAD 10 - hsa = max((unsigned int)HSA_PACKET_OVERHEAD, - (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + hblk = mode->hdisplay * Bpp; + basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST | + SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS | + SUN6I_DSI_BASIC_CTL_HBP_DIS; - /* - * The backporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). 
Its minimal size is therefore 6 bytes - */ + if (device->lanes == 4) + basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL | + SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc); + } else { + /* + * A sync period is composed of a blanking packet (4 + * bytes + payload + 2 bytes) and a sync event packet + * (4 bytes). Its minimal size is therefore 10 bytes + */ +#define HSA_PACKET_OVERHEAD 10 + hsa = max((unsigned int)HSA_PACKET_OVERHEAD, + (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + + /* + * The backporch is set using a blanking packet (4 + * bytes + payload + 2 bytes). Its minimal size is + * therefore 6 bytes + */ #define HBP_PACKET_OVERHEAD 6 - hbp = max((unsigned int)HBP_PACKET_OVERHEAD, - (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD); - - /* - * The frontporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). Its minimal size is therefore 6 bytes - */ + hbp = max((unsigned int)HBP_PACKET_OVERHEAD, + (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD); + + /* + * The frontporch is set using a blanking packet (4 + * bytes + payload + 2 bytes). Its minimal size is + * therefore 6 bytes + */ #define HFP_PACKET_OVERHEAD 6 - hfp = max((unsigned int)HFP_PACKET_OVERHEAD, - (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD); - - /* - * hblk seems to be the line + porches length. - */ - hblk = mode->htotal * Bpp - hsa; - - /* - * And I'm not entirely sure what vblk is about. The driver in - * Allwinner BSP is using a rather convoluted calculation - * there only for 4 lanes. However, using 0 (the !4 lanes - * case) even with a 4 lanes screen seems to work... - */ - vblk = 0; + hfp = max((unsigned int)HFP_PACKET_OVERHEAD, + (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD); + + /* + * The blanking is set using a sync event (4 bytes) + * and a blanking packet (4 bytes + payload + 2 + * bytes). Its minimal size is therefore 10 bytes. + */ +#define HBLK_PACKET_OVERHEAD 10 + hblk = max((unsigned int)HBLK_PACKET_OVERHEAD, + (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp - + HBLK_PACKET_OVERHEAD); + + /* + * And I'm not entirely sure what vblk is about. The driver in + * Allwinner BSP is using a rather convoluted calculation + * there only for 4 lanes. However, using 0 (the !4 lanes + * case) even with a 4 lanes screen seems to work... + */ + vblk = 0; + } /* How many bytes do we need to send all payloads? 
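+ * With purely illustrative timings -- say 800x480, RGB888 (Bpp = 3),
+ * hsync_start = 824, hsync_end = 840, htotal = 888 -- the non-burst
+ * branch above gives hsa = max(10, 16 * 3 - 10) = 38,
+ * hbp = max(6, 48 * 3 - 6) = 138, hfp = max(6, 24 * 3 - 6) = 66 and
+ * hblk = max(10, 872 * 3 - 10) = 2606, so the allocation below must
+ * cover 2606 bytes.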
*/ bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk); @@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, if (WARN_ON(!buffer)) return; - regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0); + regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl); regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG, sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START, @@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG, SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end - mode->vsync_start) | - SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start - - mode->vdisplay)); + SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal - + mode->vsync_end)); regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG, SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) | diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h index a07090579f84..5c3ad5be0690 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h @@ -13,6 +13,8 @@ #include <drm/drm_encoder.h> #include <drm/drm_mipi_dsi.h> +#define SUN6I_DSI_TCON_DIV 4 + struct sun6i_dsi { struct drm_connector connector; struct drm_encoder encoder; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 30a2eff55687..fd20a928cf4d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = { static int sun8i_mixer_of_get_id(struct device_node *node) { - struct device_node *port, *ep; - int ret = -EINVAL; + struct device_node *ep, *remote; + struct of_endpoint of_ep; - /* output is port 1 */ - port = of_graph_get_port_by_id(node, 1); - if (!port) + /* Output port is 1, and we want the first endpoint. 
*/ + ep = of_graph_get_endpoint_by_regs(node, 1, -1); + if (!ep) return -EINVAL; - /* try to find downstream endpoint */ - for_each_available_child_of_node(port, ep) { - struct device_node *remote; - u32 reg; - - remote = of_graph_get_remote_endpoint(ep); - if (!remote) - continue; - - ret = of_property_read_u32(remote, "reg", &reg); - if (!ret) { - of_node_put(remote); - of_node_put(ep); - of_node_put(port); - - return reg; - } - - of_node_put(remote); - } - - of_node_put(port); + remote = of_graph_get_remote_endpoint(ep); + of_node_put(ep); + if (!remote) + return -EINVAL; - return ret; + of_graph_parse_endpoint(remote, &of_ep); + of_node_put(remote); + return of_ep.id; } static int sun8i_mixer_bind(struct device *dev, struct device *master, @@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev) static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { .ccsc = 0, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = { .ccsc = 1, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { .ccsc = 0, .mod_rate = 432000000, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { .ccsc = 0, .mod_rate = 297000000, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { .ccsc = 1, .mod_rate = 297000000, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { .vi_num = 2, .ui_num = 1, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ccsc = 0, .mod_rate = 150000000, }; @@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = { .ccsc = 0, .mod_rate = 297000000, .scaler_mask = 0xf, + .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; @@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = { .ccsc = 1, .mod_rate = 297000000, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = { .ccsc = 0, .is_de3 = true, .mod_rate = 600000000, .scaler_mask = 0xf, + .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 913d14ce68b0..80e084caa084 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -159,6 +159,7 @@ struct de2_fmt_info { * @mod_rate: module clock rate that needs to be set in order to have * a functional block. * @is_de3: true, if this is next gen display engine 3.0, false otherwise. + * @scanline_yuv: size of a scanline for VI scaler for YUV formats. 
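+ * (In the per-SoC tables above this is 2048 pixels for every mixer
+ * except the A64 mixer0 and the H6 mixer0, which can fetch 4096.)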
*/ struct sun8i_mixer_cfg { int vi_num; @@ -167,6 +168,7 @@ struct sun8i_mixer_cfg { int ccsc; unsigned long mod_rate; unsigned int is_de3 : 1; + unsigned int scanline_yuv; }; struct sun8i_mixer { diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 8a0616238467..bb8e026d6405 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, u32 bld_base, ch_base; u32 outsize, insize; u32 hphase, vphase; + u32 hn = 0, hm = 0; + u32 vn = 0, vm = 0; bool subsampled; DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n", @@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, subsampled = format->hsub > 1 || format->vsub > 1; if (insize != outsize || subsampled || hphase || vphase) { - u32 hscale, vscale; + unsigned int scanline, required; + struct drm_display_mode *mode; + u32 hscale, vscale, fps; + u64 ability; DRM_DEBUG_DRIVER("HW scaling is enabled\n"); - hscale = state->src_w / state->crtc_w; - vscale = state->src_h / state->crtc_h; + mode = &plane->state->crtc->state->mode; + fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal); + ability = clk_get_rate(mixer->mod_clk); + /* BSP algorithm assumes 80% efficiency of VI scaler unit */ + ability *= 80; + do_div(ability, mode->vdisplay * fps * max(src_w, dst_w)); + + required = src_h * 100 / dst_h; + + if (ability < required) { + DRM_DEBUG_DRIVER("Using vertical coarse scaling\n"); + vm = src_h; + vn = (u32)ability * dst_h / 100; + src_h = vn; + } + + /* it seems that every RGB scaler has buffer for 2048 pixels */ + scanline = subsampled ? mixer->cfg->scanline_yuv : 2048; + + if (src_w > scanline) { + DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n"); + hm = src_w; + hn = scanline; + src_w = hn; + } + + hscale = (src_w << 16) / dst_w; + vscale = (src_h << 16) / dst_h; sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w, dst_h, hscale, vscale, hphase, vphase, @@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, sun8i_vi_scaler_enable(mixer, channel, false); } + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(hn) | + SUN8I_MIXER_CHAN_VI_DS_M(hm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(hn) | + SUN8I_MIXER_CHAN_VI_DS_M(hm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(vn) | + SUN8I_MIXER_CHAN_VI_DS_M(vm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(vn) | + SUN8I_MIXER_CHAN_VI_DS_M(vm)); + /* Set base coordinates */ DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n", state->dst.x1, state->dst.y1); diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h index 8a5e6d01c85d..a223a4839f45 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h @@ -24,6 +24,14 @@ ((base) + 0x30 * (layer) + 0x18 + 4 * (plane)) #define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \ ((base) + 0xe8) +#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \ + ((base) + 0xf0) +#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \ + ((base) + 0xf4) +#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \ + ((base) + 0xf8) +#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \ + ((base) + 0xfc) #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0) /* 
RGB mode should be set for RGB formats and cleared for YCbCr */ @@ -33,6 +41,9 @@ #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24) +#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16) +#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0) + struct sun8i_mixer; struct sun8i_vi_layer { diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile index fb221e6f8885..6f8f764560e0 100644 --- a/drivers/gpu/drm/tinydrm/core/Makefile +++ b/drivers/gpu/drm/tinydrm/core/Makefile @@ -1,3 +1,3 @@ -tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o +tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c deleted file mode 100644 index 554abd5d3b53..000000000000 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (C) 2016 Noralf Trønnes - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <drm/drm_atomic.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_print.h> -#include <drm/tinydrm/tinydrm.h> -#include <linux/device.h> -#include <linux/dma-buf.h> -#include <linux/module.h> - -/** - * DOC: overview - * - * This library provides driver helpers for very simple display hardware. - * - * It is based on &drm_simple_display_pipe coupled with a &drm_connector which - * has only one fixed &drm_display_mode. The framebuffers are backed by the - * cma helper and have support for framebuffer flushing (dirty). - * fbdev support is also included. - * - */ - -/** - * DOC: core - * - * The driver allocates &tinydrm_device, initializes it using - * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init() - * and registers the DRM device using devm_tinydrm_register(). - */ - -static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = { - .fb_create = drm_gem_fb_create_with_dirty, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, -}; - -static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev, - struct drm_driver *driver) -{ - struct drm_device *drm; - - /* - * We don't embed drm_device, because that prevent us from using - * devm_kzalloc() to allocate tinydrm_device in the driver since - * drm_dev_put() frees the structure. The devm_ functions provide - * for easy error handling. 
- */ - drm = drm_dev_alloc(driver, parent); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - tdev->drm = drm; - drm->dev_private = tdev; - drm_mode_config_init(drm); - drm->mode_config.funcs = &tinydrm_mode_config_funcs; - drm->mode_config.allow_fb_modifiers = true; - - return 0; -} - -static void tinydrm_fini(struct tinydrm_device *tdev) -{ - drm_mode_config_cleanup(tdev->drm); - tdev->drm->dev_private = NULL; - drm_dev_put(tdev->drm); -} - -static void devm_tinydrm_release(void *data) -{ - tinydrm_fini(data); -} - -/** - * devm_tinydrm_init - Initialize tinydrm device - * @parent: Parent device object - * @tdev: tinydrm device - * @driver: DRM driver - * - * This function initializes @tdev, the underlying DRM device and it's - * mode_config. Resources will be automatically freed on driver detach (devres) - * using drm_mode_config_cleanup() and drm_dev_put(). - * - * Returns: - * Zero on success, negative error code on failure. - */ -int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, - struct drm_driver *driver) -{ - int ret; - - ret = tinydrm_init(parent, tdev, driver); - if (ret) - return ret; - - ret = devm_add_action(parent, devm_tinydrm_release, tdev); - if (ret) - tinydrm_fini(tdev); - - return ret; -} -EXPORT_SYMBOL(devm_tinydrm_init); - -static int tinydrm_register(struct tinydrm_device *tdev) -{ - struct drm_device *drm = tdev->drm; - int ret; - - ret = drm_dev_register(tdev->drm, 0); - if (ret) - return ret; - - ret = drm_fbdev_generic_setup(drm, 0); - if (ret) - DRM_ERROR("Failed to initialize fbdev: %d\n", ret); - - return 0; -} - -static void tinydrm_unregister(struct tinydrm_device *tdev) -{ - drm_atomic_helper_shutdown(tdev->drm); - drm_dev_unregister(tdev->drm); -} - -static void devm_tinydrm_register_release(void *data) -{ - tinydrm_unregister(data); -} - -/** - * devm_tinydrm_register - Register tinydrm device - * @tdev: tinydrm device - * - * This function registers the underlying DRM device and fbdev. - * These resources will be automatically unregistered on driver detach (devres) - * and the display pipeline will be disabled. - * - * Returns: - * Zero on success, negative error code on failure. - */ -int devm_tinydrm_register(struct tinydrm_device *tdev) -{ - struct device *dev = tdev->drm->dev; - int ret; - - ret = tinydrm_register(tdev); - if (ret) - return ret; - - ret = devm_add_action(dev, devm_tinydrm_register_release, tdev); - if (ret) - tinydrm_unregister(tdev); - - return ret; -} -EXPORT_SYMBOL(devm_tinydrm_register); - -/** - * tinydrm_shutdown - Shutdown tinydrm - * @tdev: tinydrm device - * - * This function makes sure that the display pipeline is disabled. - * Used by drivers in their shutdown callback to turn off the display - * on machine shutdown and reboot. 
- */ -void tinydrm_shutdown(struct tinydrm_device *tdev) -{ - drm_atomic_helper_shutdown(tdev->drm); -} -EXPORT_SYMBOL(tinydrm_shutdown); - -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c index 2737b6fdadc8..d7b38dfb6438 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c @@ -365,3 +365,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, EXPORT_SYMBOL(tinydrm_spi_transfer); #endif /* CONFIG_SPI */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c index bb5b1c1e21ba..bb8a7ed8ddf6 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c @@ -13,7 +13,7 @@ #include <drm/drm_modes.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <drm/tinydrm/tinydrm.h> +#include <drm/drm_simple_kms_helper.h> struct tinydrm_connector { struct drm_connector base; @@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode, /** * tinydrm_display_pipe_init - Initialize display pipe - * @tdev: tinydrm device + * @drm: DRM device + * @pipe: Display pipe * @funcs: Display pipe functions * @connector_type: Connector type * @formats: Array of supported formats (DRM_FORMAT\_\*) @@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode, * Returns: * Zero on success, negative error code on failure. */ -int -tinydrm_display_pipe_init(struct tinydrm_device *tdev, - const struct drm_simple_display_pipe_funcs *funcs, - int connector_type, - const uint32_t *formats, - unsigned int format_count, - const struct drm_display_mode *mode, - unsigned int rotation) +int tinydrm_display_pipe_init(struct drm_device *drm, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + int connector_type, + const uint32_t *formats, + unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation) { - struct drm_device *drm = tdev->drm; struct drm_display_mode mode_copy; struct drm_connector *connector; int ret; @@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev, if (IS_ERR(connector)) return PTR_ERR(connector); - return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats, + return drm_simple_display_pipe_init(drm, pipe, funcs, formats, format_count, modifiers, connector); } EXPORT_SYMBOL(tinydrm_display_pipe_init); diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c index 8bbd0beafc6a..fab961dded87 100644 --- a/drivers/gpu/drm/tinydrm/hx8357d.c +++ b/drivers/gpu/drm/tinydrm/hx8357d.c @@ -16,7 +16,9 @@ #include <linux/property.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - 
return; + goto out_exit; if (ret == 1) goto out_enable; @@ -171,6 +175,8 @@ out_enable: } mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { @@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { }; static const struct drm_display_mode yx350hv15_mode = { - TINYDRM_MODE(320, 480, 60, 75), + DRM_SIMPLE_MODE(320, 480, 60, 75), }; DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); @@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); static struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &hx8357d_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", @@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id); static int hx8357d_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &hx8357d_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW); if (IS_ERR(dc)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n"); @@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs, - &hx8357d_driver, &yx350hv15_mode, rotation); + ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void hx8357d_shutdown(struct spi_device *spi) +static int hx8357d_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void hx8357d_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver hx8357d_spi_driver = { @@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = { }, .id_table = hx8357d_id, .probe = hx8357d_probe, + .remove = hx8357d_remove, .shutdown = hx8357d_shutdown, }; module_spi_driver(hx8357d_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c index 43a3b68d90a2..e9116ef4b5bc 100644 --- a/drivers/gpu/drm/tinydrm/ili9225.c +++ b/drivers/gpu/drm/tinydrm/ili9225.c @@ -20,9 +20,11 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data) static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { struct drm_gem_cma_object *cma_obj = 
drm_fb_cma_get_gem_obj(fb, 0); - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; bool swap = mipi->swap_bytes; u16 x_start, y_start; u16 x1, x2, y1, y2; - int ret = 0; + int idx, ret = 0; bool full; void *tr; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe, @@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); struct drm_framebuffer *fb = plane_state->fb; - struct device *dev = tdev->drm->dev; + struct device *dev = pipe->crtc.dev->dev; struct drm_rect rect = { .x1 = 0, .x2 = fb->width, .y1 = 0, .y2 = fb->height, }; - int ret; + int ret, idx; u8 am_id; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); mipi_dbi_hw_reset(mipi); @@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000); if (ret) { DRM_DEV_ERROR(dev, "Error sending command %d\n", ret); - return; + goto out_exit; } ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000); ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000); @@ -280,15 +288,23 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, mipi->enabled = true; ili9225_fb_dirty(fb, &rect); +out_exit: + drm_dev_exit(idx); } static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); DRM_DEBUG_KMS("\n"); + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. 
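+ *
+ * For reference, a sketch of the enter/exit pattern the other
+ * callbacks converted in this patch use (drm_dev_enter() returns
+ * false once the device has been unplugged):
+ *
+ *	int idx;
+ *
+ *	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ *		return;
+ *	...program the hardware...
+ *	drm_dev_exit(idx);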
+ */ + if (!mipi->enabled) return; @@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) mipi->enabled = false; } -static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, +static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) + if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); @@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = { }; static const struct drm_display_mode ili9225_mode = { - TINYDRM_MODE(176, 220, 35, 44), + DRM_SIMPLE_MODE(176, 220, 35, 44), }; DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); @@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &ili9225_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "ili9225", .desc = "Ilitek ILI9225", @@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id); static int ili9225_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *rs; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &ili9225_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi) /* override the command function set in mipi_dbi_spi_init() */ mipi->command = ili9225_dbi_command; - ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs, - &ili9225_driver, &ili9225_mode, rotation); + ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); - return devm_tinydrm_register(&mipi->tinydrm); + return 0; } -static void ili9225_shutdown(struct spi_device *spi) +static int ili9225_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(&mipi->tinydrm); +static void ili9225_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver ili9225_spi_driver = { @@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = { }, .id_table = ili9225_id, .probe = ili9225_probe, + .remove = ili9225_remove, .shutdown = ili9225_shutdown, }; module_spi_driver(ili9225_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c index 713bb2dd7e04..d15f85e837ae 100644 --- a/drivers/gpu/drm/tinydrm/ili9341.c +++ 
b/drivers/gpu/drm/tinydrm/ili9341.c @@ -15,7 +15,9 @@ #include <linux/property.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - return; + goto out_exit; if (ret == 1) goto out_enable; @@ -127,6 +131,8 @@ out_enable: addr_mode |= ILI9341_MADCTL_BGR; mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { @@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { }; static const struct drm_display_mode yx240qv29_mode = { - TINYDRM_MODE(240, 320, 37, 49), + DRM_SIMPLE_MODE(240, 320, 37, 49), }; DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); @@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); static struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &ili9341_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", @@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id); static int ili9341_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &ili9341_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs, - &ili9341_driver, &yx240qv29_mode, rotation); + ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void ili9341_shutdown(struct spi_device *spi) +static int ili9341_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void ili9341_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver ili9341_spi_driver = { @@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = { }, .id_table = ili9341_id, 
.probe = ili9341_probe, + .remove = ili9341_remove, .shutdown = ili9341_shutdown, }; module_spi_driver(ili9341_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c index 82a92ec9ae3c..c6dc31084a4e 100644 --- a/drivers/gpu/drm/tinydrm/mi0283qt.c +++ b/drivers/gpu/drm/tinydrm/mi0283qt.c @@ -17,7 +17,9 @@ #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - return; + goto out_exit; if (ret == 1) goto out_enable; @@ -135,6 +139,8 @@ out_enable: addr_mode |= ILI9341_MADCTL_BGR; mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { @@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { }; static const struct drm_display_mode mi0283qt_mode = { - TINYDRM_MODE(320, 240, 58, 43), + DRM_SIMPLE_MODE(320, 240, 58, 43), }; DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); @@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &mi0283qt_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", @@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id); static int mi0283qt_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs, - &mi0283qt_driver, &mi0283qt_mode, rotation); + ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation); + if (ret) + return ret; + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); if (ret) return ret; - spi_set_drvdata(spi, mipi); + spi_set_drvdata(spi, drm); - return devm_tinydrm_register(&mipi->tinydrm); + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void mi0283qt_shutdown(struct spi_device *spi) +static int mi0283qt_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); - tinydrm_shutdown(&mipi->tinydrm); + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); 
+ + return 0; } -static int __maybe_unused mi0283qt_pm_suspend(struct device *dev) +static void mi0283qt_shutdown(struct spi_device *spi) { - struct mipi_dbi *mipi = dev_get_drvdata(dev); + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); +} - return drm_mode_config_helper_suspend(mipi->tinydrm.drm); +static int __maybe_unused mi0283qt_pm_suspend(struct device *dev) +{ + return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); } static int __maybe_unused mi0283qt_pm_resume(struct device *dev) { - struct mipi_dbi *mipi = dev_get_drvdata(dev); - - drm_mode_config_helper_resume(mipi->tinydrm.drm); + drm_mode_config_helper_resume(dev_get_drvdata(dev)); return 0; } @@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = { }, .id_table = mi0283qt_id, .probe = mi0283qt_probe, + .remove = mi0283qt_remove, .shutdown = mi0283qt_shutdown, }; module_spi_driver(mi0283qt_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c index 918f77c7de34..869c8f56da3b 100644 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c @@ -153,16 +153,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read); */ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) { + u8 *cmdbuf; int ret; + /* SPI requires dma-safe buffers */ + cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL); + if (!cmdbuf) + return -ENOMEM; + mutex_lock(&mipi->cmdlock); - ret = mipi->command(mipi, cmd, data, len); + ret = mipi->command(mipi, cmdbuf, data, len); mutex_unlock(&mipi->cmdlock); + kfree(cmdbuf); + return ret; } EXPORT_SYMBOL(mipi_dbi_command_buf); +/* This should only be used by mipi_dbi_command() */ +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) +{ + u8 *buf; + int ret; + + buf = kmemdup(data, len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mipi_dbi_command_buf(mipi, cmd, buf, len); + + kfree(buf); + + return ret; +} +EXPORT_SYMBOL(mipi_dbi_command_stackbuf); + /** * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary * @dst: The destination buffer @@ -216,18 +242,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy); static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; bool swap = mipi->swap_bytes; - int ret = 0; + int idx, ret = 0; bool full; void *tr; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -254,6 +282,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } /** @@ -308,19 +338,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi, .y1 = 0, .y2 = fb->height, }; + int idx; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return; mipi->enabled = true; mipi_dbi_fb_dirty(fb, &rect); backlight_enable(mipi->backlight); + + drm_dev_exit(idx); } EXPORT_SYMBOL(mipi_dbi_enable_flush); static void mipi_dbi_blank(struct mipi_dbi *mipi) { - struct drm_device *drm = mipi->tinydrm.drm; + struct drm_device *drm = &mipi->drm; u16 height = drm->mode_config.min_height; u16 width = 
drm->mode_config.min_width; size_t len = width * height * 2; + int idx; + + if (!drm_dev_enter(drm, &idx)) + return; memset(mipi->tx_buf, 0, len); @@ -330,6 +370,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi) (height >> 8) & 0xFF, (height - 1) & 0xFF); mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, (u8 *)mipi->tx_buf, len); + + drm_dev_exit(idx); } /** @@ -342,8 +384,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi) */ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + + if (!mipi->enabled) + return; DRM_DEBUG_KMS("\n"); @@ -359,6 +403,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) } EXPORT_SYMBOL(mipi_dbi_pipe_disable); +static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + static const uint32_t mipi_dbi_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -366,31 +416,27 @@ static const uint32_t mipi_dbi_formats[] = { /** * mipi_dbi_init - MIPI DBI initialization - * @dev: Parent device * @mipi: &mipi_dbi structure to initialize - * @pipe_funcs: Display pipe functions - * @driver: DRM driver + * @funcs: Display pipe functions * @mode: Display mode * @rotation: Initial rotation in degrees Counter Clock Wise * - * This function initializes a &mipi_dbi structure and it's underlying - * @tinydrm_device. It also sets up the display pipeline. + * This function sets up a &drm_simple_display_pipe with a &drm_connector that + * has one fixed &drm_display_mode which is rotated according to @rotation. + * This mode is used to set the mode config min/max width/height properties. + * Additionally &mipi_dbi.tx_buf is allocated. * * Supported formats: Native RGB565 and emulated XRGB8888. * - * Objects created by this function will be automatically freed on driver - * detach (devres). - * * Returns: * Zero on success, negative error code on failure. 
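+ *
+ * A condensed probe sequence as used by the drivers converted in this
+ * series, error handling omitted for brevity:
+ *
+ *	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
+ *	devm_drm_dev_init(dev, &mipi->drm, &driver);
+ *	drm_mode_config_init(&mipi->drm);
+ *	mipi_dbi_spi_init(spi, mipi, dc);
+ *	mipi_dbi_init(mipi, &pipe_funcs, &mode, rotation);
+ *	drm_mode_config_reset(&mipi->drm);
+ *	drm_dev_register(&mipi->drm, 0);
+ *	drm_fbdev_generic_setup(&mipi->drm, 32);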
*/ -int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, - const struct drm_simple_display_pipe_funcs *pipe_funcs, - struct drm_driver *driver, +int mipi_dbi_init(struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *funcs, const struct drm_display_mode *mode, unsigned int rotation) { size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16); - struct tinydrm_device *tdev = &mipi->tinydrm; + struct drm_device *drm = &mipi->drm; int ret; if (!mipi->command) @@ -398,16 +444,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, mutex_init(&mipi->cmdlock); - mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL); if (!mipi->tx_buf) return -ENOMEM; - ret = devm_tinydrm_init(dev, tdev, driver); - if (ret) - return ret; - /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */ - ret = tinydrm_display_pipe_init(tdev, pipe_funcs, + ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs, DRM_MODE_CONNECTOR_VIRTUAL, mipi_dbi_formats, ARRAY_SIZE(mipi_dbi_formats), mode, @@ -415,21 +457,40 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, if (ret) return ret; - drm_plane_enable_fb_damage_clips(&tdev->pipe.plane); + drm_plane_enable_fb_damage_clips(&mipi->pipe.plane); - tdev->drm->mode_config.preferred_depth = 16; + drm->mode_config.funcs = &mipi_dbi_mode_config_funcs; + drm->mode_config.preferred_depth = 16; mipi->rotation = rotation; - drm_mode_config_reset(tdev->drm); - DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", - tdev->drm->mode_config.preferred_depth, rotation); + drm->mode_config.preferred_depth, rotation); return 0; } EXPORT_SYMBOL(mipi_dbi_init); /** + * mipi_dbi_release - DRM driver release helper + * @drm: DRM device + * + * This function finalizes and frees &mipi_dbi. + * + * Drivers can use this as their &drm_driver->release callback. + */ +void mipi_dbi_release(struct drm_device *drm) +{ + struct mipi_dbi *dbi = drm_to_mipi_dbi(drm); + + DRM_DEBUG_DRIVER("\n"); + + drm_mode_config_cleanup(drm); + drm_dev_fini(drm); + kfree(dbi); +} +EXPORT_SYMBOL(mipi_dbi_release); + +/** * mipi_dbi_hw_reset - Hardware reset of controller * @mipi: MIPI DBI structure * @@ -481,7 +542,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on); static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond) { - struct device *dev = mipi->tinydrm.drm->dev; + struct device *dev = mipi->drm.dev; int ret; if (mipi->regulator) { @@ -774,18 +835,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc, return 0; } -static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd, u8 *parameters, size_t num) { - unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; + unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 
16 : 8; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return -ENOTSUPP; - MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); - ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8); + ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8); if (ret || !num) return ret; @@ -794,7 +855,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, /* MIPI DBI Type C Option 3 */ -static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd, u8 *data, size_t len) { struct spi_device *spi = mipi->spi; @@ -803,7 +864,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, struct spi_transfer tr[2] = { { .speed_hz = speed_hz, - .tx_buf = &cmd, + .tx_buf = cmd, .len = 1, }, { .speed_hz = speed_hz, @@ -821,8 +882,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, * Support non-standard 24-bit and 32-bit Nokia read commands which * start with a dummy clock, so we need to read an extra byte. */ - if (cmd == MIPI_DCS_GET_DISPLAY_ID || - cmd == MIPI_DCS_GET_DISPLAY_STATUS) { + if (*cmd == MIPI_DCS_GET_DISPLAY_ID || + *cmd == MIPI_DCS_GET_DISPLAY_STATUS) { if (!(len == 3 || len == 4)) return -EINVAL; @@ -852,7 +913,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); } - MIPI_DBI_DEBUG_COMMAND(cmd, data, len); + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); err_free: kfree(buf); @@ -860,7 +921,7 @@ err_free: return ret; } -static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -868,18 +929,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, u32 speed_hz; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return mipi_dbi_typec3_command_read(mipi, cmd, par, num); - MIPI_DBI_DEBUG_COMMAND(cmd, par, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, par, num); gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); @@ -926,7 +987,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, * Even though it's not the SPI device that does DMA (the master does), * the dma mask is necessary for the dma_alloc_wc() in * drm_gem_cma_create(). The dma_addr returned will be a physical - * adddress which might be different from the bus address, but this is + * address which might be different from the bus address, but this is * not a problem since the address will not be used. 
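/*
 * Sketch (not part of this patch): the DMA-mask setup the comment above
 * refers to. mipi_dbi_spi_init() does roughly the following so that the
 * dma_alloc_wc() in drm_gem_cma_create() has a usable mask; treat the exact
 * call as an assumption about the helper, not a quote of it:
 */
#include <linux/dma-mapping.h>

static int example_spi_dma_mask(struct spi_device *spi)
{
        /* 32 bits is plenty; the address is only used for the allocation */
        return dma_coerce_mask_and_coherent(&spi->dev, DMA_BIT_MASK(32));
}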
* The virtual address is used in the transfer and the SPI core * re-maps it on the SPI master device using the DMA streaming API @@ -976,11 +1037,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, u8 val, cmd = 0, parameters[64]; char *buf, *pos, *token; unsigned int i; - int ret; + int ret, idx; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return -ENODEV; buf = memdup_user_nul(ubuf, count); - if (IS_ERR(buf)) - return PTR_ERR(buf); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto err_exit; + } /* strip trailing whitespace */ for (i = count - 1; i > 0; i--) @@ -1016,6 +1082,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, err_free: kfree(buf); +err_exit: + drm_dev_exit(idx); return ret < 0 ? ret : count; } @@ -1024,8 +1092,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused) { struct mipi_dbi *mipi = m->private; u8 cmd, val[4]; + int ret, idx; size_t len; - int ret; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return -ENODEV; for (cmd = 0; cmd < 255; cmd++) { if (!mipi_dbi_command_is_read(mipi, cmd)) @@ -1056,6 +1127,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused) seq_printf(m, "%*phN\n", (int)len, val); } + drm_dev_exit(idx); + return 0; } @@ -1088,8 +1161,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = { */ int mipi_dbi_debugfs_init(struct drm_minor *minor) { - struct tinydrm_device *tdev = minor->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev); umode_t mode = S_IFREG | S_IWUSR; if (mipi->read_commands) diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c index b037c6540cf3..3f3632457079 100644 --- a/drivers/gpu/drm/tinydrm/repaper.c +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -26,14 +26,16 @@ #include <linux/spi/spi.h> #include <linux/thermal.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> -#include <drm/tinydrm/tinydrm.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/tinydrm/tinydrm-helpers.h> #define REPAPER_RID_G2_COG_ID 0x12 @@ -59,7 +61,8 @@ enum repaper_epd_border_byte { }; struct repaper_epd { - struct tinydrm_device tinydrm; + struct drm_device drm; + struct drm_simple_display_pipe pipe; struct spi_device *spi; struct gpio_desc *panel_on; @@ -88,10 +91,9 @@ struct repaper_epd { bool partial; }; -static inline struct repaper_epd * -epd_from_tinydrm(struct tinydrm_device *tdev) +static inline struct repaper_epd *drm_to_epd(struct drm_device *drm) { - return container_of(tdev, struct repaper_epd, tinydrm); + return container_of(drm, struct repaper_epd, drm); } static int repaper_spi_transfer(struct spi_device *spi, u8 header, @@ -529,11 +531,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; - struct tinydrm_device *tdev = fb->dev->dev_private; - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(fb->dev); struct drm_rect clip; + int idx, ret = 0; u8 *buf = NULL; - int ret = 0; + + if (!epd->enabled) + return 0; + + if (!drm_dev_enter(fb->dev, &idx)) + return -ENODEV; /* repaper can't do partial updates */ 
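/*
 * Sketch (illustrative, not from this patch): the drm_dev_enter()/
 * drm_dev_exit() guard that repaper_fb_dirty() above and the other converted
 * callbacks follow; the mydrv name is hypothetical.
 */
#include <drm/drm_drv.h>

static void mydrv_fb_dirty(struct drm_framebuffer *fb)
{
        int idx;

        /* Fails once drm_dev_unplug() has been called on fb->dev */
        if (!drm_dev_enter(fb->dev, &idx))
                return;

        /* ... hardware may only be touched inside this section ... */

        drm_dev_exit(idx);
}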
clip.x1 = 0; @@ -541,17 +548,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) clip.y1 = 0; clip.y2 = fb->height; - if (!epd->enabled) - return 0; - repaper_get_temperature(epd); DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id, epd->factored_stage_time); buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL); - if (!buf) - return -ENOMEM; + if (!buf) { + ret = -ENOMEM; + goto out_exit; + } if (import_attach) { ret = dma_buf_begin_cpu_access(import_attach->dmabuf, @@ -620,6 +626,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) out_free: kfree(buf); +out_exit: + drm_dev_exit(idx); return ret; } @@ -645,12 +653,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev); struct spi_device *spi = epd->spi; struct device *dev = &spi->dev; bool dc_ok = false; - int i, ret; + int i, ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_DRIVER("\n"); @@ -689,7 +699,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (!i) { DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n"); power_off(epd); - return; + goto out_exit; } repaper_read_id(spi); @@ -700,7 +710,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, else dev_err(dev, "wrong COG ID 0x%02x\n", ret); power_off(epd); - return; + goto out_exit; } /* Disable OE */ @@ -713,7 +723,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, else DRM_DEV_ERROR(dev, "panel is reported broken\n"); power_off(epd); - return; + goto out_exit; } /* Power saving mode */ @@ -753,7 +763,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (ret < 0) { DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret); power_off(epd); - return; + goto out_exit; } if (ret & 0x40) { @@ -765,7 +775,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (!dc_ok) { DRM_DEV_ERROR(dev, "dc/dc failed\n"); power_off(epd); - return; + goto out_exit; } /* @@ -776,15 +786,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, epd->enabled = true; epd->partial = false; +out_exit: + drm_dev_exit(idx); } static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev); struct spi_device *spi = epd->spi; unsigned int line; + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. 
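/*
 * Sketch (hypothetical mydrv names): the comment above works because
 * remove() still reaches the disable path. The SPI drivers in this patch
 * pair unplug and shutdown like this:
 */
static int mydrv_remove(struct spi_device *spi)
{
        struct drm_device *drm = spi_get_drvdata(spi);

        drm_dev_unplug(drm);             /* drm_dev_enter() fails from now on */
        drm_atomic_helper_shutdown(drm); /* runs pipe disable, panel goes off */

        return 0;
}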
+ */ + + if (!epd->enabled) + return; + DRM_DEBUG_DRIVER("\n"); epd->enabled = false; @@ -855,33 +876,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, }; +static const struct drm_mode_config_funcs repaper_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void repaper_release(struct drm_device *drm) +{ + struct repaper_epd *epd = drm_to_epd(drm); + + DRM_DEBUG_DRIVER("\n"); + + drm_mode_config_cleanup(drm); + drm_dev_fini(drm); + kfree(epd); +} + static const uint32_t repaper_formats[] = { DRM_FORMAT_XRGB8888, }; static const struct drm_display_mode repaper_e1144cs021_mode = { - TINYDRM_MODE(128, 96, 29, 22), + DRM_SIMPLE_MODE(128, 96, 29, 22), }; static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x00 }; static const struct drm_display_mode repaper_e1190cs021_mode = { - TINYDRM_MODE(144, 128, 36, 32), + DRM_SIMPLE_MODE(144, 128, 36, 32), }; static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03, 0xfc, 0x00, 0x00, 0xff }; static const struct drm_display_mode repaper_e2200cs021_mode = { - TINYDRM_MODE(200, 96, 46, 22), + DRM_SIMPLE_MODE(200, 96, 46, 22), }; static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xe0, 0x00 }; static const struct drm_display_mode repaper_e2271cs021_mode = { - TINYDRM_MODE(264, 176, 57, 38), + DRM_SIMPLE_MODE(264, 176, 57, 38), }; static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f, @@ -893,6 +931,7 @@ static struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &repaper_fops, + .release = repaper_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", @@ -925,11 +964,11 @@ static int repaper_probe(struct spi_device *spi) const struct spi_device_id *spi_id; const struct of_device_id *match; struct device *dev = &spi->dev; - struct tinydrm_device *tdev; enum repaper_model model; const char *thermal_zone; struct repaper_epd *epd; size_t line_buffer_size; + struct drm_device *drm; int ret; match = of_match_device(repaper_of_match, dev); @@ -949,10 +988,21 @@ static int repaper_probe(struct spi_device *spi) } } - epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL); + epd = kzalloc(sizeof(*epd), GFP_KERNEL); if (!epd) return -ENOMEM; + drm = &epd->drm; + + ret = devm_drm_dev_init(dev, drm, &repaper_driver); + if (ret) { + kfree(epd); + return ret; + } + + drm_mode_config_init(drm); + drm->mode_config.funcs = &repaper_mode_config_funcs; + epd->spi = spi; epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW); @@ -1063,32 +1113,41 @@ static int repaper_probe(struct spi_device *spi) if (!epd->current_frame) return -ENOMEM; - tdev = &epd->tinydrm; - - ret = devm_tinydrm_init(dev, tdev, &repaper_driver); - if (ret) - return ret; - - ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs, + ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs, DRM_MODE_CONNECTOR_VIRTUAL, repaper_formats, ARRAY_SIZE(repaper_formats), mode, 0); if (ret) return ret; - drm_mode_config_reset(tdev->drm); - spi_set_drvdata(spi, tdev); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000); - return devm_tinydrm_register(tdev); + 
drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void repaper_shutdown(struct spi_device *spi) +static int repaper_remove(struct spi_device *spi) { - struct tinydrm_device *tdev = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(tdev); +static void repaper_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver repaper_spi_driver = { @@ -1099,6 +1158,7 @@ static struct spi_driver repaper_spi_driver = { }, .id_table = repaper_id, .probe = repaper_probe, + .remove = repaper_remove, .shutdown = repaper_shutdown, }; module_spi_driver(repaper_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c index 01a8077954b3..d99957bac532 100644 --- a/drivers/gpu/drm/tinydrm/st7586.c +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -17,9 +17,11 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_rect.h> @@ -116,14 +118,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb, static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); - int start, end; - int ret = 0; + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); + int start, end, idx, ret = 0; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + /* 3 pixels per byte, so grow clip to nearest multiple of 3 */ rect->x1 = rounddown(rect->x1, 3); rect->x2 = roundup(rect->x2, 3); @@ -151,6 +154,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } static void st7586_pipe_update(struct drm_simple_display_pipe *pipe, @@ -175,8 +180,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); struct drm_framebuffer *fb = plane_state->fb; struct drm_rect rect = { .x1 = 0, @@ -184,14 +188,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, .y1 = 0, .y2 = fb->height, }; - int ret; + int idx, ret; u8 addr_mode; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_reset(mipi); if (ret) - return; + goto out_exit; mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f); mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00); @@ -244,12 +251,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, st7586_fb_dirty(fb, &rect); mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); +out_exit: + drm_dev_exit(idx); } static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular 
driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. + */ DRM_DEBUG_KMS("\n"); @@ -264,46 +279,6 @@ static const u32 st7586_formats[] = { DRM_FORMAT_XRGB8888, }; -static int st7586_init(struct device *dev, struct mipi_dbi *mipi, - const struct drm_simple_display_pipe_funcs *pipe_funcs, - struct drm_driver *driver, const struct drm_display_mode *mode, - unsigned int rotation) -{ - size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay; - struct tinydrm_device *tdev = &mipi->tinydrm; - int ret; - - mutex_init(&mipi->cmdlock); - - mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); - if (!mipi->tx_buf) - return -ENOMEM; - - ret = devm_tinydrm_init(dev, tdev, driver); - if (ret) - return ret; - - ret = tinydrm_display_pipe_init(tdev, pipe_funcs, - DRM_MODE_CONNECTOR_VIRTUAL, - st7586_formats, - ARRAY_SIZE(st7586_formats), - mode, rotation); - if (ret) - return ret; - - drm_plane_enable_fb_damage_clips(&tdev->pipe.plane); - - tdev->drm->mode_config.preferred_depth = 32; - mipi->rotation = rotation; - - drm_mode_config_reset(tdev->drm); - - DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", - tdev->drm->mode_config.preferred_depth, rotation); - - return 0; -} - static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .enable = st7586_pipe_enable, .disable = st7586_pipe_disable, @@ -311,8 +286,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, }; +static const struct drm_mode_config_funcs st7586_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + static const struct drm_display_mode st7586_mode = { - TINYDRM_MODE(178, 128, 37, 27), + DRM_SIMPLE_MODE(178, 128, 37, 27), }; DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); @@ -321,6 +302,7 @@ static struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &st7586_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", @@ -345,15 +327,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id); static int st7586_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *a0; u32 rotation = 0; + size_t bufsize; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &st7586_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + drm->mode_config.preferred_depth = 32; + drm->mode_config.funcs = &st7586_mode_config_funcs; + + mutex_init(&mipi->cmdlock); + + bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay; + mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + if (!mipi->tx_buf) + return -ENOMEM; + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -367,6 +369,7 @@ static int st7586_probe(struct spi_device *spi) } device_property_read_u32(dev, "rotation", &rotation); + mipi->rotation = rotation; ret = mipi_dbi_spi_init(spi, mipi, a0); if (ret) @@ -384,21 +387,44 @@ static int st7586_probe(struct spi_device *spi) */ mipi->swap_bytes = true; - ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver, - &st7586_mode, 
rotation); + ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + st7586_formats, ARRAY_SIZE(st7586_formats), + &st7586_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_plane_enable_fb_damage_clips(&mipi->pipe.plane); - return devm_tinydrm_register(&mipi->tinydrm); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", + drm->mode_config.preferred_depth, rotation); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void st7586_shutdown(struct spi_device *spi) +static int st7586_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(&mipi->tinydrm); +static void st7586_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver st7586_spi_driver = { @@ -409,6 +435,7 @@ static struct spi_driver st7586_spi_driver = { }, .id_table = st7586_id, .probe = st7586_probe, + .remove = st7586_remove, .shutdown = st7586_shutdown, }; module_spi_driver(st7586_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c index 3bab9a9569a6..022e9849b95b 100644 --- a/drivers/gpu/drm/tinydrm/st7735r.c +++ b/drivers/gpu/drm/tinydrm/st7735r.c @@ -14,7 +14,9 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/tinydrm/mipi-dbi.h> @@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); - int ret; + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + int ret, idx; u8 addr_mode; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_reset(mipi); if (ret) - return; + goto out_exit; msleep(150); @@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe, msleep(20); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = { @@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = { }; static const struct drm_display_mode jd_t18003_t01_mode = { - TINYDRM_MODE(128, 160, 28, 35), + DRM_SIMPLE_MODE(128, 160, 28, 35), }; DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); @@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &st7735r_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", @@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id); static int st7735r_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = 
&mipi->drm; + ret = devm_drm_dev_init(dev, drm, &st7735r_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi) /* Cannot read from Adafruit 1.8" display via SPI */ mipi->read_commands = NULL; - ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs, - &st7735r_driver, &jd_t18003_t01_mode, rotation); + ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void st7735r_shutdown(struct spi_device *spi) +static int st7735r_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void st7735r_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver st7735r_spi_driver = { @@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = { }, .id_table = st7735r_id, .probe = st7735r_probe, + .remove = st7735r_remove, .shutdown = st7735r_shutdown, }; module_spi_driver(st7735r_spi_driver); diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 22cd2d13e272..53b7b8c04bc6 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -107,6 +107,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) udl_fbdev_unplug(dev); udl_drop_usb(dev); drm_dev_unplug(dev); + drm_dev_put(dev); } /* diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig index 1552bf552c94..75a74c45f109 100644 --- a/drivers/gpu/drm/v3d/Kconfig +++ b/drivers/gpu/drm/v3d/Kconfig @@ -5,6 +5,7 @@ config DRM_V3D depends on COMMON_CLK depends on MMU select DRM_SCHED + select DRM_GEM_SHMEM_HELPER help Choose this option if you have a system that has a Broadcom V3D 3.x or newer GPU, such as BCM7268. diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a08766d39eab..c0219ebb4284 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -25,162 +25,6 @@ #include "v3d_drv.h" #include "uapi/drm/v3d_drm.h" -/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps - * it for DMA. - */ -static int -v3d_bo_get_pages(struct v3d_bo *bo) -{ - struct drm_gem_object *obj = &bo->base; - struct drm_device *dev = obj->dev; - int npages = obj->size >> PAGE_SHIFT; - int ret = 0; - - mutex_lock(&bo->lock); - if (bo->pages_refcount++ != 0) - goto unlock; - - if (!obj->import_attach) { - bo->pages = drm_gem_get_pages(obj); - if (IS_ERR(bo->pages)) { - ret = PTR_ERR(bo->pages); - goto unlock; - } - - bo->sgt = drm_prime_pages_to_sg(bo->pages, npages); - if (IS_ERR(bo->sgt)) { - ret = PTR_ERR(bo->sgt); - goto put_pages; - } - - /* Map the pages for use by the GPU. 
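/*
 * Sketch of the allocation/teardown pairing the probe functions above now
 * use (st7586, st7735r, repaper). struct mydrv and the mydrv_* names are
 * hypothetical; mydrv_driver is assumed to be a drm_driver whose .release
 * callback is mydrv_release.
 */
struct mydrv {
        struct drm_device drm;
        /* driver-private state */
};

static void mydrv_release(struct drm_device *drm)
{
        struct mydrv *priv = container_of(drm, struct mydrv, drm);

        drm_mode_config_cleanup(drm);
        drm_dev_fini(drm);
        kfree(priv);
}

static int mydrv_probe(struct spi_device *spi)
{
        struct mydrv *priv;
        int ret;

        /* kzalloc, not devm_kzalloc: the DRM device can outlive the SPI one */
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        ret = devm_drm_dev_init(&spi->dev, &priv->drm, &mydrv_driver);
        if (ret) {
                kfree(priv);    /* release() is not called if init fails */
                return ret;
        }

        /* from here on, mydrv_release() frees priv on the last drm_dev_put */
        return 0;
}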
*/ - dma_map_sg(dev->dev, bo->sgt->sgl, - bo->sgt->nents, DMA_BIDIRECTIONAL); - } else { - bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); - if (!bo->pages) - goto put_pages; - - drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages, - NULL, npages); - - /* Note that dma-bufs come in mapped. */ - } - - mutex_unlock(&bo->lock); - - return 0; - -put_pages: - drm_gem_put_pages(obj, bo->pages, true, true); - bo->pages = NULL; -unlock: - bo->pages_refcount--; - mutex_unlock(&bo->lock); - return ret; -} - -static void -v3d_bo_put_pages(struct v3d_bo *bo) -{ - struct drm_gem_object *obj = &bo->base; - - mutex_lock(&bo->lock); - if (--bo->pages_refcount == 0) { - if (!obj->import_attach) { - dma_unmap_sg(obj->dev->dev, bo->sgt->sgl, - bo->sgt->nents, DMA_BIDIRECTIONAL); - sg_free_table(bo->sgt); - kfree(bo->sgt); - drm_gem_put_pages(obj, bo->pages, true, true); - } else { - kfree(bo->pages); - } - } - mutex_unlock(&bo->lock); -} - -static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev, - size_t unaligned_size) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_gem_object *obj; - struct v3d_bo *bo; - size_t size = roundup(unaligned_size, PAGE_SIZE); - int ret; - - if (size == 0) - return ERR_PTR(-EINVAL); - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); - obj = &bo->base; - - INIT_LIST_HEAD(&bo->vmas); - INIT_LIST_HEAD(&bo->unref_head); - mutex_init(&bo->lock); - - ret = drm_gem_object_init(dev, obj, size); - if (ret) - goto free_bo; - - spin_lock(&v3d->mm_lock); - ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, - obj->size >> PAGE_SHIFT, - GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); - spin_unlock(&v3d->mm_lock); - if (ret) - goto free_obj; - - return bo; - -free_obj: - drm_gem_object_release(obj); -free_bo: - kfree(bo); - return ERR_PTR(ret); -} - -struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, - size_t unaligned_size) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_gem_object *obj; - struct v3d_bo *bo; - int ret; - - bo = v3d_bo_create_struct(dev, unaligned_size); - if (IS_ERR(bo)) - return bo; - obj = &bo->base; - - bo->resv = &bo->_resv; - reservation_object_init(bo->resv); - - ret = v3d_bo_get_pages(bo); - if (ret) - goto free_mm; - - v3d_mmu_insert_ptes(bo); - - mutex_lock(&v3d->bo_lock); - v3d->bo_stats.num_allocated++; - v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; - mutex_unlock(&v3d->bo_lock); - - return bo; - -free_mm: - spin_lock(&v3d->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&v3d->mm_lock); - - drm_gem_object_release(obj); - kfree(bo); - return ERR_PTR(ret); -} - /* Called DRM core on the last userspace/kernel unreference of the * BO. */ @@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); + v3d_mmu_remove_ptes(bo); + mutex_lock(&v3d->bo_lock); v3d->bo_stats.num_allocated--; v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; mutex_unlock(&v3d->bo_lock); - reservation_object_fini(&bo->_resv); - - v3d_bo_put_pages(bo); - - if (obj->import_attach) - drm_prime_gem_destroy(obj, bo->sgt); - - v3d_mmu_remove_ptes(bo); spin_lock(&v3d->mm_lock); drm_mm_remove_node(&bo->node); spin_unlock(&v3d->mm_lock); - mutex_destroy(&bo->lock); + /* GPU execution may have dirtied any pages in the BO. 
*/ + bo->base.pages_mark_dirty_on_put = true; - drm_gem_object_release(obj); - kfree(bo); + drm_gem_shmem_free_object(obj); } -struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj) +static const struct drm_gem_object_funcs v3d_gem_funcs = { + .free = v3d_free_object, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/* gem_create_object function for allocating a BO struct and doing + * early setup. + */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) { - struct v3d_bo *bo = to_v3d_bo(obj); + struct v3d_bo *bo; + struct drm_gem_object *obj; - return bo->resv; -} + if (size == 0) + return NULL; -static void -v3d_set_mmap_vma_flags(struct vm_area_struct *vma) -{ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); -} + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + obj = &bo->base.base; -vm_fault_t v3d_gem_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct v3d_bo *bo = to_v3d_bo(obj); - pfn_t pfn; - pgoff_t pgoff; + obj->funcs = &v3d_gem_funcs; - /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); + INIT_LIST_HEAD(&bo->unref_head); - return vmf_insert_mixed(vma, vmf->address, pfn); + return &bo->base.base; } -int v3d_mmap(struct file *filp, struct vm_area_struct *vma) +static int +v3d_bo_create_finish(struct drm_gem_object *obj) { + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + struct sg_table *sgt; int ret; - ret = drm_gem_mmap(filp, vma); + /* So far we pin the BO in the MMU for its lifetime, so use + * shmem's helper for getting a lifetime sgt. + */ + sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + spin_lock(&v3d->mm_lock); + /* Allocate the object's space in the GPU's page tables. + * Inserting PTEs will happen later, but the offset is for the + * lifetime of the BO. + */ + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, + obj->size >> PAGE_SHIFT, + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + spin_unlock(&v3d->mm_lock); if (ret) return ret; - v3d_set_mmap_vma_flags(vma); + /* Track stats for /debug/dri/n/bo_stats. 
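/*
 * Sketch (hypothetical mydrv names) of how the shmem helpers combine in
 * v3d_create_object()/v3d_bo_create_finish() above: drm_gem_shmem_create()
 * calls the driver's .gem_create_object hook to allocate the wrapper, and
 * drm_gem_shmem_get_pages_sgt() pins and dma-maps the pages once for the
 * BO's lifetime.
 */
struct mydrv_bo {
        struct drm_gem_shmem_object base;
        /* per-BO driver state, e.g. a drm_mm_node for the GPU MMU */
};

static struct mydrv_bo *mydrv_bo_create(struct drm_device *dev, size_t size)
{
        struct drm_gem_shmem_object *shmem;
        struct sg_table *sgt;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);

        sgt = drm_gem_shmem_get_pages_sgt(&shmem->base);
        if (IS_ERR(sgt)) {
                drm_gem_shmem_free_object(&shmem->base);
                return ERR_CAST(sgt);
        }

        return container_of(shmem, struct mydrv_bo, base);
}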
*/ + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated++; + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); - return ret; + v3d_mmu_insert_ptes(bo); + + return 0; } -int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t unaligned_size) { + struct drm_gem_shmem_object *shmem_obj; + struct v3d_bo *bo; int ret; - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret < 0) - return ret; - - v3d_set_mmap_vma_flags(vma); + shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + if (!shmem_obj) + return NULL; + bo = to_v3d_bo(&shmem_obj->base); - return 0; -} + ret = v3d_bo_create_finish(&shmem_obj->base); + if (ret) + goto free_obj; -struct sg_table * -v3d_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct v3d_bo *bo = to_v3d_bo(obj); - int npages = obj->size >> PAGE_SHIFT; + return bo; - return drm_prime_pages_to_sg(bo->pages, npages); +free_obj: + drm_gem_shmem_free_object(&shmem_obj->base); + return ERR_PTR(ret); } struct drm_gem_object * @@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt) { struct drm_gem_object *obj; - struct v3d_bo *bo; - - bo = v3d_bo_create_struct(dev, attach->dmabuf->size); - if (IS_ERR(bo)) - return ERR_CAST(bo); - obj = &bo->base; - - bo->resv = attach->dmabuf->resv; + int ret; - bo->sgt = sgt; - obj->import_attach = attach; - v3d_bo_get_pages(bo); + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(obj)) + return obj; - v3d_mmu_insert_ptes(bo); + ret = v3d_bo_create_finish(obj); + if (ret) { + drm_gem_shmem_free_object(obj); + return ERR_PTR(ret); + } return obj; } @@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data, args->offset = bo->node.start << PAGE_SHIFT; - ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle); - drm_gem_object_put_unlocked(&bo->base); + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, { struct drm_v3d_mmap_bo *args = data; struct drm_gem_object *gem_obj; - int ret; if (args->flags != 0) { DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); @@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, return -ENOENT; } - ret = drm_gem_create_mmap_offset(gem_obj); - if (ret == 0) - args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); drm_gem_object_put_unlocked(gem_obj); - return ret; + return 0; } int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index eb2b2d2f8553..a24af2d2f574 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) uint32_t cycles; int core = 0; int measure_ms = 1000; + int ret; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) + return ret; if (v3d->ver >= 40) { V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, @@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) cycles / (measure_ms * 1000), (cycles / (measure_ms * 100)) % 10); + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + return 0; } diff --git a/drivers/gpu/drm/v3d/v3d_drv.c 
b/drivers/gpu/drm/v3d/v3d_drv.c index f0afcec72c34..d600628bb5c1 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -7,9 +7,9 @@ * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. * For V3D 2.x support, see the VC4 driver. * - * Currently only single-core rendering using the binner and renderer - * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD - * (compute shader dispatch) are not yet supported. + * Currently only single-core rendering using the binner and renderer, + * along with TFU (texture formatting unit) rendering is supported. + * V3D 4.x's CSD (compute shader dispatch) is not yet supported. */ #include <linux/clk.h> @@ -19,6 +19,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> @@ -160,17 +161,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } -static const struct file_operations v3d_drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = v3d_mmap, - .poll = drm_poll, - .read = drm_read, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops); /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP * protection between clients. Note that render nodes would be @@ -188,12 +179,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; -static const struct vm_operations_struct v3d_vm_ops = { - .fault = v3d_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver v3d_drm_driver = { .driver_features = (DRIVER_GEM | DRIVER_RENDER | @@ -207,17 +192,11 @@ static struct drm_driver v3d_drm_driver = { .debugfs_init = v3d_debugfs_init, #endif - .gem_free_object_unlocked = v3d_free_object, - .gem_vm_ops = &v3d_vm_ops, - + .gem_create_object = v3d_create_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = drm_gem_prime_import, - .gem_prime_export = drm_gem_prime_export, - .gem_prime_res_obj = v3d_prime_res_obj, - .gem_prime_get_sg_table = v3d_prime_get_sg_table, .gem_prime_import_sg_table = v3d_prime_import_sg_table, - .gem_prime_mmap = v3d_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .ioctls = v3d_drm_ioctls, .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), @@ -265,10 +244,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d->pdev = pdev; drm = &v3d->drm; - ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); - if (ret) - goto dev_free; - ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) goto dev_free; @@ -283,6 +258,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + v3d->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(v3d->reset)) { + ret = PTR_ERR(v3d->reset); + + if (ret == -EPROBE_DEFER) + goto dev_free; + + v3d->reset = NULL; + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) { + dev_err(dev, + "Failed to get reset control or bridge regs\n"); + goto dev_free; + } + } + if (v3d->ver < 41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) @@ -312,14 +303,18 @@ static int v3d_platform_drm_probe(struct platform_device
*pdev) if (ret) goto dev_destroy; - v3d_irq_init(v3d); + ret = v3d_irq_init(v3d); + if (ret) + goto gem_destroy; ret = drm_dev_register(drm, 0); if (ret) - goto gem_destroy; + goto irq_disable; return 0; +irq_disable: + v3d_irq_disable(v3d); gem_destroy: v3d_gem_destroy(drm); dev_destroy: diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index fdda3037f7af..7b0fe6240f7d 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2015-2018 Broadcom */ -#include <linux/reservation.h> #include <linux/mm_types.h> #include <drm/drmP.h> #include <drm/drm_encoder.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/gpu_scheduler.h> #include "uapi/drm/v3d_drm.h" @@ -34,6 +34,7 @@ struct v3d_dev { * and revision. */ int ver; + bool single_irq_line; struct device *dev; struct platform_device *pdev; @@ -42,6 +43,7 @@ struct v3d_dev { void __iomem *bridge_regs; void __iomem *gca_regs; struct clk *clk; + struct reset_control *reset; /* Virtual and DMA addresses of the single shared page table. */ volatile u32 *pt; @@ -109,34 +111,15 @@ struct v3d_file_priv { struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; }; -/* Tracks a mapping of a BO into a per-fd address space */ -struct v3d_vma { - struct v3d_page_table *pt; - struct list_head list; /* entry in v3d_bo.vmas */ -}; - struct v3d_bo { - struct drm_gem_object base; - - struct mutex lock; + struct drm_gem_shmem_object base; struct drm_mm_node node; - u32 pages_refcount; - struct page **pages; - struct sg_table *sgt; - void *vaddr; - - struct list_head vmas; /* list of v3d_vma */ - /* List entry for the BO's position in * v3d_exec_info->unref_list */ struct list_head unref_head; - - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; }; static inline struct v3d_bo * @@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) } /* v3d_bo.c */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size); void v3d_free_object(struct drm_gem_object *gem_obj); struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t size); @@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -vm_fault_t v3d_gem_fault(struct vm_fault *vmf); -int v3d_mmap(struct file *filp, struct vm_area_struct *vma); -struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); -int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); @@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); /* v3d_irq.c */ -void v3d_irq_init(struct v3d_dev *v3d); +int v3d_irq_init(struct v3d_dev *v3d); void v3d_irq_enable(struct v3d_dev *v3d); void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 803f31467ec1..b84d89c7b3fb 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include 
<linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/device.h> #include <linux/io.h> #include <linux/sched/signal.h> @@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core) * type. If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. */ - V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); + if (v3d->ver < 40) + V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush * the whole thing. @@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d) } static void -v3d_reset_v3d(struct v3d_dev *v3d) +v3d_reset_by_bridge(struct v3d_dev *v3d) { int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); @@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d) V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); } +} + +static void +v3d_reset_v3d(struct v3d_dev *v3d) +{ + if (v3d->reset) + reset_control_reset(v3d->reset); + else + v3d_reset_by_bridge(v3d); v3d_init_hw_state(v3d); } @@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count, for (i = 0; i < bo_count; i++) { /* XXX: Use shared fences for read-only objects. */ - reservation_object_add_excl_fence(bos[i]->resv, fence); + reservation_object_add_excl_fence(bos[i]->base.base.resv, + fence); } } @@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos, int bo_count, struct ww_acquire_ctx *acquire_ctx) { - int i; - - for (i = 0; i < bo_count; i++) - ww_mutex_unlock(&bos[i]->resv->lock); - - ww_acquire_fini(acquire_ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count, + acquire_ctx); } /* Takes the reservation lock on all the BOs being referenced, so that @@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos, int bo_count, struct ww_acquire_ctx *acquire_ctx) { - int contended_lock = -1; int i, ret; - ww_acquire_init(acquire_ctx, &reservation_ww_class); - -retry: - if (contended_lock != -1) { - struct v3d_bo *bo = bos[contended_lock]; - - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - acquire_ctx); - if (ret) { - ww_acquire_done(acquire_ctx); - return ret; - } - } - - for (i = 0; i < bo_count; i++) { - if (i == contended_lock) - continue; - - ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock, - acquire_ctx); - if (ret) { - int j; - - for (j = 0; j < i; j++) - ww_mutex_unlock(&bos[j]->resv->lock); - - if (contended_lock != -1 && contended_lock >= i) { - struct v3d_bo *bo = bos[contended_lock]; - - ww_mutex_unlock(&bo->resv->lock); - } - - if (ret == -EDEADLK) { - contended_lock = i; - goto retry; - } - - ww_acquire_done(acquire_ctx); - return ret; - } - } - - ww_acquire_done(acquire_ctx); + ret = drm_gem_lock_reservations((struct drm_gem_object **)bos, + bo_count, acquire_ctx); + if (ret) + return ret; /* Reserve space for our shared (read-only) fence references, * before we commit the CL to the hardware. 
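/*
 * Sketch: the hand-rolled wound/wait loop removed above reduces to the new
 * core helpers. The ww_mutex ticket is what keeps two jobs that reference
 * the same BOs in different orders from deadlocking; the helper hides the
 * backoff-and-retry dance.
 */
static int example_submit_locked(struct drm_gem_object **objs, int count,
                                 struct ww_acquire_ctx *ctx)
{
        int ret;

        ret = drm_gem_lock_reservations(objs, count, ctx);
        if (ret)
                return ret;

        /* ... reserve fence slots, push the job, attach fences ... */

        drm_gem_unlock_reservations(objs, count, ctx);

        return 0;
}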
*/ for (i = 0; i < bo_count; i++) { - ret = reservation_object_reserve_shared(bos[i]->resv, 1); + ret = reservation_object_reserve_shared(bos[i]->base.base.resv, + 1); if (ret) { v3d_unlock_bo_reservations(bos, bo_count, acquire_ctx); @@ -378,11 +347,11 @@ v3d_exec_cleanup(struct kref *ref) dma_fence_put(exec->render_done_fence); for (i = 0; i < exec->bo_count; i++) - drm_gem_object_put_unlocked(&exec->bo[i]->base); + drm_gem_object_put_unlocked(&exec->bo[i]->base.base); kvfree(exec->bo); list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { - drm_gem_object_put_unlocked(&bo->base); + drm_gem_object_put_unlocked(&bo->base.base); } pm_runtime_mark_last_busy(v3d->dev); @@ -409,7 +378,7 @@ v3d_tfu_job_cleanup(struct kref *ref) for (i = 0; i < ARRAY_SIZE(job->bo); i++) { if (job->bo[i]) - drm_gem_object_put_unlocked(&job->bo[i]->base); + drm_gem_object_put_unlocked(&job->bo[i]->base.base); } pm_runtime_mark_last_busy(v3d->dev); @@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, { int ret; struct drm_v3d_wait_bo *args = data; - struct drm_gem_object *gem_obj; - struct v3d_bo *bo; ktime_t start = ktime_get(); u64 delta_ns; unsigned long timeout_jiffies = @@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; - gem_obj = drm_gem_object_lookup(file_priv, args->handle); - if (!gem_obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); - return -EINVAL; - } - bo = to_v3d_bo(gem_obj); - - ret = reservation_object_wait_timeout_rcu(bo->resv, - true, true, - timeout_jiffies); - - if (ret == 0) - ret = -ETIME; - else if (ret > 0) - ret = 0; + ret = drm_gem_reservation_object_wait(file_priv, args->handle, + true, timeout_jiffies); /* Decrement the user's timeout, in case we got interrupted * such that the ioctl will be restarted. @@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; - drm_gem_object_put_unlocked(gem_obj); - return ret; } diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 69338da70ddc..b4d6ae81186d 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -27,6 +27,9 @@ V3D_HUB_INT_MMU_CAP | \ V3D_HUB_INT_TFUC)) +static irqreturn_t +v3d_hub_irq(int irq, void *arg); + static void v3d_overflow_mem_work(struct work_struct *work) { @@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work) container_of(work, struct v3d_dev, overflow_mem_work); struct drm_device *dev = &v3d->drm; struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + struct drm_gem_object *obj; unsigned long irqflags; if (IS_ERR(bo)) { DRM_ERROR("Couldn't allocate binner overflow mem\n"); return; } + obj = &bo->base.base; /* We lost a race, and our work task came in after the bin job * completed and exited. 
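/*
 * Sketch: the lookup/wait/put sequence that v3d_wait_bo_ioctl() used to
 * open-code is now a single core helper. Under the assumptions here it
 * returns 0 on success, -ETIME on timeout and -EINVAL for a bad handle:
 */
static long example_wait_bo(struct drm_file *file_priv, u32 handle,
                            unsigned long timeout_jiffies)
{
        /* true = wait for all fences, not just the exclusive one */
        return drm_gem_reservation_object_wait(file_priv, handle, true,
                                               timeout_jiffies);
}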
This can happen because the HW @@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work) goto out; } - drm_gem_object_get(&bo->base); + drm_gem_object_get(obj); list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); - V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size); + V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); out: - drm_gem_object_put_unlocked(&bo->base); + drm_gem_object_put_unlocked(obj); } static irqreturn_t @@ -112,6 +117,12 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_GMPV) dev_err(v3d->dev, "GMP violation\n"); + /* V3D 4.2 wires the hub and core IRQs together, so if we + * didn't see the common one then check hub for MMU IRQs. + */ + if (v3d->single_irq_line && status == IRQ_NONE) + return v3d_hub_irq(irq, arg); + return status; } @@ -156,10 +167,10 @@ v3d_hub_irq(int irq, void *arg) return status; } -void +int v3d_irq_init(struct v3d_dev *v3d) { - int ret, core; + int irq1, ret, core; INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); @@ -170,16 +181,37 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), - v3d_hub_irq, IRQF_SHARED, - "v3d_hub", v3d); - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), - v3d_irq, IRQF_SHARED, - "v3d_core0", v3d); - if (ret) - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + irq1 = platform_get_irq(v3d->pdev, 1); + if (irq1 == -EPROBE_DEFER) + return irq1; + if (irq1 > 0) { + ret = devm_request_irq(v3d->dev, irq1, + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + goto fail; + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + if (ret) + goto fail; + } else { + v3d->single_irq_line = true; + + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_irq, IRQF_SHARED, + "v3d", v3d); + if (ret) + goto fail; + } v3d_irq_enable(v3d); + return 0; + +fail: + if (ret != -EPROBE_DEFER) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + return ret; } void diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index b00f97c31b70..7a21f1787ab1 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d) void v3d_mmu_insert_ptes(struct v3d_bo *bo) { - struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + struct drm_gem_shmem_object *shmem_obj = &bo->base; + struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; unsigned int count; struct scatterlist *sgl; - for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) { + for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) { u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; u32 pte = page_prot | page_address; u32 i; @@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) } WARN_ON_ONCE(page - bo->node.start != - bo->base.size >> V3D_MMU_PAGE_SHIFT); + shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT); if (v3d_mmu_flush_all(v3d)) dev_err(v3d->dev, "MMU flush timeout\n"); @@ -110,8 +111,8 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo) { - struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); - u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT; + struct v3d_dev *v3d =
to_v3d_dev(bo->base.base.dev); + u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT; u32 page; for (page = bo->node.start; page < bo->node.start + npages; page++) diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 6ccdee9d47bd..8e88af237610 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -216,6 +216,8 @@ # define V3D_IDENT2_BCG_INT BIT(28) #define V3D_CTL_MISCCFG 0x00018 +# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1) +# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1 # define V3D_MISCCFG_OVRTMUOUT BIT(0) #define V3D_CTL_L2CACTL 0x00020 diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 4704b2df3688..d0c68b7c8b41 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) mutex_lock(&v3d->reset_lock); /* block scheduler */ - for (q = 0; q < V3D_MAX_QUEUES; q++) { - struct drm_gpu_scheduler *sched = &v3d->queue[q].sched; - - drm_sched_stop(sched); + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_stop(&v3d->queue[q].sched); - if(sched_job) - drm_sched_increase_karma(sched_job); - } + if (sched_job) + drm_sched_increase_karma(sched_job); /* get the GPU back into the init state */ v3d_reset(v3d); for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_resubmit_jobs(sched_job->sched); + drm_sched_resubmit_jobs(&v3d->queue[q].sched); /* Unblock schedulers and restart their jobs. */ for (q = 0; q < V3D_MAX_QUEUES; q++) { diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig new file mode 100644 index 000000000000..1f4182e2e980 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/Kconfig @@ -0,0 +1,15 @@ +config DRM_VBOXVIDEO + tristate "Virtual Box Graphics Card" + depends on DRM && X86 && PCI + select DRM_KMS_HELPER + select DRM_TTM + select GENERIC_ALLOCATOR + help + This is a KMS driver for the virtual Graphics Card used in + Virtual Box virtual machines. + + Although it is possible to build this driver into the + kernel, it is advised to build it as a module, so that it can + be updated independently of the kernel. Select M to build this + driver as a module and add support for these devices via drm/kms + interfaces. diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile new file mode 100644 index 000000000000..1224f313af0c --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \ + vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ + vbox_mode.o vbox_prime.o vbox_ttm.o + +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c new file mode 100644 index 000000000000..361d3193258e --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_channels.h" +#include "hgsmi_ch_setup.h" + +/** + * Inform the host of the location of the host flags in VRAM via an HGSMI cmd. + * Return: 0 or negative errno value. + * @ctx: The context of the guest heap to use. + * @location: The offset chosen for the flags within guest VRAM.
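/*
 * Sketch (struct my_request and MY_REQUEST_INFO are hypothetical): every
 * host call in hgsmi_base.c follows the same allocate/fill/submit/
 * read-back/free shape over the guest heap:
 */
struct my_request {
        u32 value;
} __packed;

static int example_hgsmi_call(struct gen_pool *ctx, u32 *value)
{
        struct my_request *p;

        p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
                               MY_REQUEST_INFO);
        if (!p)
                return -ENOMEM;

        p->value = *value;           /* fill in the request */
        hgsmi_buffer_submit(ctx, p); /* the host processes it synchronously */
        *value = p->value;           /* the host may write results in place */
        hgsmi_buffer_free(ctx, p);

        return 0;
}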
+ */ +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location) +{ + struct hgsmi_buffer_location *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI, + HGSMI_CC_HOST_FLAGS_LOCATION); + if (!p) + return -ENOMEM; + + p->buf_location = location; + p->buf_len = sizeof(struct hgsmi_host_flags); + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Notify the host of HGSMI-related guest capabilities via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context of the guest heap to use. + * @caps: The capabilities to report, see vbva_caps. + */ +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps) +{ + struct vbva_caps *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS); + if (!p) + return -ENOMEM; + + p->rc = VERR_NOT_IMPLEMENTED; + p->caps = caps; + + hgsmi_buffer_submit(ctx, p); + + WARN_ON_ONCE(p->rc < 0); + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +int hgsmi_test_query_conf(struct gen_pool *ctx) +{ + u32 value = 0; + int ret; + + ret = hgsmi_query_conf(ctx, U32_MAX, &value); + if (ret) + return ret; + + return value == U32_MAX ? 0 : -EIO; +} + +/** + * Query the host for an HGSMI configuration parameter via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap used. + * @index: The index of the parameter to query. + * @value_ret: Where to store the value of the parameter on success. + */ +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret) +{ + struct vbva_conf32 *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_QUERY_CONF32); + if (!p) + return -ENOMEM; + + p->index = index; + p->value = U32_MAX; + + hgsmi_buffer_submit(ctx, p); + + *value_ret = p->value; + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Pass the host a new mouse pointer shape via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to be used. + * @flags: Cursor flags. + * @hot_x: Horizontal position of the hot spot. + * @hot_y: Vertical position of the hot spot. + * @width: Width in pixels of the cursor. + * @height: Height in pixels of the cursor. + * @pixels: Pixel data, @see VMMDevReqMousePointer for the format. + * @len: Size in bytes of the pixel data. + */ +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len) +{ + struct vbva_mouse_pointer_shape *p; + u32 pixel_len = 0; + int rc; + + if (flags & VBOX_MOUSE_POINTER_SHAPE) { + /* + * Size of the pointer data: + * sizeof (AND mask) + sizeof (XOR_MASK) + */ + pixel_len = ((((width + 7) / 8) * height + 3) & ~3) + + width * 4 * height; + if (pixel_len > len) + return -EINVAL; + + /* + * If shape is supplied, then always create the pointer visible. 
+ * See comments in 'vboxUpdatePointerShape' + */ + flags |= VBOX_MOUSE_POINTER_VISIBLE; + } + + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA, + VBVA_MOUSE_POINTER_SHAPE); + if (!p) + return -ENOMEM; + + p->result = VINF_SUCCESS; + p->flags = flags; + p->hot_x = hot_x; + p->hot_y = hot_y; + p->width = width; + p->height = height; + if (pixel_len) + memcpy(p->data, pixels, pixel_len); + + hgsmi_buffer_submit(ctx, p); + + switch (p->result) { + case VINF_SUCCESS: + rc = 0; + break; + case VERR_NO_MEMORY: + rc = -ENOMEM; + break; + case VERR_NOT_SUPPORTED: + rc = -EBUSY; + break; + default: + rc = -EINVAL; + } + + hgsmi_buffer_free(ctx, p); + + return rc; +} + +/** + * Report the guest cursor position. The host may wish to use this information + * to re-position its own cursor (though this is currently unlikely). The + * current host cursor position is returned. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap used. + * @report_position: Are we reporting a position? + * @x: Guest cursor X position. + * @y: Guest cursor Y position. + * @x_host: Where to store the host cursor X position. Must not be NULL. + * @y_host: Where to store the host cursor Y position. Must not be NULL. + */ +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host) +{ + struct vbva_cursor_position *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_CURSOR_POSITION); + if (!p) + return -ENOMEM; + + p->report_position = report_position; + p->x = x; + p->y = y; + + hgsmi_buffer_submit(ctx, p); + + *x_host = p->x; + *y_host = p->y; + + hgsmi_buffer_free(ctx, p); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h new file mode 100644 index 000000000000..4e93418d6a13 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_CH_SETUP_H__ +#define __HGSMI_CH_SETUP_H__ + +/* + * Tell the host the location of the hgsmi_host_flags structure, where the + * host can write information about pending buffers, etc., and which can be + * quickly polled by the guest without needing port I/O. + */ +#define HGSMI_CC_HOST_FLAGS_LOCATION 0 + +struct hgsmi_buffer_location { + u32 buf_location; + u32 buf_len; +} __packed; + +/* HGSMI setup and configuration data structures. */ + +#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u +#define HGSMIHOSTFLAGS_IRQ 0x02u +#define HGSMIHOSTFLAGS_VSYNC 0x10u +#define HGSMIHOSTFLAGS_HOTPLUG 0x20u +#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u + +struct hgsmi_host_flags { + u32 host_flags; + u32 reserved[3]; +} __packed; + +#endif diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h new file mode 100644 index 000000000000..9b83f4ff3faf --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_CHANNELS_H__ +#define __HGSMI_CHANNELS_H__ + +/* + * Each channel has an 8-bit identifier. There are a number of predefined + * (hardcoded) channels. + * + * The HGSMI_CH_HGSMI channel can be used to map a string channel identifier + * to a free 16-bit numerical value. Values are allocated in the range + * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
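
The hgsmi_host_flags block declared in hgsmi_ch_setup.h above is what makes event handling cheap: once its VRAM location has been reported with hgsmi_report_flags_location(), the guest can test for host activity with a single MMIO read instead of port I/O. A minimal sketch, assuming a mapping of the guest heap and the HOST_FLAGS_OFFSET constant defined later in vbox_drv.h (this mirrors what the driver's IRQ code does):

static bool example_host_irq_pending(u8 __iomem *guest_heap)
{
        u32 flags = readl(guest_heap + HOST_FLAGS_OFFSET);

        return flags & HGSMIHOSTFLAGS_IRQ;
}
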
+ */ + +/* A reserved channel value */ +#define HGSMI_CH_RESERVED 0x00 +/* HGCMI: setup and configuration */ +#define HGSMI_CH_HGSMI 0x01 +/* Graphics: VBVA */ +#define HGSMI_CH_VBVA 0x02 +/* Graphics: Seamless with a single guest region */ +#define HGSMI_CH_SEAMLESS 0x03 +/* Graphics: Seamless with separate host windows */ +#define HGSMI_CH_SEAMLESS2 0x04 +/* Graphics: OpenGL HW acceleration */ +#define HGSMI_CH_OPENGL 0x05 + +/* The first channel index to be used for string mappings (inclusive) */ +#define HGSMI_CH_STRING_FIRST 0x20 +/* The last channel index for string mappings (inclusive) */ +#define HGSMI_CH_STRING_LAST 0xff + +#endif diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h new file mode 100644 index 000000000000..6c8df1cdb087 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_DEFS_H__ +#define __HGSMI_DEFS_H__ + +/* Buffer sequence type mask. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03 +/* Single buffer, not a part of a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00 +/* The first buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01 +/* A middle buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02 +/* The last buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03 + +/* 16 bytes buffer header. */ +struct hgsmi_buffer_header { + u32 data_size; /* Size of data that follows the header. */ + u8 flags; /* HGSMI_BUFFER_HEADER_F_* */ + u8 channel; /* The channel the data must be routed to. */ + u16 channel_info; /* Opaque to the HGSMI, used by the channel. */ + + union { + /* Opaque placeholder to make the union 8 bytes. */ + u8 header_data[8]; + + /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */ + struct { + u32 reserved1; /* A reserved field, initialize to 0. */ + u32 reserved2; /* A reserved field, initialize to 0. */ + } buffer; + + /* HGSMI_BUFFER_HEADER_F_SEQ_START */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* The total size of the sequence. */ + u32 sequence_size; + } sequence_start; + + /* + * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and + * HGSMI_BUFFER_HEADER_F_SEQ_END + */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* Data offset in the entire sequence. */ + u32 sequence_offset; + } sequence_continue; + } u; +} __packed; + +/* 8 bytes buffer tail. */ +struct hgsmi_buffer_tail { + /* Reserved, must be initialized to 0. */ + u32 reserved; + /* + * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html + * Over the header, offset and for first 4 bytes of the tail. + */ + u32 checksum; +} __packed; + +/* + * The size of the array of channels. Array indexes are u8. + * Note: the value must not be changed. + */ +#define HGSMI_NUMBER_OF_CHANNELS 0x100 + +#endif diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c new file mode 100644 index 000000000000..7580b9002379 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/modesetting.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_channels.h" + +/** + * Set a video mode via an HGSMI request. 
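
Putting the hgsmi_defs.h structures above together, every HGSMI buffer is framed the same way. A sketch of the byte layout and of what the checksum covers (sizes follow the packed structures; the helper name is hypothetical):

/*
 * [0 .. 15]               struct hgsmi_buffer_header (16 bytes)
 * [16 .. 16 + size - 1]   payload, header.data_size == size
 * [16 + size .. + 7]      struct hgsmi_buffer_tail (8 bytes)
 *
 * The checksum hashes the buffer's offset within the heap, the whole
 * header, and the first 4 bytes of the tail (the reserved field); the
 * checksum word itself is excluded.
 */
static size_t example_hgsmi_total_size(size_t payload)
{
        return sizeof(struct hgsmi_buffer_header) + payload +
               sizeof(struct hgsmi_buffer_tail);
}
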
The views must have been + * initialised first using VBoxHGSMISendViewInfo and, if the mode is being + * set on the first display, it must first be set using registers. + * @ctx: The context containing the heap to use. + * @display: The screen number. + * @origin_x: The horizontal displacement relative to the first screen. + * @origin_y: The vertical displacement relative to the first screen. + * @start_offset: The offset of the visible area of the framebuffer + * relative to the framebuffer start. + * @pitch: The offset in bytes between the starts of two adjacent + * scan lines in video RAM. + * @width: The mode width. + * @height: The mode height. + * @bpp: The colour depth of the mode. + * @flags: VBVA_SCREEN_F_* flags. + */ +void hgsmi_process_display_info(struct gen_pool *ctx, u32 display, + s32 origin_x, s32 origin_y, u32 start_offset, + u32 pitch, u32 width, u32 height, + u16 bpp, u16 flags) +{ + struct vbva_infoscreen *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_INFO_SCREEN); + if (!p) + return; + + p->view_index = display; + p->origin_x = origin_x; + p->origin_y = origin_y; + p->start_offset = start_offset; + p->line_size = pitch; + p->width = width; + p->height = height; + p->bits_per_pixel = bpp; + p->flags = flags; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); +} + +/** + * Report the rectangle relative to which absolute pointer events should be + * expressed. This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to use. + * @origin_x: Upper left X co-ordinate relative to the first screen. + * @origin_y: Upper left Y co-ordinate relative to the first screen. + * @width: Rectangle width. + * @height: Rectangle height. + */ +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height) +{ + struct vbva_report_input_mapping *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_REPORT_INPUT_MAPPING); + if (!p) + return -ENOMEM; + + p->x = origin_x; + p->y = origin_y; + p->cx = width; + p->cy = height; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Get the most recent video mode hints. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to use. + * @screens: The number of screens to query hints for, starting at 0. + * @hints: Array of vbva_modehint structures for receiving the hints.
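
As a usage sketch for hgsmi_process_display_info() above: programming a 1024x768, 32bpp mode on the first screen, with the visible area at the start of the framebuffer. The flag value is one the driver itself uses; the wrapper is hypothetical:

static void example_set_mode(struct vbox_private *vbox)
{
        hgsmi_process_display_info(vbox->guest_pool,
                                   0,          /* display: first screen */
                                   0, 0,       /* origin_x, origin_y */
                                   0,          /* start_offset */
                                   1024 * 4,   /* pitch: width * 4 bytes/px */
                                   1024, 768,  /* width, height */
                                   32,         /* bpp */
                                   VBVA_SCREEN_F_ACTIVE);
}
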
+ */ +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints) +{ + struct vbva_query_mode_hints *p; + size_t size; + + if (WARN_ON(!hints)) + return -EINVAL; + + size = screens * sizeof(struct vbva_modehint); + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA, + VBVA_QUERY_MODE_HINTS); + if (!p) + return -ENOMEM; + + p->hints_queried_count = screens; + p->hint_structure_guest_size = sizeof(struct vbva_modehint); + p->rc = VERR_NOT_SUPPORTED; + + hgsmi_buffer_submit(ctx, p); + + if (p->rc < 0) { + hgsmi_buffer_free(ctx, p); + return -EIO; + } + + memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size); + hgsmi_buffer_free(ctx, p); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c new file mode 100644 index 000000000000..fb6a0f0b8167 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_drv.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/console.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/vt_kern.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> + +#include "vbox_drv.h" + +static int vbox_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, vbox_modeset, int, 0400); + +static struct drm_driver driver; + +static const struct pci_device_id pciidlist[] = { + { PCI_DEVICE(0x80ee, 0xbeef) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +static struct drm_fb_helper_funcs vbox_fb_helper_funcs = { + .fb_probe = vboxfb_create, +}; + +static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct vbox_private *vbox; + int ret = 0; + + if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) + return -ENODEV; + + vbox = kzalloc(sizeof(*vbox), GFP_KERNEL); + if (!vbox) + return -ENOMEM; + + ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev); + if (ret) { + kfree(vbox); + return ret; + } + + vbox->ddev.pdev = pdev; + vbox->ddev.dev_private = vbox; + pci_set_drvdata(pdev, vbox); + mutex_init(&vbox->hw_mutex); + + ret = pci_enable_device(pdev); + if (ret) + goto err_dev_put; + + ret = vbox_hw_init(vbox); + if (ret) + goto err_pci_disable; + + ret = vbox_mm_init(vbox); + if (ret) + goto err_hw_fini; + + ret = vbox_mode_init(vbox); + if (ret) + goto err_mm_fini; + + ret = vbox_irq_init(vbox); + if (ret) + goto err_mode_fini; + + ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper, + &vbox_fb_helper_funcs, 32, + vbox->num_crtcs); + if (ret) + goto err_irq_fini; + + ret = drm_dev_register(&vbox->ddev, 0); + if (ret) + goto err_fbdev_fini; + + return 0; + +err_fbdev_fini: + vbox_fbdev_fini(vbox); +err_irq_fini: + vbox_irq_fini(vbox); +err_mode_fini: + vbox_mode_fini(vbox); +err_mm_fini: + vbox_mm_fini(vbox); +err_hw_fini: + vbox_hw_fini(vbox); +err_pci_disable: + pci_disable_device(pdev); +err_dev_put: + drm_dev_put(&vbox->ddev); + return ret; +} + +static void vbox_pci_remove(struct pci_dev *pdev) +{ + struct vbox_private *vbox = pci_get_drvdata(pdev); + + drm_dev_unregister(&vbox->ddev); + vbox_fbdev_fini(vbox); + vbox_irq_fini(vbox); + vbox_mode_fini(vbox); + vbox_mm_fini(vbox); + vbox_hw_fini(vbox); + drm_dev_put(&vbox->ddev); +} + 
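
vbox_pci_probe() and vbox_pci_remove() above observe the usual mirror-image discipline; a summary of the pairing, which also shows why drm_dev_put() must come last (it may free the vbox_private that embeds the drm_device):

/*
 *   probe                        error unwind / remove
 *   ---------------------------  ----------------------------
 *   drm_dev_init()               drm_dev_put()        (last)
 *   pci_enable_device()          pci_disable_device() (error path)
 *   vbox_hw_init()               vbox_hw_fini()
 *   vbox_mm_init()               vbox_mm_fini()
 *   vbox_mode_init()             vbox_mode_fini()
 *   vbox_irq_init()              vbox_irq_fini()
 *   drm_fb_helper_fbdev_setup()  vbox_fbdev_fini()
 *   drm_dev_register()           drm_dev_unregister() (first)
 */
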
+#ifdef CONFIG_PM_SLEEP +static int vbox_pm_suspend(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + int error; + + error = drm_mode_config_helper_suspend(&vbox->ddev); + if (error) + return error; + + pci_save_state(vbox->ddev.pdev); + pci_disable_device(vbox->ddev.pdev); + pci_set_power_state(vbox->ddev.pdev, PCI_D3hot); + + return 0; +} + +static int vbox_pm_resume(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + if (pci_enable_device(vbox->ddev.pdev)) + return -EIO; + + return drm_mode_config_helper_resume(&vbox->ddev); +} + +static int vbox_pm_freeze(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(&vbox->ddev); +} + +static int vbox_pm_thaw(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_resume(&vbox->ddev); +} + +static int vbox_pm_poweroff(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(&vbox->ddev); +} + +static const struct dev_pm_ops vbox_pm_ops = { + .suspend = vbox_pm_suspend, + .resume = vbox_pm_resume, + .freeze = vbox_pm_freeze, + .thaw = vbox_pm_thaw, + .poweroff = vbox_pm_poweroff, + .restore = vbox_pm_resume, +}; +#endif + +static struct pci_driver vbox_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = vbox_pci_probe, + .remove = vbox_pci_remove, +#ifdef CONFIG_PM_SLEEP + .driver.pm = &vbox_pm_ops, +#endif +}; + +static const struct file_operations vbox_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .mmap = vbox_mmap, + .poll = drm_poll, + .read = drm_read, +}; + +static struct drm_driver driver = { + .driver_features = + DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, + + .lastclose = drm_fb_helper_lastclose, + + .fops = &vbox_fops, + .irq_handler = vbox_irq_handler, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + .gem_free_object_unlocked = vbox_gem_free_object, + .dumb_create = vbox_dumb_create, + .dumb_map_offset = vbox_dumb_mmap_offset, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = vbox_gem_prime_pin, + .gem_prime_unpin = vbox_gem_prime_unpin, + .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table, + .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table, + .gem_prime_vmap = vbox_gem_prime_vmap, + .gem_prime_vunmap = vbox_gem_prime_vunmap, + .gem_prime_mmap = vbox_gem_prime_mmap, +}; + +static int __init vbox_init(void) +{ +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && vbox_modeset == -1) + return -EINVAL; +#endif + + if (vbox_modeset == 0) + return -EINVAL; + + return pci_register_driver(&vbox_pci_driver); +} + +static void __exit vbox_exit(void) +{ + pci_unregister_driver(&vbox_pci_driver); +} + +module_init(vbox_init); +module_exit(vbox_exit); + +MODULE_AUTHOR("Oracle Corporation"); +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h new file mode 100644 index 000000000000..0ecd0a44176e --- /dev/null +++ 
b/drivers/gpu/drm/vboxvideo/vbox_drv.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_drv.h + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#ifndef __VBOX_DRV_H__ +#define __VBOX_DRV_H__ + +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/irqreturn.h> +#include <linux/string.h> + +#include <drm/drm_encoder.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> + +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_memory.h> +#include <drm/ttm/ttm_module.h> + +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_ch_setup.h" + +#define DRIVER_NAME "vboxvideo" +#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card" +#define DRIVER_DATE "20130823" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define VBOX_MAX_CURSOR_WIDTH 64 +#define VBOX_MAX_CURSOR_HEIGHT 64 +#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT) +#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8) + +#define VBOX_MAX_SCREENS 32 + +#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \ + VBVA_ADAPTER_INFORMATION_SIZE) +#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE +#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \ + sizeof(struct hgsmi_host_flags)) +#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE + +struct vbox_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj; +}; + +struct vbox_private { + /* Must be first; or we must define our own release callback */ + struct drm_device ddev; + struct drm_fb_helper fb_helper; + struct vbox_framebuffer afb; + + u8 __iomem *guest_heap; + u8 __iomem *vbva_buffers; + struct gen_pool *guest_pool; + struct vbva_buf_ctx *vbva_info; + bool any_pitch; + u32 num_crtcs; + /* Amount of available VRAM, including space used for buffers. */ + u32 full_vram_size; + /* Amount of available VRAM, not including space used for buffers. */ + u32 available_vram_size; + /* Array of structures for receiving mode hints. */ + struct vbva_modehint *last_mode_hints; + + int fb_mtrr; + + struct { + struct ttm_bo_device bdev; + } ttm; + + struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */ + struct work_struct hotplug_work; + u32 input_mapping_width; + u32 input_mapping_height; + /* + * Is user-space using an X.Org-style layout of one large frame-buffer + * encompassing all screen ones or is the fbdev console active? + */ + bool single_framebuffer; + u8 cursor_data[CURSOR_DATA_SIZE]; +}; + +#undef CURSOR_PIXEL_COUNT +#undef CURSOR_DATA_SIZE + +struct vbox_gem_object; + +struct vbox_connector { + struct drm_connector base; + char name[32]; + struct vbox_crtc *vbox_crtc; + struct { + u32 width; + u32 height; + bool disconnected; + } mode_hint; +}; + +struct vbox_crtc { + struct drm_crtc base; + bool disconnected; + unsigned int crtc_id; + u32 fb_offset; + bool cursor_enabled; + u32 x_hint; + u32 y_hint; + /* + * When setting a mode we not only pass the mode to the hypervisor, + * but also information on how to map / translate input coordinates + * for the emulated USB tablet. This input-mapping may change when + * the mode on *another* crtc changes. 
+ * + * This means that sometimes we must do a modeset on other crtc-s then + * the one being changed to update the input-mapping. Including crtc-s + * which may be disabled inside the guest (shown as a black window + * on the host unless closed by the user). + * + * With atomic modesetting the mode-info of disabled crtcs gets zeroed + * yet we need it when updating the input-map to avoid resizing the + * window as a side effect of a mode_set on another crtc. Therefor we + * cache the info of the last mode below. + */ + u32 width; + u32 height; + u32 x; + u32 y; +}; + +struct vbox_encoder { + struct drm_encoder base; +}; + +#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base) +#define to_vbox_connector(x) container_of(x, struct vbox_connector, base) +#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base) +#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base) + +bool vbox_check_supported(u16 id); +int vbox_hw_init(struct vbox_private *vbox); +void vbox_hw_fini(struct vbox_private *vbox); + +int vbox_mode_init(struct vbox_private *vbox); +void vbox_mode_fini(struct vbox_private *vbox); + +void vbox_report_caps(struct vbox_private *vbox); + +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects); + +int vbox_framebuffer_init(struct vbox_private *vbox, + struct vbox_framebuffer *vbox_fb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +int vboxfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +void vbox_fbdev_fini(struct vbox_private *vbox); + +struct vbox_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + struct ttm_bo_kmap_obj kmap; + struct drm_gem_object gem; + struct ttm_place placements[3]; + int pin_count; +}; + +#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem) + +static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vbox_bo, bo); +} + +#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base) + +static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo) +{ + return bo->bo.offset; +} + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +void vbox_gem_free_object(struct drm_gem_object *obj); +int vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset); + +#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT) + +int vbox_mm_init(struct vbox_private *vbox); +void vbox_mm_fini(struct vbox_private *vbox); + +int vbox_bo_create(struct vbox_private *vbox, int size, int align, + u32 flags, struct vbox_bo **pvboxbo); + +int vbox_gem_create(struct vbox_private *vbox, + u32 size, bool iskernel, struct drm_gem_object **obj); + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag); +int vbox_bo_unpin(struct vbox_bo *bo); + +static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait) +{ + int ret; + + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); + if (ret) { + if (ret != -ERESTARTSYS && ret != -EBUSY) + DRM_ERROR("reserve failed %p\n", bo); + return ret; + } + return 0; +} + +static inline void vbox_bo_unreserve(struct vbox_bo *bo) +{ + ttm_bo_unreserve(&bo->bo); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain); +int vbox_bo_push_sysram(struct vbox_bo *bo); +int vbox_mmap(struct file *filp, struct vm_area_struct *vma); +void *vbox_bo_kmap(struct vbox_bo *bo); +void vbox_bo_kunmap(struct 
vbox_bo *bo); + +/* vbox_prime.c */ +int vbox_gem_prime_pin(struct drm_gem_object *obj); +void vbox_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table); +void *vbox_gem_prime_vmap(struct drm_gem_object *obj); +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int vbox_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *area); + +/* vbox_irq.c */ +int vbox_irq_init(struct vbox_private *vbox); +void vbox_irq_fini(struct vbox_private *vbox); +void vbox_report_hotplug(struct vbox_private *vbox); +irqreturn_t vbox_irq_handler(int irq, void *arg); + +/* vbox_hgsmi.c */ +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info); +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf); +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf); + +static inline void vbox_write_ioport(u16 index, u16 data) +{ + outw(index, VBE_DISPI_IOPORT_INDEX); + outw(data, VBE_DISPI_IOPORT_DATA); +} + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c new file mode 100644 index 000000000000..83a04afd1766 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_fb.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + */ +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/sysrq.h> +#include <linux/tty.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +#ifdef CONFIG_DRM_KMS_FB_HELPER +static struct fb_deferred_io vbox_defio = { + .delay = HZ / 30, + .deferred_io = drm_fb_helper_deferred_io, +}; +#endif + +static struct fb_ops vboxfb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, +}; + +int vboxfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct vbox_private *vbox = + container_of(helper, struct vbox_private, fb_helper); + struct pci_dev *pdev = vbox->ddev.pdev; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_framebuffer *fb; + struct fb_info *info; + struct drm_gem_object *gobj; + struct vbox_bo *bo; + int size, ret; + u64 gpu_addr; + u32 pitch; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + mode_cmd.pitches[0] = pitch; + + size = pitch * mode_cmd.height; + + ret = vbox_gem_create(vbox, size, true, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object %d\n", ret); + return ret; + } + + ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj); + if (ret) + return ret; + + bo = gem_to_vbox_bo(gobj); + + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM); + if 
(ret) + return ret; + + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->screen_size = size; + info->screen_base = (char __iomem *)vbox_bo_kmap(bo); + if (IS_ERR(info->screen_base)) + return PTR_ERR(info->screen_base); + + info->par = helper; + + fb = &vbox->afb.base; + helper->fb = fb; + + strcpy(info->fix.id, "vboxdrmfb"); + + info->fbops = &vboxfb_ops; + + /* + * This seems to be done for safety checking that the framebuffer + * is not registered twice by different drivers. + */ + info->apertures->ranges[0].base = pci_resource_start(pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(pdev, 0); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(info, helper, sizes->fb_width, + sizes->fb_height); + + gpu_addr = vbox_bo_gpu_offset(bo); + info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr; + info->fix.smem_len = vbox->available_vram_size - gpu_addr; + +#ifdef CONFIG_DRM_KMS_FB_HELPER + info->fbdefio = &vbox_defio; + fb_deferred_io_init(info); +#endif + + info->pixmap.flags = FB_PIXMAP_SYSTEM; + + DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); + + return 0; +} + +void vbox_fbdev_fini(struct vbox_private *vbox) +{ + struct vbox_framebuffer *afb = &vbox->afb; + +#ifdef CONFIG_DRM_KMS_FB_HELPER + if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio) + fb_deferred_io_cleanup(vbox->fb_helper.fbdev); +#endif + + drm_fb_helper_unregister_fbi(&vbox->fb_helper); + + if (afb->obj) { + struct vbox_bo *bo = gem_to_vbox_bo(afb->obj); + + vbox_bo_kunmap(bo); + + if (bo->pin_count) + vbox_bo_unpin(bo); + + drm_gem_object_put_unlocked(afb->obj); + afb->obj = NULL; + } + drm_fb_helper_fini(&vbox->fb_helper); + + drm_framebuffer_unregister_private(&afb->base); + drm_framebuffer_cleanup(&afb->base); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c new file mode 100644 index 000000000000..94b60654a012 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2017 Oracle Corporation + * Authors: Hans de Goede <hdegoede@redhat.com> + */ + +#include "vbox_drv.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_defs.h" + +/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */ +static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size) +{ + while (size--) { + hash += *data++; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + return hash; +} + +static u32 hgsmi_hash_end(u32 hash) +{ + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash; +} + +/* Not really a checksum but that is the naming used in all vbox code */ +static u32 hgsmi_checksum(u32 offset, + const struct hgsmi_buffer_header *header, + const struct hgsmi_buffer_tail *tail) +{ + u32 checksum; + + checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset)); + checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header)); + /* 4 -> Do not checksum the checksum itself */ + checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4); + + return hgsmi_hash_end(checksum); +} + +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info) +{ + struct hgsmi_buffer_header *h; + struct hgsmi_buffer_tail *t; + size_t total_size; + dma_addr_t offset; + + total_size = size + sizeof(*h) + sizeof(*t); + h = gen_pool_dma_alloc(guest_pool, total_size, &offset); + if (!h) + return NULL; + + t = (struct 
hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size); + + h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE; + h->data_size = size; + h->channel = channel; + h->channel_info = channel_info; + memset(&h->u.header_data, 0, sizeof(h->u.header_data)); + + t->reserved = 0; + t->checksum = hgsmi_checksum(offset, h, t); + + return (u8 *)h + sizeof(*h); +} + +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf) +{ + struct hgsmi_buffer_header *h = + (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h)); + size_t total_size = h->data_size + sizeof(*h) + + sizeof(struct hgsmi_buffer_tail); + + gen_pool_free(guest_pool, (unsigned long)h, total_size); +} + +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf) +{ + phys_addr_t offset; + + offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf - + sizeof(struct hgsmi_buffer_header)); + outl(offset, VGA_PORT_HGSMI_GUEST); + /* Make the compiler aware that the host has changed memory. */ + mb(); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c new file mode 100644 index 000000000000..16a1e29f5292 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2016-2017 Oracle Corporation + * This file is based on qxl_irq.c + * Copyright 2013 Red Hat Inc. + * Authors: Dave Airlie + * Alon Levy + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/pci.h> +#include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +static void vbox_clear_irq(void) +{ + outl((u32)~0, VGA_PORT_HGSMI_HOST); +} + +static u32 vbox_get_flags(struct vbox_private *vbox) +{ + return readl(vbox->guest_heap + HOST_FLAGS_OFFSET); +} + +void vbox_report_hotplug(struct vbox_private *vbox) +{ + schedule_work(&vbox->hotplug_work); +} + +irqreturn_t vbox_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct vbox_private *vbox = (struct vbox_private *)dev->dev_private; + u32 host_flags = vbox_get_flags(vbox); + + if (!(host_flags & HGSMIHOSTFLAGS_IRQ)) + return IRQ_NONE; + + /* + * Due to a bug in the initial host implementation of hot-plug irqs, + * the hot-plug and cursor capability flags were never cleared. + * Fortunately we can tell when they would have been set by checking + * that the VSYNC flag is not set. + */ + if (host_flags & + (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) && + !(host_flags & HGSMIHOSTFLAGS_VSYNC)) + vbox_report_hotplug(vbox); + + vbox_clear_irq(); + + return IRQ_HANDLED; +} + +/* + * Check that the position hints provided by the host are suitable for GNOME + * shell (i.e. all screens disjoint and hints for all enabled screens) and if + * not replace them with default ones. Providing valid hints improves the + * chances that we will get a known screen layout for pointer mapping. 
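
The pairwise test inside validate_or_set_position_hints() below is dense; factored out, it reads as follows. The 0x8fff masks on cx/cy and the 0xffff saturation check follow the driver's own code, and the helper name is hypothetical:

static bool example_hints_unsuitable(const struct vbva_modehint *a,
                                     const struct vbva_modehint *b)
{
        u32 aw = a->cx & 0x8fff, ah = a->cy & 0x8fff;
        u32 bw = b->cx & 0x8fff, bh = b->cy & 0x8fff;

        /* Reject saturated coordinates... */
        if (a->dx >= 0xffff || a->dy >= 0xffff ||
            b->dx >= 0xffff || b->dy >= 0xffff)
                return true;

        /* ...and pairs whose intervals overlap on the X or the Y axis. */
        return (a->dx < b->dx + bw && a->dx + aw > b->dx) ||
               (a->dy < b->dy + bh && a->dy + ah > b->dy);
}
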
+ */ +static void validate_or_set_position_hints(struct vbox_private *vbox) +{ + struct vbva_modehint *hintsi, *hintsj; + bool valid = true; + u16 currentx = 0; + int i, j; + + for (i = 0; i < vbox->num_crtcs; ++i) { + for (j = 0; j < i; ++j) { + hintsi = &vbox->last_mode_hints[i]; + hintsj = &vbox->last_mode_hints[j]; + + if (hintsi->enabled && hintsj->enabled) { + if (hintsi->dx >= 0xffff || + hintsi->dy >= 0xffff || + hintsj->dx >= 0xffff || + hintsj->dy >= 0xffff || + (hintsi->dx < + hintsj->dx + (hintsj->cx & 0x8fff) && + hintsi->dx + (hintsi->cx & 0x8fff) > + hintsj->dx) || + (hintsi->dy < + hintsj->dy + (hintsj->cy & 0x8fff) && + hintsi->dy + (hintsi->cy & 0x8fff) > + hintsj->dy)) + valid = false; + } + } + } + if (!valid) + for (i = 0; i < vbox->num_crtcs; ++i) { + if (vbox->last_mode_hints[i].enabled) { + vbox->last_mode_hints[i].dx = currentx; + vbox->last_mode_hints[i].dy = 0; + currentx += + vbox->last_mode_hints[i].cx & 0x8fff; + } + } +} + +/* Query the host for the most recent video mode hints. */ +static void vbox_update_mode_hints(struct vbox_private *vbox) +{ + struct drm_connector_list_iter conn_iter; + struct drm_device *dev = &vbox->ddev; + struct drm_connector *connector; + struct vbox_connector *vbox_conn; + struct vbva_modehint *hints; + u16 flags; + bool disconnected; + unsigned int crtc_id; + int ret; + + ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs, + vbox->last_mode_hints); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret); + return; + } + + validate_or_set_position_hints(vbox); + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + vbox_conn = to_vbox_connector(connector); + + hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id]; + if (hints->magic != VBVAMODEHINT_MAGIC) + continue; + + disconnected = !(hints->enabled); + crtc_id = vbox_conn->vbox_crtc->crtc_id; + vbox_conn->mode_hint.width = hints->cx; + vbox_conn->mode_hint.height = hints->cy; + vbox_conn->vbox_crtc->x_hint = hints->dx; + vbox_conn->vbox_crtc->y_hint = hints->dy; + vbox_conn->mode_hint.disconnected = disconnected; + + if (vbox_conn->vbox_crtc->disconnected == disconnected) + continue; + + if (disconnected) + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED; + else + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK; + + hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0, + hints->cx * 4, hints->cx, + hints->cy, 0, flags); + + vbox_conn->vbox_crtc->disconnected = disconnected; + } + drm_connector_list_iter_end(&conn_iter); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void vbox_hotplug_worker(struct work_struct *work) +{ + struct vbox_private *vbox = container_of(work, struct vbox_private, + hotplug_work); + + vbox_update_mode_hints(vbox); + drm_kms_helper_hotplug_event(&vbox->ddev); +} + +int vbox_irq_init(struct vbox_private *vbox) +{ + INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); + vbox_update_mode_hints(vbox); + + return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq); +} + +void vbox_irq_fini(struct vbox_private *vbox) +{ + drm_irq_uninstall(&vbox->ddev); + flush_work(&vbox->hotplug_work); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c new file mode 100644 index 000000000000..f4d02de5518a --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 
2013-2017 Oracle Corporation + * This file is based on ast_main.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com>, + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/vbox_err.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" + +static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); + + if (vbox_fb->obj) + drm_gem_object_put_unlocked(vbox_fb->obj); + + drm_framebuffer_cleanup(fb); + kfree(fb); +} + +void vbox_report_caps(struct vbox_private *vbox) +{ + u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | + VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY; + + /* The host only accepts VIDEO_MODE_HINTS if it is send separately. */ + hgsmi_send_caps_info(vbox->guest_pool, caps); + caps |= VBVACAPS_VIDEO_MODE_HINTS; + hgsmi_send_caps_info(vbox->guest_pool, caps); +} + +/* Send information about dirty rectangles to VBVA. */ +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + struct vbox_private *vbox = fb->dev->dev_private; + struct drm_display_mode *mode; + struct drm_crtc *crtc; + int crtc_x, crtc_y; + unsigned int i; + + mutex_lock(&vbox->hw_mutex); + list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) { + if (crtc->primary->state->fb != fb) + continue; + + mode = &crtc->state->mode; + crtc_x = crtc->primary->state->src_x >> 16; + crtc_y = crtc->primary->state->src_y >> 16; + + for (i = 0; i < num_rects; ++i) { + struct vbva_cmd_hdr cmd_hdr; + unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; + + if (rects[i].x1 > crtc_x + mode->hdisplay || + rects[i].y1 > crtc_y + mode->vdisplay || + rects[i].x2 < crtc_x || + rects[i].y2 < crtc_y) + continue; + + cmd_hdr.x = (s16)rects[i].x1; + cmd_hdr.y = (s16)rects[i].y1; + cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1; + cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; + + if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], + vbox->guest_pool)) + continue; + + vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, + &cmd_hdr, sizeof(cmd_hdr)); + vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); + } + } + mutex_unlock(&vbox->hw_mutex); +} + +static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + vbox_framebuffer_dirty_rectangles(fb, rects, num_rects); + + return 0; +} + +static const struct drm_framebuffer_funcs vbox_fb_funcs = { + .destroy = vbox_user_framebuffer_destroy, + .dirty = vbox_user_framebuffer_dirty, +}; + +int vbox_framebuffer_init(struct vbox_private *vbox, + struct vbox_framebuffer *vbox_fb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd); + vbox_fb->obj = obj; + ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; + } + + return 0; +} + +static int vbox_accel_init(struct vbox_private *vbox) +{ + struct vbva_buffer *vbva; + unsigned int i; + + vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, + sizeof(*vbox->vbva_info), GFP_KERNEL); + if (!vbox->vbva_info) + return -ENOMEM; + + /* Take a command buffer for each screen from the end of 
usable VRAM. */ + vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE; + + vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0, + vbox->available_vram_size, + vbox->num_crtcs * + VBVA_MIN_BUFFER_SIZE); + if (!vbox->vbva_buffers) + return -ENOMEM; + + for (i = 0; i < vbox->num_crtcs; ++i) { + vbva_setup_buffer_context(&vbox->vbva_info[i], + vbox->available_vram_size + + i * VBVA_MIN_BUFFER_SIZE, + VBVA_MIN_BUFFER_SIZE); + vbva = (void __force *)vbox->vbva_buffers + + i * VBVA_MIN_BUFFER_SIZE; + if (!vbva_enable(&vbox->vbva_info[i], + vbox->guest_pool, vbva, i)) { + /* very old host or driver error. */ + DRM_ERROR("vboxvideo: vbva_enable failed\n"); + } + } + + return 0; +} + +static void vbox_accel_fini(struct vbox_private *vbox) +{ + unsigned int i; + + for (i = 0; i < vbox->num_crtcs; ++i) + vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i); + + pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers); +} + +/* Do we support the 4.3 plus mode hint reporting interface? */ +static bool have_hgsmi_mode_hints(struct vbox_private *vbox) +{ + u32 have_hints, have_cursor; + int ret; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_MODE_HINT_REPORTING, + &have_hints); + if (ret) + return false; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, + &have_cursor); + if (ret) + return false; + + return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS; +} + +bool vbox_check_supported(u16 id) +{ + u16 dispi_id; + + vbox_write_ioport(VBE_DISPI_INDEX_ID, id); + dispi_id = inw(VBE_DISPI_IOPORT_DATA); + + return dispi_id == id; +} + +int vbox_hw_init(struct vbox_private *vbox) +{ + int ret = -ENOMEM; + + vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA); + vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX); + + DRM_INFO("VRAM %08x\n", vbox->full_vram_size); + + /* Map guest-heap at end of vram */ + vbox->guest_heap = + pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_SIZE); + if (!vbox->guest_heap) + return -ENOMEM; + + /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ + vbox->guest_pool = gen_pool_create(4, -1); + if (!vbox->guest_pool) + goto err_unmap_guest_heap; + + ret = gen_pool_add_virt(vbox->guest_pool, + (unsigned long)vbox->guest_heap, + GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_USABLE_SIZE, -1); + if (ret) + goto err_destroy_guest_pool; + + ret = hgsmi_test_query_conf(vbox->guest_pool); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n"); + goto err_destroy_guest_pool; + } + + /* Reduce available VRAM size to reflect the guest heap. */ + vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox); + /* Linux drm represents monitors as a 32-bit array. 
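
One subtlety in vbox_hw_init() above: gen_pool_add_virt() registers GUEST_HEAP_OFFSET(vbox), an offset into VRAM, as the pool's "phys" base rather than a CPU physical address. That is deliberate. The host addresses HGSMI buffers by their offset within VRAM, so gen_pool_virt_to_phys() yields exactly the value the submission port expects. A sketch mirroring hgsmi_buffer_submit() from vbox_hgsmi.c earlier in this series (here cmd points at the buffer header):

static void example_submit(struct vbox_private *vbox, void *cmd)
{
        /* "phys" is the buffer's offset within VRAM, per the
         * gen_pool_add_virt() call in vbox_hw_init(). */
        phys_addr_t vram_offset =
                gen_pool_virt_to_phys(vbox->guest_pool, (unsigned long)cmd);

        outl(vram_offset, VGA_PORT_HGSMI_GUEST);
}
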
*/ + hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT, + &vbox->num_crtcs); + vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS); + + if (!have_hgsmi_mode_hints(vbox)) { + ret = -ENOTSUPP; + goto err_destroy_guest_pool; + } + + vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, + sizeof(struct vbva_modehint), + GFP_KERNEL); + if (!vbox->last_mode_hints) { + ret = -ENOMEM; + goto err_destroy_guest_pool; + } + + ret = vbox_accel_init(vbox); + if (ret) + goto err_destroy_guest_pool; + + return 0; + +err_destroy_guest_pool: + gen_pool_destroy(vbox->guest_pool); +err_unmap_guest_heap: + pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); + return ret; +} + +void vbox_hw_fini(struct vbox_private *vbox) +{ + vbox_accel_fini(vbox); + gen_pool_destroy(vbox->guest_pool); + pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); +} + +int vbox_gem_create(struct vbox_private *vbox, + u32 size, bool iskernel, struct drm_gem_object **obj) +{ + struct vbox_bo *vboxbo; + int ret; + + *obj = NULL; + + size = roundup(size, PAGE_SIZE); + if (size == 0) + return -EINVAL; + + ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("failed to allocate GEM object\n"); + return ret; + } + + *obj = &vboxbo->gem; + + return 0; +} + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct vbox_private *vbox = + container_of(dev, struct vbox_private, ddev); + struct drm_gem_object *gobj; + u32 handle; + int ret; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + ret = vbox_gem_create(vbox, args->size, false, &gobj); + if (ret) + return ret; + + ret = drm_gem_handle_create(file, gobj, &handle); + drm_gem_object_put_unlocked(gobj); + if (ret) + return ret; + + args->handle = handle; + + return 0; +} + +void vbox_gem_free_object(struct drm_gem_object *obj) +{ + struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj); + + ttm_bo_put(&vbox_bo->bo); +} + +static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo) +{ + return drm_vma_node_offset_addr(&bo->bo.vma_node); +} + +int +vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset) +{ + struct drm_gem_object *obj; + int ret; + struct vbox_bo *bo; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(file, handle); + if (!obj) { + ret = -ENOENT; + goto out_unlock; + } + + bo = gem_to_vbox_bo(obj); + *offset = vbox_bo_mmap_offset(bo); + + drm_gem_object_put(obj); + ret = 0; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c new file mode 100644 index 000000000000..620a6e38f71f --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_mode.c + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. 
+ * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/export.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "hgsmi_channels.h" +#include "vbox_drv.h" +#include "vboxvideo.h" + +/* + * Set a graphics mode. Poke any required values into registers, do an HGSMI + * mode set and tell the host we support advanced graphics functions. + */ +static void vbox_do_modeset(struct drm_crtc *crtc) +{ + struct drm_framebuffer *fb = crtc->primary->state->fb; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox; + int width, height, bpp, pitch; + u16 flags; + s32 x_offset, y_offset; + + vbox = crtc->dev->dev_private; + width = vbox_crtc->width ? vbox_crtc->width : 640; + height = vbox_crtc->height ? vbox_crtc->height : 480; + bpp = fb ? fb->format->cpp[0] * 8 : 32; + pitch = fb ? fb->pitches[0] : width * bpp / 8; + x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint; + y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint; + + /* + * This is the old way of setting graphics modes. It assumed one screen + * and a frame-buffer at the start of video RAM. On older versions of + * VirtualBox, certain parts of the code still assume that the first + * screen is programmed this way, so try to fake it. + */ + if (vbox_crtc->crtc_id == 0 && fb && + vbox_crtc->fb_offset / pitch < 0xffff - crtc->y && + vbox_crtc->fb_offset % (bpp / 8) == 0) { + vbox_write_ioport(VBE_DISPI_INDEX_XRES, width); + vbox_write_ioport(VBE_DISPI_INDEX_YRES, height); + vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp); + vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp); + vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED); + vbox_write_ioport( + VBE_DISPI_INDEX_X_OFFSET, + vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x); + vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET, + vbox_crtc->fb_offset / pitch + vbox_crtc->y); + } + + flags = VBVA_SCREEN_F_ACTIVE; + flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK; + flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0; + hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id, + x_offset, y_offset, + vbox_crtc->x * bpp / 8 + + vbox_crtc->y * pitch, + pitch, width, height, bpp, flags); +} + +static int vbox_set_view(struct drm_crtc *crtc) +{ + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbva_infoview *p; + + /* + * Tell the host about the view. This design originally targeted the + * Windows XP driver architecture and assumed that each screen would + * have a dedicated frame buffer with the command buffer following it, + * the whole being a "view". The host works out which screen a command + * buffer belongs to by checking whether it is in the first view, then + * whether it is in the second and so on. The first match wins. We + * cheat around this by making the first view be the managed memory + * plus the first command buffer, the second the same plus the second + * buffer and so on. 
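
The "view" workaround described above translates into concrete numbers in vbox_set_view() just below; restated for screen i (formulas verbatim from the function, interpretation hedged):

/*
 *   view_offset(i)     = crtc i's fb_offset
 *   view_size(i)       = available_vram_size - fb_offset
 *                        + i * VBVA_MIN_BUFFER_SIZE
 *   max_screen_size(i) = available_vram_size - fb_offset
 *
 * Each successive view thus reaches one VBVA command buffer further
 * into the region above available_vram_size, which is what lets the
 * host's first-match search pair command buffers with screens.
 */
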
+ */ + p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p), + HGSMI_CH_VBVA, VBVA_INFO_VIEW); + if (!p) + return -ENOMEM; + + p->view_index = vbox_crtc->crtc_id; + p->view_offset = vbox_crtc->fb_offset; + p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset + + vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE; + p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset; + + hgsmi_buffer_submit(vbox->guest_pool, p); + hgsmi_buffer_free(vbox->guest_pool, p); + + return 0; +} + +/* + * Try to map the layout of virtual screens to the range of the input device. + * Return true if we need to re-set the crtc modes due to screen offset + * changes. + */ +static bool vbox_set_up_input_mapping(struct vbox_private *vbox) +{ + struct drm_crtc *crtci; + struct drm_connector *connectori; + struct drm_framebuffer *fb, *fb1 = NULL; + bool single_framebuffer = true; + bool old_single_framebuffer = vbox->single_framebuffer; + u16 width = 0, height = 0; + + /* + * Are we using an X.Org-style single large frame-buffer for all crtcs? + * If so then screen layout can be deduced from the crtc offsets. + * Same fall-back if this is the fbdev frame-buffer. + */ + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { + fb = crtci->primary->state->fb; + if (!fb) + continue; + + if (!fb1) { + fb1 = fb; + if (to_vbox_framebuffer(fb1) == &vbox->afb) + break; + } else if (fb != fb1) { + single_framebuffer = false; + } + } + if (!fb1) + return false; + + if (single_framebuffer) { + vbox->single_framebuffer = true; + vbox->input_mapping_width = fb1->width; + vbox->input_mapping_height = fb1->height; + return old_single_framebuffer != vbox->single_framebuffer; + } + /* Otherwise calculate the total span of all screens. */ + list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list, + head) { + struct vbox_connector *vbox_connector = + to_vbox_connector(connectori); + struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc; + + width = max_t(u16, width, vbox_crtc->x_hint + + vbox_connector->mode_hint.width); + height = max_t(u16, height, vbox_crtc->y_hint + + vbox_connector->mode_hint.height); + } + + vbox->single_framebuffer = false; + vbox->input_mapping_width = width; + vbox->input_mapping_height = height; + + return old_single_framebuffer != vbox->single_framebuffer; +} + +static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj); + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state); + + mutex_lock(&vbox->hw_mutex); + + if (crtc->state->enable) { + vbox_crtc->width = crtc->state->mode.hdisplay; + vbox_crtc->height = crtc->state->mode.vdisplay; + } + + vbox_crtc->x = x; + vbox_crtc->y = y; + vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo); + + /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */ + if (needs_modeset && vbox_set_up_input_mapping(vbox)) { + struct drm_crtc *crtci; + + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, + head) { + if (crtci == crtc) + continue; + vbox_do_modeset(crtci); + } + } + + vbox_set_view(crtc); + vbox_do_modeset(crtc); + + if (needs_modeset) + hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, + vbox->input_mapping_width, + vbox->input_mapping_height); + + mutex_unlock(&vbox->hw_mutex); +} + +static void vbox_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state 
*old_crtc_state) +{ +} + +static void vbox_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ +} + +static void vbox_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_pending_vblank_event *event; + unsigned long flags; + + if (crtc->state && crtc->state->event) { + event = crtc->state->event; + crtc->state->event = NULL; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = { + .atomic_enable = vbox_crtc_atomic_enable, + .atomic_disable = vbox_crtc_atomic_disable, + .atomic_flush = vbox_crtc_atomic_flush, +}; + +static void vbox_crtc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_funcs vbox_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + /* .gamma_set = vbox_crtc_gamma_set, */ + .destroy = vbox_crtc_destroy, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int vbox_primary_atomic_check(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_crtc_state *crtc_state = NULL; + + if (new_state->crtc) { + crtc_state = drm_atomic_get_existing_crtc_state( + new_state->state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + } + + return drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); +} + +static void vbox_primary_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_crtc *crtc = plane->state->crtc; + struct drm_framebuffer *fb = plane->state->fb; + + vbox_crtc_set_base_and_mode(crtc, fb, + plane->state->src_x >> 16, + plane->state->src_y >> 16); +} + +static void vbox_primary_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_crtc *crtc = old_state->crtc; + + /* vbox_do_modeset checks plane->state->fb and will disable if NULL */ + vbox_crtc_set_base_and_mode(crtc, old_state->fb, + old_state->src_x >> 16, + old_state->src_y >> 16); +} + +static int vbox_primary_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct vbox_bo *bo; + int ret; + + if (!new_state->fb) + return 0; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj); + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM); + if (ret) + DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret); + + return ret; +} + +static void vbox_primary_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_bo *bo; + + if (!old_state->fb) + return; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj); + vbox_bo_unpin(bo); +} + +static int vbox_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_crtc_state *crtc_state = NULL; + u32 width = new_state->crtc_w; + u32 height = new_state->crtc_h; + int ret; + + if (new_state->crtc) { + crtc_state = drm_atomic_get_existing_crtc_state( + new_state->state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + } + + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); + if 
(ret) + return ret; + + if (!new_state->fb) + return 0; + + if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || + width == 0 || height == 0) + return -EINVAL; + + return 0; +} + +/* + * Copy the ARGB image and generate the mask, which is needed in case the host + * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set + * if the corresponding alpha value in the ARGB image is greater than 0xF0. + */ +static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, + size_t mask_size) +{ + size_t line_size = (width + 7) / 8; + u32 i, j; + + memcpy(dst + mask_size, src, width * height * 4); + for (i = 0; i < height; ++i) + for (j = 0; j < width; ++j) + if (((u32 *)src)[i * width + j] > 0xf0000000) + dst[i * line_size + j / 8] |= (0x80 >> (j % 8)); +} + +static void vbox_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_private *vbox = + container_of(plane->dev, struct vbox_private, ddev); + struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc); + struct drm_framebuffer *fb = plane->state->fb; + struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj); + u32 width = plane->state->crtc_w; + u32 height = plane->state->crtc_h; + size_t data_size, mask_size; + u32 flags; + u8 *src; + + /* + * VirtualBox uses the host windowing system to draw the cursor so + * moves are a no-op, we only need to upload new cursor sprites. + */ + if (fb == old_state->fb) + return; + + mutex_lock(&vbox->hw_mutex); + + vbox_crtc->cursor_enabled = true; + + /* pinning is done in prepare/cleanup framebuffer */ + src = vbox_bo_kmap(bo); + if (IS_ERR(src)) { + mutex_unlock(&vbox->hw_mutex); + DRM_WARN("Could not kmap cursor bo, skipping update\n"); + return; + } + + /* + * The mask must be calculated based on the alpha + * channel, one bit per ARGB word, and must be 32-bit + * padded. 
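+ * + * For example, a 64x64 cursor needs (64 + 7) / 8 * 64 = 512 mask bytes, + * already 32-bit aligned, so the mask_size computed below works out to + * 512 and data_size to 64 * 64 * 4 + 512 = 16896 bytes.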
+ */ + mask_size = ((width + 7) / 8 * height + 3) & ~3; + data_size = width * height * 4 + mask_size; + + copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); + vbox_bo_kunmap(bo); + + flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | + VBOX_MOUSE_POINTER_ALPHA; + hgsmi_update_pointer_shape(vbox->guest_pool, flags, + min_t(u32, max(fb->hot_x, 0), width), + min_t(u32, max(fb->hot_y, 0), height), + width, height, vbox->cursor_data, data_size); + + mutex_unlock(&vbox->hw_mutex); +} + +static void vbox_cursor_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_private *vbox = + container_of(plane->dev, struct vbox_private, ddev); + struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc); + bool cursor_enabled = false; + struct drm_crtc *crtci; + + mutex_lock(&vbox->hw_mutex); + + vbox_crtc->cursor_enabled = false; + + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { + if (to_vbox_crtc(crtci)->cursor_enabled) + cursor_enabled = true; + } + + if (!cursor_enabled) + hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0, + 0, 0, NULL, 0); + + mutex_unlock(&vbox->hw_mutex); +} + +static int vbox_cursor_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct vbox_bo *bo; + + if (!new_state->fb) + return 0; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj); + return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM); +} + +static void vbox_cursor_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_bo *bo; + + if (!plane->state->fb) + return; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(plane->state->fb)->obj); + vbox_bo_unpin(bo); +} + +static const u32 vbox_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { + .atomic_check = vbox_cursor_atomic_check, + .atomic_update = vbox_cursor_atomic_update, + .atomic_disable = vbox_cursor_atomic_disable, + .prepare_fb = vbox_cursor_prepare_fb, + .cleanup_fb = vbox_cursor_cleanup_fb, +}; + +static const struct drm_plane_funcs vbox_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const u32 vbox_primary_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { + .atomic_check = vbox_primary_atomic_check, + .atomic_update = vbox_primary_atomic_update, + .atomic_disable = vbox_primary_atomic_disable, + .prepare_fb = vbox_primary_prepare_fb, + .cleanup_fb = vbox_primary_cleanup_fb, +}; + +static const struct drm_plane_funcs vbox_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct drm_plane *vbox_create_plane(struct vbox_private *vbox, + unsigned int possible_crtcs, + enum drm_plane_type type) +{ + const struct drm_plane_helper_funcs *helper_funcs = NULL; + const struct drm_plane_funcs *funcs; + struct drm_plane *plane; + const u32 
*formats; + int num_formats; + int err; + + if (type == DRM_PLANE_TYPE_PRIMARY) { + funcs = &vbox_primary_plane_funcs; + formats = vbox_primary_plane_formats; + helper_funcs = &vbox_primary_helper_funcs; + num_formats = ARRAY_SIZE(vbox_primary_plane_formats); + } else if (type == DRM_PLANE_TYPE_CURSOR) { + funcs = &vbox_cursor_plane_funcs; + formats = vbox_cursor_plane_formats; + helper_funcs = &vbox_cursor_helper_funcs; + num_formats = ARRAY_SIZE(vbox_cursor_plane_formats); + } else { + return ERR_PTR(-EINVAL); + } + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs, + funcs, formats, num_formats, + NULL, type, NULL); + if (err) + goto free_plane; + + drm_plane_helper_add(plane, helper_funcs); + + return plane; + +free_plane: + kfree(plane); + return ERR_PTR(-EINVAL); +} + +static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i) +{ + struct vbox_private *vbox = + container_of(dev, struct vbox_private, ddev); + struct drm_plane *cursor = NULL; + struct vbox_crtc *vbox_crtc; + struct drm_plane *primary; + u32 caps = 0; + int ret; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps); + if (ret) + return ERR_PTR(ret); + + vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL); + if (!vbox_crtc) + return ERR_PTR(-ENOMEM); + + primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(primary)) { + ret = PTR_ERR(primary); + goto free_mem; + } + + if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) { + cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR); + if (IS_ERR(cursor)) { + ret = PTR_ERR(cursor); + goto clean_primary; + } + } else { + DRM_WARN("VirtualBox host is too old, no cursor support\n"); + } + + vbox_crtc->crtc_id = i; + + ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor, + &vbox_crtc_funcs, NULL); + if (ret) + goto clean_cursor; + + drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256); + drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs); + + return vbox_crtc; + +clean_cursor: + if (cursor) { + drm_plane_cleanup(cursor); + kfree(cursor); + } +clean_primary: + drm_plane_cleanup(primary); + kfree(primary); +free_mem: + kfree(vbox_crtc); + return ERR_PTR(ret); +} + +static void vbox_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs vbox_enc_funcs = { + .destroy = vbox_encoder_destroy, +}; + +static struct drm_encoder *vbox_encoder_init(struct drm_device *dev, + unsigned int i) +{ + struct vbox_encoder *vbox_encoder; + + vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL); + if (!vbox_encoder) + return NULL; + + drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); + + vbox_encoder->base.possible_crtcs = 1 << i; + return &vbox_encoder->base; +} + +/* + * Generate EDID data with a mode-unique serial number for the virtual + * monitor to try to persuade Unity that different modes correspond to + * different monitors and it should not try to force the same resolution on + * them. 
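+ * + * Only a few bytes of the block below are patched per mode: the serial + * number (bytes 12-15, filled with the width and height), the first + * detailed timing descriptor (bytes 54-61, pixel clock and active size) + * and the checksum in the last byte.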
+ */ +static void vbox_set_edid(struct drm_connector *connector, int width, + int height) +{ + enum { EDID_SIZE = 128 }; + unsigned char edid[EDID_SIZE] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */ + 0x58, 0x58, /* manufacturer (VBX) */ + 0x00, 0x00, /* product code */ + 0x00, 0x00, 0x00, 0x00, /* serial number goes here */ + 0x01, /* week of manufacture */ + 0x00, /* year of manufacture */ + 0x01, 0x03, /* EDID version */ + 0x80, /* capabilities - digital */ + 0x00, /* horiz. res in cm, zero for projectors */ + 0x00, /* vert. res in cm */ + 0x78, /* display gamma (120 == 2.2). */ + 0xEE, /* features (standby, suspend, off, RGB, std */ + /* colour space, preferred timing mode) */ + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54, + /* chromaticity for standard colour space. */ + 0x00, 0x00, 0x00, /* no default timings */ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, /* no standard timings */ + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, + 0x02, 0x02, + /* descriptor block 1 goes below */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* descriptor block 2, monitor ranges */ + 0x00, 0x00, 0x00, 0xFD, 0x00, + 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, + 0x20, 0x20, + /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */ + 0x20, + /* descriptor block 3, monitor name */ + 0x00, 0x00, 0x00, 0xFC, 0x00, + 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', + '\n', + /* descriptor block 4: dummy data */ + 0x00, 0x00, 0x00, 0x10, 0x00, + 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, + 0x00, /* number of extensions */ + 0x00 /* checksum goes here */ + }; + int clock = (width + 6) * (height + 6) * 60 / 10000; + unsigned int i, sum = 0; + + edid[12] = width & 0xff; + edid[13] = width >> 8; + edid[14] = height & 0xff; + edid[15] = height >> 8; + edid[54] = clock & 0xff; + edid[55] = clock >> 8; + edid[56] = width & 0xff; + edid[58] = (width >> 4) & 0xf0; + edid[59] = height & 0xff; + edid[61] = (height >> 4) & 0xf0; + for (i = 0; i < EDID_SIZE - 1; ++i) + sum += edid[i]; + edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; + drm_connector_update_edid_property(connector, (struct edid *)edid); +} + +static int vbox_get_modes(struct drm_connector *connector) +{ + struct vbox_connector *vbox_connector = NULL; + struct drm_display_mode *mode = NULL; + struct vbox_private *vbox = NULL; + unsigned int num_modes = 0; + int preferred_width, preferred_height; + + vbox_connector = to_vbox_connector(connector); + vbox = connector->dev->dev_private; + + hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) + + HOST_FLAGS_OFFSET); + if (vbox_connector->vbox_crtc->crtc_id == 0) + vbox_report_caps(vbox); + + num_modes = drm_add_modes_noedid(connector, 2560, 1600); + preferred_width = vbox_connector->mode_hint.width ? + vbox_connector->mode_hint.width : 1024; + preferred_height = vbox_connector->mode_hint.height ? 
+ vbox_connector->mode_hint.height : 768; + mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, + 60, false, false, false); + if (mode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + ++num_modes; + } + vbox_set_edid(connector, preferred_width, preferred_height); + + if (vbox_connector->vbox_crtc->x_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + else + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_x_property, 0); + + if (vbox_connector->vbox_crtc->y_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + else + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_y_property, 0); + + return num_modes; +} + +static void vbox_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static enum drm_connector_status +vbox_connector_detect(struct drm_connector *connector, bool force) +{ + struct vbox_connector *vbox_connector; + + vbox_connector = to_vbox_connector(connector); + + return vbox_connector->mode_hint.disconnected ? + connector_status_disconnected : connector_status_connected; +} + +static int vbox_fill_modes(struct drm_connector *connector, u32 max_x, + u32 max_y) +{ + struct vbox_connector *vbox_connector; + struct drm_device *dev; + struct drm_display_mode *mode, *iterator; + + vbox_connector = to_vbox_connector(connector); + dev = vbox_connector->base.dev; + list_for_each_entry_safe(mode, iterator, &connector->modes, head) { + list_del(&mode->head); + drm_mode_destroy(dev, mode); + } + + return drm_helper_probe_single_connector_modes(connector, max_x, max_y); +} + +static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = { + .get_modes = vbox_get_modes, +}; + +static const struct drm_connector_funcs vbox_connector_funcs = { + .detect = vbox_connector_detect, + .fill_modes = vbox_fill_modes, + .destroy = vbox_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vbox_connector_init(struct drm_device *dev, + struct vbox_crtc *vbox_crtc, + struct drm_encoder *encoder) +{ + struct vbox_connector *vbox_connector; + struct drm_connector *connector; + + vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL); + if (!vbox_connector) + return -ENOMEM; + + connector = &vbox_connector->base; + vbox_connector->vbox_crtc = vbox_crtc; + + drm_connector_init(dev, connector, &vbox_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + drm_connector_helper_add(connector, &vbox_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_mode_create_suggested_offset_properties(dev); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_x_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_y_property, 0); + + drm_connector_attach_encoder(connector, encoder); + + return 0; +} + +static struct drm_framebuffer *vbox_user_framebuffer_create( + struct drm_device *dev, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct vbox_private *vbox = + container_of(dev, struct 
vbox_private, ddev); + struct drm_gem_object *obj; + struct vbox_framebuffer *vbox_fb; + int ret = -ENOMEM; + + obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); + if (!obj) + return ERR_PTR(-ENOENT); + + vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL); + if (!vbox_fb) + goto err_unref_obj; + + ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj); + if (ret) + goto err_free_vbox_fb; + + return &vbox_fb->base; + +err_free_vbox_fb: + kfree(vbox_fb); +err_unref_obj: + drm_gem_object_put_unlocked(obj); + return ERR_PTR(ret); +} + +static const struct drm_mode_config_funcs vbox_mode_funcs = { + .fb_create = vbox_user_framebuffer_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int vbox_mode_init(struct vbox_private *vbox) +{ + struct drm_device *dev = &vbox->ddev; + struct drm_encoder *encoder; + struct vbox_crtc *vbox_crtc; + unsigned int i; + int ret; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = (void *)&vbox_mode_funcs; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.max_width = VBE_DISPI_MAX_XRES; + dev->mode_config.max_height = VBE_DISPI_MAX_YRES; + + for (i = 0; i < vbox->num_crtcs; ++i) { + vbox_crtc = vbox_crtc_init(dev, i); + if (IS_ERR(vbox_crtc)) { + ret = PTR_ERR(vbox_crtc); + goto err_drm_mode_cleanup; + } + encoder = vbox_encoder_init(dev, i); + if (!encoder) { + ret = -ENOMEM; + goto err_drm_mode_cleanup; + } + ret = vbox_connector_init(dev, vbox_crtc, encoder); + if (ret) + goto err_drm_mode_cleanup; + } + + drm_mode_config_reset(dev); + return 0; + +err_drm_mode_cleanup: + drm_mode_config_cleanup(dev); + return ret; +} + +void vbox_mode_fini(struct vbox_private *vbox) +{ + drm_mode_config_cleanup(&vbox->ddev); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c new file mode 100644 index 000000000000..d61985b0c6eb --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2017 Oracle Corporation + * Copyright 2017 Canonical + * Authors: Andreas Pokorny + */ + +#include "vbox_drv.h" + +/* + * Based on qxl_prime.c: + * Empty Implementations as there should not be any other driver for a virtual + * device that might share buffers with vboxvideo + */ + +int vbox_gem_prime_pin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} + +void vbox_gem_prime_unpin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); +} + +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void *vbox_gem_prime_vmap(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + WARN_ONCE(1, "not implemented"); +} + +int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c new file mode 100644 index 000000000000..30f270027acf --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c @@ -0,0 +1,394 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_ttm.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com> + */ +#include <linux/pci.h> +#include <drm/drm_file.h> +#include <drm/ttm/ttm_page_alloc.h> +#include "vbox_drv.h" + +static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct vbox_private, ttm.bdev); +} + +static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo) +{ + struct vbox_bo *bo; + + bo = container_of(tbo, struct vbox_bo, bo); + + drm_gem_object_release(&bo->gem); + kfree(bo); +} + +static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &vbox_bo_ttm_destroy) + return true; + + return false; +} + +static int +vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); + return -EINVAL; + } + + return 0; +} + +static void +vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct vbox_bo *vboxbo = vbox_bo(bo); + + if (!vbox_ttm_bo_is_vbox_bo(bo)) + return; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM); + *pl = vboxbo->placement; +} + +static int vbox_bo_verify_access(struct ttm_buffer_object *bo, + struct file *filp) +{ + return 0; +} + +static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct vbox_private *vbox = vbox_bdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0); + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ +} + +static void vbox_ttm_backend_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func vbox_tt_backend_func = { + .destroy = &vbox_ttm_backend_destroy, +}; + +static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo, + u32 page_flags) +{ + struct ttm_tt *tt; + + tt = kzalloc(sizeof(*tt), GFP_KERNEL); + if (!tt) + return NULL; + + tt->func = &vbox_tt_backend_func; + if (ttm_tt_init(tt, bo, page_flags)) { + kfree(tt); + return NULL; + } + + return tt; +} + +static struct ttm_bo_driver vbox_bo_driver = { + .ttm_tt_create = vbox_ttm_tt_create, + .init_mem_type = vbox_bo_init_mem_type, + .eviction_valuable = ttm_bo_eviction_valuable, + .evict_flags = vbox_bo_evict_flags, + .verify_access = vbox_bo_verify_access, + .io_mem_reserve = &vbox_ttm_io_mem_reserve, + .io_mem_free = &vbox_ttm_io_mem_free, +}; + +int 
vbox_mm_init(struct vbox_private *vbox) +{ + int ret; + struct drm_device *dev = &vbox->ddev; + struct ttm_bo_device *bdev = &vbox->ttm.bdev; + + ret = ttm_bo_device_init(&vbox->ttm.bdev, + &vbox_bo_driver, + dev->anon_inode->i_mapping, + DRM_FILE_PAGE_OFFSET, true); + if (ret) { + DRM_ERROR("Error initialising bo driver; %d\n", ret); + return ret; + } + + ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, + vbox->available_vram_size >> PAGE_SHIFT); + if (ret) { + DRM_ERROR("Failed ttm VRAM init: %d\n", ret); + goto err_device_release; + } + +#ifdef DRM_MTRR_WC + vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0), + DRM_MTRR_WC); +#else + vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); +#endif + return 0; + +err_device_release: + ttm_bo_device_release(&vbox->ttm.bdev); + return ret; +} + +void vbox_mm_fini(struct vbox_private *vbox) +{ +#ifdef DRM_MTRR_WC + drm_mtrr_del(vbox->fb_mtrr, + pci_resource_start(vbox->ddev.pdev, 0), + pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC); +#else + arch_phys_wc_del(vbox->fb_mtrr); +#endif + ttm_bo_device_release(&vbox->ttm.bdev); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain) +{ + unsigned int i; + u32 c = 0; + + bo->placement.placement = bo->placements; + bo->placement.busy_placement = bo->placements; + + if (domain & TTM_PL_FLAG_VRAM) + bo->placements[c++].flags = + TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + if (domain & TTM_PL_FLAG_SYSTEM) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!c) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + + bo->placement.num_placement = c; + bo->placement.num_busy_placement = c; + + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } +} + +int vbox_bo_create(struct vbox_private *vbox, int size, int align, + u32 flags, struct vbox_bo **pvboxbo) +{ + struct vbox_bo *vboxbo; + size_t acc_size; + int ret; + + vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL); + if (!vboxbo) + return -ENOMEM; + + ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size); + if (ret) + goto err_free_vboxbo; + + vboxbo->bo.bdev = &vbox->ttm.bdev; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); + + acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size, + sizeof(struct vbox_bo)); + + ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size, + ttm_bo_type_device, &vboxbo->placement, + align >> PAGE_SHIFT, false, acc_size, + NULL, NULL, vbox_bo_ttm_destroy); + if (ret) + goto err_free_vboxbo; + + *pvboxbo = vboxbo; + + return 0; + +err_free_vboxbo: + kfree(vboxbo); + return ret; +} + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (bo->pin_count) { + bo->pin_count++; + return 0; + } + + ret = vbox_bo_reserve(bo, false); + if (ret) + return ret; + + vbox_ttm_placement(bo, pl_flag); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + if (ret == 0) + bo->pin_count = 1; + + vbox_bo_unreserve(bo); + + return ret; +} + +int vbox_bo_unpin(struct vbox_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + ret = vbox_bo_reserve(bo, false); + if (ret) { + DRM_ERROR("Error %d reserving 
bo, leaving it pinned\n", ret); + return ret; + } + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + + vbox_bo_unreserve(bo); + + return ret; +} + +/* + * Move a vbox-owned buffer object to system memory if no one else has it + * pinned. The caller must have pinned it previously, and this call will + * release the caller's pin. + */ +int vbox_bo_push_sysram(struct vbox_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + if (bo->kmap.virtual) { + ttm_bo_kunmap(&bo->kmap); + bo->kmap.virtual = NULL; + } + + vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + if (ret) { + DRM_ERROR("pushing to system memory failed\n"); + return ret; + } + + return 0; +} + +int vbox_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct vbox_private *vbox; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return -EINVAL; + + file_priv = filp->private_data; + vbox = file_priv->minor->dev->dev_private; + + return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev); +} + +void *vbox_bo_kmap(struct vbox_bo *bo) +{ + int ret; + + if (bo->kmap.virtual) + return bo->kmap.virtual; + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) { + DRM_ERROR("Error kmapping bo: %d\n", ret); + return NULL; + } + + return bo->kmap.virtual; +} + +void vbox_bo_kunmap(struct vbox_bo *bo) +{ + if (bo->kmap.virtual) { + ttm_bo_kunmap(&bo->kmap); + bo->kmap.virtual = NULL; + } +} diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h new file mode 100644 index 000000000000..0592004f71aa --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_H__ +#define __VBOXVIDEO_H__ + +#define VBOX_VIDEO_MAX_SCREENS 64 + +/* + * The last 4096 bytes of the guest VRAM contain the generic info for all + * DualView chunks: sizes and offsets of chunks. This is filled by miniport. + * + * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info, + * etc. This is used exclusively by the corresponding instance of a display + * driver. + * + * The VRAM layout: + * Last 4096 bytes - Adapter information area. + * 4096 bytes aligned miniport heap (value specified in the config rounded up). + * Slack - what is left after dividing the VRAM. + * 4096 bytes aligned framebuffers: + * last 4096 bytes of each framebuffer is the display information area. + * + * The Virtual Graphics Adapter information in the guest VRAM is stored by the + * guest video driver using structures prepended by VBOXVIDEOINFOHDR. + * + * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO + * the host starts to process the info. The first element at the start of + * the 4096 bytes region should normally be a LINK that points to + * the actual information chain. That way the guest driver can have some + * fixed layout of the information memory block and just rewrite + * the link to point to the relevant memory chain. + * + * The processing stops at the END element.
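+ * + * A typical sequence is thus: the guest builds the information chain in + * the adapter information area, writes dword 0 to + * VBE_DISPI_INDEX_VBOX_VIDEO so that the host parses the chain, and + * later writes dword 0xffffffff to the same index to disable the mode.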
+ * + * The host can access the memory only when the port IO is processed. + * All data that will be needed later must be copied from these 4096 bytes. + * But other VRAM can be used by the host until the mode is disabled. + * + * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO + * to disable the mode. + * + * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information + * from the host and issue commands to the host. + * + * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the + * following operations with the VBE data register can be performed: + * + * Operation Result + * write 16 bit value NOP + * read 16 bit value count of monitors + * write 32 bit value set the vbox cmd value and the cmd is processed by the host + * read 32 bit value result of the last vbox command is returned + */ + +struct vbva_cmd_hdr { + s16 x; + s16 y; + u16 w; + u16 h; +} __packed; + +/* + * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of + * data. For example, big bitmaps which do not fit in the buffer. + * + * The guest starts writing to the buffer by initializing a record entry in the + * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being + * written. As data is written to the ring buffer, the guest increases + * free_offset. + * + * The host reads the records on flushes and processes all completed records. + * When the host encounters a situation where only a partial record is present + * and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE - + * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates + * data_offset. After that, on each flush, the host continues fetching the data + * until the record is completed. + */ + +#define VBVA_RING_BUFFER_SIZE (4194304 - 1024) +#define VBVA_RING_BUFFER_THRESHOLD (4096) + +#define VBVA_MAX_RECORDS (64) + +#define VBVA_F_MODE_ENABLED 0x00000001u +#define VBVA_F_MODE_VRDP 0x00000002u +#define VBVA_F_MODE_VRDP_RESET 0x00000004u +#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u + +#define VBVA_F_STATE_PROCESSING 0x00010000u + +#define VBVA_F_RECORD_PARTIAL 0x80000000u + +struct vbva_record { + u32 len_and_flags; +} __packed; + +/* + * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of + * the runtime heapsimple API. Use a minimum of 2 pages here, because the info + * area may also contain other data (for example the hgsmi_host_flags structure). + */ +#define VBVA_ADAPTER_INFORMATION_SIZE 65536 +#define VBVA_MIN_BUFFER_SIZE 65536 + +/* The value for port IO to let the adapter interpret the adapter memory. */ +#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF + +/* The value for port IO to let the adapter interpret the adapter memory. */ +#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000 + +/* + * The value for port IO to let the adapter interpret the display memory. + * The display number is encoded in the low 16 bits. + */ +#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000 + +struct vbva_host_flags { + u32 host_events; + u32 supported_orders; +} __packed; + +struct vbva_buffer { + struct vbva_host_flags host_flags; + + /* The offset where the data starts in the buffer. */ + u32 data_offset; + /* The offset where the next data must be placed in the buffer. */ + u32 free_offset; + + /* The queue of record descriptions. */ + struct vbva_record records[VBVA_MAX_RECORDS]; + u32 record_first_index; + u32 record_free_index; + + /* Space to leave free when large partial records are transferred.
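+ * vbva_write() never fills the buffer completely: a chunk that would + * not fit is capped at available - partial_write_tresh bytes.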
*/ + u32 partial_write_tresh; + + u32 data_len; + /* variable size for the rest of the vbva_buffer area in VRAM. */ + u8 data[0]; +} __packed; + +#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024) + +/* guest->host commands */ +#define VBVA_QUERY_CONF32 1 +#define VBVA_SET_CONF32 2 +#define VBVA_INFO_VIEW 3 +#define VBVA_INFO_HEAP 4 +#define VBVA_FLUSH 5 +#define VBVA_INFO_SCREEN 6 +#define VBVA_ENABLE 7 +#define VBVA_MOUSE_POINTER_SHAPE 8 +/* informs host about HGSMI caps. see vbva_caps below */ +#define VBVA_INFO_CAPS 12 +/* configures scanline, see VBVASCANLINECFG below */ +#define VBVA_SCANLINE_CFG 13 +/* requests scanline info, see VBVASCANLINEINFO below */ +#define VBVA_SCANLINE_INFO 14 +/* inform host about VBVA Command submission */ +#define VBVA_CMDVBVA_SUBMIT 16 +/* inform host about VBVA Command submission */ +#define VBVA_CMDVBVA_FLUSH 17 +/* G->H DMA command */ +#define VBVA_CMDVBVA_CTL 18 +/* Query most recent mode hints sent */ +#define VBVA_QUERY_MODE_HINTS 19 +/* + * Report the guest virtual desktop position and size for mapping host and + * guest pointer positions. + */ +#define VBVA_REPORT_INPUT_MAPPING 20 +/* Report the guest cursor position and query the host position. */ +#define VBVA_CURSOR_POSITION 21 + +/* host->guest commands */ +#define VBVAHG_EVENT 1 +#define VBVAHG_DISPLAY_CUSTOM 2 + +/* vbva_conf32::index */ +#define VBOX_VBVA_CONF32_MONITOR_COUNT 0 +#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1 +/* + * Returns VINF_SUCCESS if the host can report mode hints via VBVA. + * Set value to VERR_NOT_SUPPORTED before calling. + */ +#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2 +/* + * Returns VINF_SUCCESS if the host can report guest cursor enabled status via + * VBVA. Set value to VERR_NOT_SUPPORTED before calling. + */ +#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3 +/* + * Returns the currently available host cursor capabilities. Available if + * VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success. + */ +#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4 +/* Returns the supported flags in vbva_infoscreen.flags. */ +#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5 +/* Returns the max size of VBVA record. */ +#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6 + +struct vbva_conf32 { + u32 index; + u32 value; +} __packed; + +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0) +/* + * Guest cursor capability: can the host show a hardware cursor at the host + * pointer location? + */ +#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2) +/* Reserved for historical reasons. Must always be unset. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5) + +struct vbva_infoview { + /* Index of the screen, assigned by the guest. */ + u32 view_index; + + /* The screen offset in VRAM, the framebuffer starts here. */ + u32 view_offset; + + /* The size of the VRAM memory that can be used for the view. */ + u32 view_size; + + /* The recommended maximum size of the VRAM memory for the screen. 
*/ + u32 max_screen_size; +} __packed; + +struct vbva_flush { + u32 reserved; +} __packed; + +/* vbva_infoscreen.flags */ +#define VBVA_SCREEN_F_NONE 0x0000 +#define VBVA_SCREEN_F_ACTIVE 0x0001 +/* + * The virtual monitor has been disabled by the guest and should be removed + * by the host and ignored for purposes of pointer position calculation. + */ +#define VBVA_SCREEN_F_DISABLED 0x0002 +/* + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using the width, height, etc. values from the + * vbva_infoscreen request. + */ +#define VBVA_SCREEN_F_BLANK 0x0004 +/* + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using the previous mode values for width, height, etc. + */ +#define VBVA_SCREEN_F_BLANK2 0x0008 + +struct vbva_infoscreen { + /* Which view contains the screen. */ + u32 view_index; + + /* Physical X origin relative to the primary screen. */ + s32 origin_x; + + /* Physical Y origin relative to the primary screen. */ + s32 origin_y; + + /* Offset of visible framebuffer relative to the framebuffer start. */ + u32 start_offset; + + /* The scan line size in bytes. */ + u32 line_size; + + /* Width of the screen. */ + u32 width; + + /* Height of the screen. */ + u32 height; + + /* Color depth. */ + u16 bits_per_pixel; + + /* VBVA_SCREEN_F_* */ + u16 flags; +} __packed; + +/* vbva_enable.flags */ +#define VBVA_F_NONE 0x00000000 +#define VBVA_F_ENABLE 0x00000001 +#define VBVA_F_DISABLE 0x00000002 +/* extended VBVA to be used with WDDM */ +#define VBVA_F_EXTENDED 0x00000004 +/* vbva offset is absolute VRAM offset */ +#define VBVA_F_ABSOFFSET 0x00000008 + +struct vbva_enable { + u32 flags; + u32 offset; + s32 result; +} __packed; + +struct vbva_enable_ex { + struct vbva_enable base; + u32 screen_id; +} __packed; + +struct vbva_mouse_pointer_shape { + /* The host result. */ + s32 result; + + /* VBOX_MOUSE_POINTER_* bit flags. */ + u32 flags; + + /* X coordinate of the hot spot. */ + u32 hot_x; + + /* Y coordinate of the hot spot. */ + u32 hot_y; + + /* Width of the pointer in pixels. */ + u32 width; + + /* Height of the pointer in scanlines. */ + u32 height; + + /* Pointer data. + * + * The data consists of a 1 bpp AND mask followed by a 32 bpp XOR + * (color) mask. + * + * For pointers without an alpha channel the XOR mask pixels are 32 bit + * values: (lsb)BGR0(msb). For pointers with an alpha channel the XOR + * mask consists of (lsb)BGRA(msb) 32 bit values. + * + * The guest driver must create the AND mask for pointers with an alpha + * channel, so that if the host does not support alpha, the pointer + * could be displayed as a normal color pointer. The AND mask can be + * constructed from the alpha values. For example, an alpha value + * >= 0xf0 means bit 0 in the AND mask. + * + * The AND mask is a 1 bpp bitmap with byte-aligned scanlines. Size of + * the AND mask, therefore, is and_len = (width + 7) / 8 * height. The + * padding bits at the end of any scanline are undefined. + * + * The XOR mask follows the AND mask at the next 4-byte-aligned offset: + * u8 *xor = and + ((and_len + 3) & ~3) + * Bytes in the gap between the AND and the XOR mask are undefined. + * XOR mask scanlines have no gap between them and the size of the XOR + * mask is: xor_len = width * 4 * height. + * + * Preallocate 4 bytes for accessing actual data as p->data.
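+ * + * For example, a 33x33 pointer has and_len = (33 + 7) / 8 * 33 = 165, + * so the XOR mask starts at (165 + 3) & ~3 = 168, the three bytes in + * between being the undefined gap, and xor_len = 33 * 4 * 33 = 4356.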
+ */ + u8 data[4]; +} __packed; + +/* pointer is visible */ +#define VBOX_MOUSE_POINTER_VISIBLE 0x0001 +/* pointer has alpha channel */ +#define VBOX_MOUSE_POINTER_ALPHA 0x0002 +/* the pointer data contains a new pointer shape */ +#define VBOX_MOUSE_POINTER_SHAPE 0x0004 + +/* + * The guest driver can handle async guest cmd completion by reading the + * command offset from the io port. + */ +#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001 +/* the guest driver can handle video adapter IRQs */ +#define VBVACAPS_IRQ 0x00000002 +/* The guest can read video mode hints sent via VBVA. */ +#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004 +/* The guest can switch to a software cursor on demand. */ +#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008 +/* The guest does not depend on host handling the VBE registers. */ +#define VBVACAPS_USE_VBVA_ONLY 0x00000010 + +struct vbva_caps { + s32 rc; + u32 caps; +} __packed; + +/* Query the most recent mode hints received from the host. */ +struct vbva_query_mode_hints { + /* The maximum number of screens to return hints for. */ + u16 hints_queried_count; + /* The size of the mode hint structures directly following this one. */ + u16 hint_structure_guest_size; + /* Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */ + s32 rc; +} __packed; + +/* + * Structure in which a mode hint is returned. The guest allocates an array + * of these immediately after the vbva_query_mode_hints structure. + * To accommodate future extensions, the vbva_query_mode_hints structure + * specifies the size of the vbva_modehint structures allocated by the guest, + * and the host only fills out structure elements which fit into that size. The + * host should fill any unused members (e.g. dx, dy) or structure space on the + * end with ~0. The whole structure can legally be set to ~0 to skip a screen. + */ +struct vbva_modehint { + u32 magic; + u32 cx; + u32 cy; + u32 bpp; /* Which has never been used... */ + u32 display; + u32 dx; /* X offset into the virtual frame-buffer. */ + u32 dy; /* Y offset into the virtual frame-buffer. */ + u32 enabled; /* Not flags. Add new members for new flags. */ +} __packed; + +#define VBVAMODEHINT_MAGIC 0x0801add9u + +/* + * Report the rectangle relative to which absolute pointer events should be + * expressed. This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens and must be re-set. + */ +struct vbva_report_input_mapping { + s32 x; /* Upper left X co-ordinate relative to the first screen. */ + s32 y; /* Upper left Y co-ordinate relative to the first screen. */ + u32 cx; /* Rectangle width. */ + u32 cy; /* Rectangle height. */ +} __packed; + +/* + * Report the guest cursor position and query the host one. The host may wish + * to use the guest information to re-position its own cursor (though this is + * currently unlikely). + */ +struct vbva_cursor_position { + u32 report_position; /* Are we reporting a position?
*/ + u32 x; /* Guest cursor X position */ + u32 y; /* Guest cursor Y position */ +} __packed; + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h new file mode 100644 index 000000000000..55fcee3a6470 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_GUEST_H__ +#define __VBOXVIDEO_GUEST_H__ + +#include <linux/genalloc.h> +#include "vboxvideo.h" + +/* + * Structure grouping the context needed for sending graphics acceleration + * information to the host via VBVA. Each screen has its own VBVA buffer. + */ +struct vbva_buf_ctx { + /* Offset of the buffer in the VRAM section for the screen */ + u32 buffer_offset; + /* Length of the buffer in bytes */ + u32 buffer_length; + /* Set if we wrote to the buffer faster than the host could read it */ + bool buffer_overflow; + /* VBVA record that we are currently preparing for the host, or NULL */ + struct vbva_record *record; + /* + * Pointer to the VBVA buffer mapped into the current address space. + * Will be NULL if VBVA is not enabled. + */ + struct vbva_buffer *vbva; +}; + +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location); +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps); +int hgsmi_test_query_conf(struct gen_pool *ctx); +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret); +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len); +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host); + +bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + struct vbva_buffer *vbva, s32 screen); +void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + s32 screen); +bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx); +void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx); +bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + const void *p, u32 len); +void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, + u32 buffer_offset, u32 buffer_length); + +void hgsmi_process_display_info(struct gen_pool *ctx, u32 display, + s32 origin_x, s32 origin_y, u32 start_offset, + u32 pitch, u32 width, u32 height, + u16 bpp, u16 flags); +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height); +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints); + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h new file mode 100644 index 000000000000..427235869297 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_VBE_H__ +#define __VBOXVIDEO_VBE_H__ + +/* GUEST <-> HOST Communication API */ + +#define VBE_DISPI_BANK_ADDRESS 0xA0000 +#define VBE_DISPI_BANK_SIZE_KB 64 + +#define VBE_DISPI_MAX_XRES 16384 +#define VBE_DISPI_MAX_YRES 16384 +#define VBE_DISPI_MAX_BPP 32 + +#define VBE_DISPI_IOPORT_INDEX 0x01CE +#define VBE_DISPI_IOPORT_DATA 0x01CF + +#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8 +#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9 + +#define VBE_DISPI_INDEX_ID 0x0 +#define VBE_DISPI_INDEX_XRES 0x1 +#define VBE_DISPI_INDEX_YRES 0x2 
+#define VBE_DISPI_INDEX_BPP 0x3 +#define VBE_DISPI_INDEX_ENABLE 0x4 +#define VBE_DISPI_INDEX_BANK 0x5 +#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 +#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 +#define VBE_DISPI_INDEX_X_OFFSET 0x8 +#define VBE_DISPI_INDEX_Y_OFFSET 0x9 +#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa +#define VBE_DISPI_INDEX_FB_BASE_HI 0xb + +#define VBE_DISPI_ID0 0xB0C0 +#define VBE_DISPI_ID1 0xB0C1 +#define VBE_DISPI_ID2 0xB0C2 +#define VBE_DISPI_ID3 0xB0C3 +#define VBE_DISPI_ID4 0xB0C4 + +#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00 +/* The VBOX interface id. Indicates support for VBVA shared memory interface. */ +#define VBE_DISPI_ID_HGSMI 0xBE01 +#define VBE_DISPI_ID_ANYX 0xBE02 + +#define VBE_DISPI_DISABLED 0x00 +#define VBE_DISPI_ENABLED 0x01 +#define VBE_DISPI_GETCAPS 0x02 +#define VBE_DISPI_8BIT_DAC 0x20 + +#define VGA_PORT_HGSMI_HOST 0x3b0 +#define VGA_PORT_HGSMI_GUEST 0x3d0 + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c new file mode 100644 index 000000000000..36bc9824ec3f --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbva_base.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "hgsmi_channels.h" + +/* + * There is a hardware ring buffer in the graphics device video RAM, formerly + * in the VBox VMMDev PCI memory space. + * All graphics commands go there serialized by vbva_buffer_begin_update + * and vbva_buffer_end_update. + * + * free_offset is the write position. data_offset is the read position. + * free_offset == data_offset means the buffer is empty. + * There must always be a gap between data_offset and free_offset when data + * is in the buffer. + * The guest only changes free_offset; the host changes data_offset. + */ + +static u32 vbva_buffer_available(const struct vbva_buffer *vbva) +{ + s32 diff = vbva->data_offset - vbva->free_offset; + + return diff > 0 ? diff : vbva->data_len + diff; +} + +static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx, + const void *p, u32 len, u32 offset) +{ + struct vbva_buffer *vbva = vbva_ctx->vbva; + u32 bytes_till_boundary = vbva->data_len - offset; + u8 *dst = &vbva->data[offset]; + s32 diff = len - bytes_till_boundary; + + if (diff <= 0) { + /* Chunk will not cross buffer boundary. */ + memcpy(dst, p, len); + } else { + /* Chunk crosses buffer boundary.
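+ * E.g. with data_len = 16, offset = 14 and len = 5, two bytes land + * at offsets 14-15 and the remaining diff = 3 bytes wrap around to + * offsets 0-2.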
*/ + memcpy(dst, p, bytes_till_boundary); + memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff); + } +} + +static void vbva_buffer_flush(struct gen_pool *ctx) +{ + struct vbva_flush *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH); + if (!p) + return; + + p->reserved = 0; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); +} + +bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + const void *p, u32 len) +{ + struct vbva_record *record; + struct vbva_buffer *vbva; + u32 available; + + vbva = vbva_ctx->vbva; + record = vbva_ctx->record; + + if (!vbva || vbva_ctx->buffer_overflow || + !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)) + return false; + + available = vbva_buffer_available(vbva); + + while (len > 0) { + u32 chunk = len; + + if (chunk >= available) { + vbva_buffer_flush(ctx); + available = vbva_buffer_available(vbva); + } + + if (chunk >= available) { + if (WARN_ON(available <= vbva->partial_write_tresh)) { + vbva_ctx->buffer_overflow = true; + return false; + } + chunk = available - vbva->partial_write_tresh; + } + + vbva_buffer_place_data_at(vbva_ctx, p, chunk, + vbva->free_offset); + + vbva->free_offset = (vbva->free_offset + chunk) % + vbva->data_len; + record->len_and_flags += chunk; + available -= chunk; + len -= chunk; + p += chunk; + } + + return true; +} + +static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx, s32 screen, bool enable) +{ + struct vbva_enable_ex *p; + bool ret; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE); + if (!p) + return false; + + p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE; + p->base.offset = vbva_ctx->buffer_offset; + p->base.result = VERR_NOT_SUPPORTED; + if (screen >= 0) { + p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET; + p->screen_id = screen; + } + + hgsmi_buffer_submit(ctx, p); + + if (enable) + ret = p->base.result >= 0; + else + ret = true; + + hgsmi_buffer_free(ctx, p); + + return ret; +} + +bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + struct vbva_buffer *vbva, s32 screen) +{ + bool ret = false; + + memset(vbva, 0, sizeof(*vbva)); + vbva->partial_write_tresh = 256; + vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer); + vbva_ctx->vbva = vbva; + + ret = vbva_inform_host(vbva_ctx, ctx, screen, true); + if (!ret) + vbva_disable(vbva_ctx, ctx, screen); + + return ret; +} + +void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + s32 screen) +{ + vbva_ctx->buffer_overflow = false; + vbva_ctx->record = NULL; + vbva_ctx->vbva = NULL; + + vbva_inform_host(vbva_ctx, ctx, screen, false); +} + +bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx) +{ + struct vbva_record *record; + u32 next; + + if (!vbva_ctx->vbva || + !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED)) + return false; + + WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record); + + next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS; + + /* Flush if all slots in the records queue are used */ + if (next == vbva_ctx->vbva->record_first_index) + vbva_buffer_flush(ctx); + + /* If there is still no room after the flush, fail the request */ + if (next == vbva_ctx->vbva->record_first_index) + return false; + + record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index]; + record->len_and_flags = VBVA_F_RECORD_PARTIAL; + vbva_ctx->vbva->record_free_index = next; + /* Remember which record we are using.
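+ * It keeps VBVA_F_RECORD_PARTIAL set until vbva_buffer_end_update() + * clears the flag, which is what marks the record complete for the + * host.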
*/ + vbva_ctx->record = record; + + return true; +} + +void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx) +{ + struct vbva_record *record = vbva_ctx->record; + + WARN_ON(!vbva_ctx->vbva || !record || + !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)); + + /* Mark the record completed. */ + record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL; + + vbva_ctx->buffer_overflow = false; + vbva_ctx->record = NULL; +} + +void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, + u32 buffer_offset, u32 buffer_length) +{ + vbva_ctx->buffer_offset = buffer_offset; + vbva_ctx->buffer_length = buffer_length; +} diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 8dcce7182bb7..92e3f98d8478 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -201,8 +201,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo) bo->validated_shader = NULL; } - reservation_object_fini(&bo->_resv); - drm_gem_cma_free_object(obj); } @@ -427,8 +425,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size; mutex_unlock(&vc4->bo_lock); - bo->resv = &bo->_resv; - reservation_object_init(bo->resv); return &bo->base.base; } @@ -684,13 +680,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t) schedule_work(&vc4->bo_cache.time_work); } -struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj) -{ - struct vc4_bo *bo = to_vc4_bo(obj); - - return bo->resv; -} - struct dma_buf * vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { @@ -822,14 +811,12 @@ vc4_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt) { struct drm_gem_object *obj; - struct vc4_bo *bo; obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) return obj; - bo = to_vc4_bo(obj); - bo->resv = attach->dmabuf->resv; + obj->resv = attach->dmabuf->resv; return obj; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 730008d3da76..64c964b7c577 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -834,6 +834,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; drm_crtc_vblank_put(crtc); + + /* Wait for the page flip to unmask the underrun to ensure that + * the display list was updated by the hardware. Before that + * happens, the HVS will be using the previous display list with + * the CRTC and encoder already reconfigured, leading to + * underruns. This can be seen when reconfiguring the CRTC. 
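+ * The underrun interrupt gets masked again in vc4_hvs_irq_handler() + * when it fires, so unmasking it here re-arms underrun reporting for + * the new configuration.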
+ */ + vc4_hvs_unmask_underrun(dev, vc4_crtc->channel); } spin_unlock_irqrestore(&dev->event_lock, flags); } diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index 7a0003de71ab..59cdad89f844 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -23,6 +23,7 @@ static const struct drm_info_list vc4_debugfs_list[] = { {"vec_regs", vc4_vec_debugfs_regs, 0}, {"txp_regs", vc4_txp_debugfs_regs, 0}, {"hvs_regs", vc4_hvs_debugfs_regs, 0}, + {"hvs_underrun", vc4_hvs_debugfs_underrun, 0}, {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, @@ -35,6 +36,15 @@ static const struct drm_info_list vc4_debugfs_list[] = { int vc4_debugfs_init(struct drm_minor *minor) { + struct vc4_dev *vc4 = to_vc4_dev(minor->dev); + struct dentry *dentry; + + dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, + minor->debugfs_root, + &vc4->load_tracker_enabled); + if (!dentry) + return -ENOMEM; + return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor->debugfs_root, minor); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 5fcd2f0da7f7..4daf44fd4548 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = drm_gem_prime_import, .gem_prime_export = vc4_prime_export, - .gem_prime_res_obj = vc4_prime_res_obj, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = vc4_prime_import_sg_table, .gem_prime_vmap = vc4_prime_vmap, @@ -287,7 +286,7 @@ static int vc4_drm_bind(struct device *dev) vc4_kms_load(drm); - drm_fbdev_generic_setup(drm, 32); + drm_fbdev_generic_setup(drm, 16); return 0; @@ -312,6 +311,7 @@ static void vc4_drm_unbind(struct device *dev) drm_mode_config_cleanup(drm); + drm_atomic_private_obj_fini(&vc4->load_tracker); drm_atomic_private_obj_fini(&vc4->ctm_manager); drm_dev_put(drm); diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 2c635f001c71..7a3c093e7443 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -7,7 +7,6 @@ */ #include <linux/mm_types.h> -#include <linux/reservation.h> #include <drm/drmP.h> #include <drm/drm_util.h> #include <drm/drm_encoder.h> @@ -185,10 +184,20 @@ struct vc4_dev { /* Bitmask of the current bin_alloc used for overflow memory. */ uint32_t bin_alloc_overflow; + /* Incremented when an underrun error happens after an atomic commit. + * This is particularly useful to detect when a specific modeset is too + * demanding in terms of memory or HVS bandwidth, which is hard to guess + * at atomic check time. + */ + atomic_t underrun; + struct work_struct overflow_mem_work; int power_refcount; + /* Set to true when the load tracker is active. */ + bool load_tracker_enabled; + /* Mutex controlling the power refcount.
*/ struct mutex power_lock; @@ -201,6 +210,7 @@ struct vc4_dev { struct drm_modeset_lock ctm_state_lock; struct drm_private_obj ctm_manager; + struct drm_private_obj load_tracker; }; static inline struct vc4_dev * @@ -240,10 +250,6 @@ struct vc4_bo { */ struct vc4_validated_shader_info *validated_shader; - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; - /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i * for user-allocated labels. */ @@ -376,6 +382,16 @@ struct vc4_plane_state { * when async update is not possible. */ bool dlist_initialized; + + /* Load of this plane on the HVS block. The load is expressed in HVS + * cycles/sec. + */ + u64 hvs_load; + + /* Memory bandwidth needed for this plane. This is expressed in + * bytes/sec. + */ + u64 membus_load; }; static inline struct vc4_plane_state * @@ -685,7 +701,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); vm_fault_t vc4_fault(struct vm_fault *vmf); int vc4_mmap(struct file *filp, struct vm_area_struct *vma); -struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj); int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, @@ -773,6 +788,9 @@ void vc4_irq_reset(struct drm_device *dev); extern struct platform_driver vc4_hvs_driver; void vc4_hvs_dump_state(struct drm_device *dev); int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused); +int vc4_hvs_debugfs_underrun(struct seq_file *m, void *unused); +void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); +void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); /* vc4_kms.c */ int vc4_kms_load(struct drm_device *dev); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index aea2b8dfec17..5ee5bf7fedf7 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -536,7 +536,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->bo[i]->base); bo->seqno = seqno; - reservation_object_add_shared_fence(bo->resv, exec->fence); + reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); } list_for_each_entry(bo, &exec->unref_list, unref_head) { @@ -547,7 +547,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); bo->write_seqno = seqno; - reservation_object_add_excl_fence(bo->resv, exec->fence); + reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); } } @@ -559,7 +559,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev, int i; for (i = 0; i < exec->bo_count; i++) { - struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); + struct drm_gem_object *bo = &exec->bo[i]->base; ww_mutex_unlock(&bo->resv->lock); } @@ -581,13 +581,13 @@ vc4_lock_bo_reservations(struct drm_device *dev, { int contended_lock = -1; int i, ret; - struct vc4_bo *bo; + struct drm_gem_object *bo; ww_acquire_init(acquire_ctx, &reservation_ww_class); retry: if (contended_lock != -1) { - bo = to_vc4_bo(&exec->bo[contended_lock]->base); + bo = &exec->bo[contended_lock]->base; ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, acquire_ctx); if (ret) { @@ -600,19 +600,19 @@ retry: if (i == contended_lock) continue; - bo = to_vc4_bo(&exec->bo[i]->base); + bo = &exec->bo[i]->base; ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); if (ret) { int j; for (j = 
0; j < i; j++) { - bo = to_vc4_bo(&exec->bo[j]->base); + bo = &exec->bo[j]->base; ww_mutex_unlock(&bo->resv->lock); } if (contended_lock != -1 && contended_lock >= i) { - bo = to_vc4_bo(&exec->bo[contended_lock]->base); + bo = &exec->bo[contended_lock]->base; ww_mutex_unlock(&bo->resv->lock); } @@ -633,7 +633,7 @@ retry: * before we commit the CL to the hardware. */ for (i = 0; i < exec->bo_count; i++) { - bo = to_vc4_bo(&exec->bo[i]->base); + bo = &exec->bo[i]->base; ret = reservation_object_reserve_shared(bo->resv, 1); if (ret) { diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 5d8c749c9749..918e71256ecc 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -22,6 +22,7 @@ * each CRTC. */ +#include <drm/drm_atomic_helper.h> #include <linux/component.h> #include "vc4_drv.h" #include "vc4_regs.h" @@ -102,6 +103,18 @@ int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused) return 0; } + +int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_printer p = drm_seq_file_printer(m); + + drm_printf(&p, "%d\n", atomic_read(&vc4->underrun)); + + return 0; +} #endif /* The filter kernel is composed of dwords each containing 3 9-bit @@ -166,6 +179,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs, return 0; } +void vc4_hvs_mask_underrun(struct drm_device *dev, int channel) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 dispctrl = HVS_READ(SCALER_DISPCTRL); + + dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel); + + HVS_WRITE(SCALER_DISPCTRL, dispctrl); +} + +void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 dispctrl = HVS_READ(SCALER_DISPCTRL); + + dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel); + + HVS_WRITE(SCALER_DISPSTAT, + SCALER_DISPSTAT_EUFLOW(channel)); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); +} + +static void vc4_hvs_report_underrun(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + atomic_inc(&vc4->underrun); + DRM_DEV_ERROR(dev->dev, "HVS underrun\n"); +} + +static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct vc4_dev *vc4 = to_vc4_dev(dev); + irqreturn_t irqret = IRQ_NONE; + int channel; + u32 control; + u32 status; + + status = HVS_READ(SCALER_DISPSTAT); + control = HVS_READ(SCALER_DISPCTRL); + + for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { + /* Interrupt masking is not always honored, so check it here. */ + if (status & SCALER_DISPSTAT_EUFLOW(channel) && + control & SCALER_DISPCTRL_DSPEISLUR(channel)) { + vc4_hvs_mask_underrun(dev, channel); + vc4_hvs_report_underrun(dev); + + irqret = IRQ_HANDLED; + } + } + + /* Clear every per-channel interrupt flag. */ + HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) | + SCALER_DISPSTAT_IRQMASK(1) | + SCALER_DISPSTAT_IRQMASK(2)); + + return irqret; +} + static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -219,15 +293,36 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) dispctrl = HVS_READ(SCALER_DISPCTRL); dispctrl |= SCALER_DISPCTRL_ENABLE; + dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | + SCALER_DISPCTRL_DISPEIRQ(1) | + SCALER_DISPCTRL_DISPEIRQ(2); /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise * be unused. 
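The helpers above implement a mask/re-arm protocol around every atomic commit. A condensed sketch of the sequence, using only functions introduced by this patch (illustrative; the real call sites are vc4_atomic_complete_commit() and vc4_crtc_handle_page_flip(), both visible in this diff):

	/* 1. Commit: the HVS may still scan out the old display list while
	 *    the CRTC and encoder are already reconfigured, so silence the
	 *    expected underruns on this channel.
	 */
	vc4_hvs_mask_underrun(dev, vc4_crtc->channel);

	/* 2. Page flip done: the new display list is in use, so clear the
	 *    stale EUFLOW status and re-enable reporting.
	 */
	vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);

	/* 3. Any EUFLOW interrupt from now on is a real underrun:
	 *    vc4_hvs_irq_handler() masks the channel again, increments
	 *    vc4->underrun and logs "HVS underrun".
	 */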
*/ dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; + dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | + SCALER_DISPCTRL_SLVWREIRQ | + SCALER_DISPCTRL_SLVRDEIRQ | + SCALER_DISPCTRL_DSPEIEOF(0) | + SCALER_DISPCTRL_DSPEIEOF(1) | + SCALER_DISPCTRL_DSPEIEOF(2) | + SCALER_DISPCTRL_DSPEIEOLN(0) | + SCALER_DISPCTRL_DSPEIEOLN(1) | + SCALER_DISPCTRL_DSPEIEOLN(2) | + SCALER_DISPCTRL_DSPEISLUR(0) | + SCALER_DISPCTRL_DSPEISLUR(1) | + SCALER_DISPCTRL_DSPEISLUR(2) | + SCALER_DISPCTRL_SCLEIRQ); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); HVS_WRITE(SCALER_DISPCTRL, dispctrl); + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), + vc4_hvs_irq_handler, 0, "vc4 hvs", drm); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 91b8c72ff361..5160cad25fce 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) return container_of(priv, struct vc4_ctm_state, base); } +struct vc4_load_tracker_state { + struct drm_private_state base; + u64 hvs_load; + u64 membus_load; +}; + +static struct vc4_load_tracker_state * +to_vc4_load_tracker_state(struct drm_private_state *priv) +{ + return container_of(priv, struct vc4_load_tracker_state, base); +} + static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, struct drm_private_obj *manager) { @@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc *vc4_crtc; + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + if (!state->crtcs[i].ptr || !state->crtcs[i].commit) + continue; + + vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr); + vc4_hvs_mask_underrun(dev, vc4_crtc->channel); + } drm_atomic_helper_wait_for_fences(dev, state, false); @@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) return 0; } +static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) +{ + struct drm_plane_state *old_plane_state, *new_plane_state; + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct vc4_load_tracker_state *load_state; + struct drm_private_state *priv_state; + struct drm_plane *plane; + int i; + + priv_state = drm_atomic_get_private_obj_state(state, + &vc4->load_tracker); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + load_state = to_vc4_load_tracker_state(priv_state); + for_each_oldnew_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + struct vc4_plane_state *vc4_plane_state; + + if (old_plane_state->fb && old_plane_state->crtc) { + vc4_plane_state = to_vc4_plane_state(old_plane_state); + load_state->membus_load -= vc4_plane_state->membus_load; + load_state->hvs_load -= vc4_plane_state->hvs_load; + } + + if (new_plane_state->fb && new_plane_state->crtc) { + vc4_plane_state = to_vc4_plane_state(new_plane_state); + load_state->membus_load += vc4_plane_state->membus_load; + load_state->hvs_load += vc4_plane_state->hvs_load; + } + } + + /* Don't check the load when the tracker is disabled. */ + if (!vc4->load_tracker_enabled) + return 0; + + /* The absolute limit is 2 Gbytes/sec, but let's take a margin to let + * the system work when other blocks are accessing the memory. + */ + if (load_state->membus_load > SZ_1G + SZ_512M) + return -ENOSPC; + + /* The HVS clock is supposed to run at 250 MHz; let's take a margin and + * consider the maximum number of cycles to be 240M.
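For reference, the two thresholds work out as follows: SZ_1G + SZ_512M is 1,610,612,736 bytes/sec (about 1.5 GiB/s), keeping a 20-25% margin under the 2 Gbytes/sec bus limit, while 240,000,000 cycles/sec is 96% of the nominal 250 MHz HVS clock.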
+ */ + if (load_state->hvs_load > 240000000ULL) + return -ENOSPC; + + return 0; +} + +static struct drm_private_state * +vc4_load_tracker_duplicate_state(struct drm_private_obj *obj) +{ + struct vc4_load_tracker_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct vc4_load_tracker_state *load_state; + + load_state = to_vc4_load_tracker_state(state); + kfree(load_state); +} + +static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { + .atomic_duplicate_state = vc4_load_tracker_duplicate_state, + .atomic_destroy_state = vc4_load_tracker_destroy_state, +}; + static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) if (ret < 0) return ret; - return drm_atomic_helper_check(dev, state); + ret = drm_atomic_helper_check(dev, state); + if (ret) + return ret; + + return vc4_load_tracker_atomic_check(state); } static const struct drm_mode_config_funcs vc4_mode_funcs = { @@ -407,8 +512,14 @@ int vc4_kms_load(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_ctm_state *ctm_state; + struct vc4_load_tracker_state *load_state; int ret; + /* Start with the load tracker enabled. It can be disabled through the + * debugfs hvs_load_tracker file. + */ + vc4->load_tracker_enabled = true; + sema_init(&vc4->async_modeset, 1); /* Set support for vblank irq fast disable, before drm_vblank_init() */ @@ -436,6 +547,15 @@ int vc4_kms_load(struct drm_device *dev) drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) { + drm_atomic_private_obj_fini(&vc4->ctm_manager); + return -ENOMEM; + } + + drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base, + &vc4_load_tracker_state_funcs); + drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index d098337c10e9..4d918d3e4858 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, } } +static void vc4_plane_calc_load(struct drm_plane_state *state) +{ + unsigned int hvs_load_shift, vrefresh, i; + struct drm_framebuffer *fb = state->fb; + struct vc4_plane_state *vc4_state; + struct drm_crtc_state *crtc_state; + unsigned int vscale_factor; + + vc4_state = to_vc4_plane_state(state); + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode); + + /* The HVS is able to process 2 pixels/cycle when scaling the source, + * 4 pixels/cycle otherwise. + * The alpha blending step seems to be pipelined and always operates + * at 4 pixels/cycle, so the limiting factor here seems to be the + * scaler block. + * HVS load is expressed in clk-cycles/sec (AKA Hz).
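A worked example of this model (hypothetical numbers, assuming a single 1920x1080 XRGB8888 plane at 60 Hz with no scaling, so vscale_factor = 1 and hvs_load_shift = 2 in the code that follows):

	membus_load = 1920 * 1080 * 1 * 4 * 60 = 497,664,000 bytes/sec
	hvs_load    = (1920 * 1080 * 60) >> 2  =  31,104,000 cycles/sec

That is roughly a third of the memory-bus budget and about 13% of the 240M cycle budget enforced by vc4_load_tracker_atomic_check().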
+ */ + if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || + vc4_state->x_scaling[1] != VC4_SCALING_NONE || + vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) + hvs_load_shift = 1; + else + hvs_load_shift = 2; + + vc4_state->membus_load = 0; + vc4_state->hvs_load = 0; + for (i = 0; i < fb->format->num_planes; i++) { + /* Even if the bandwidth/plane required for a single frame is + * + * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh + * + * when downscaling, we have to read more pixels per line in + * the time frame reserved for a single line, so the bandwidth + * demand can be momentarily higher. To account for that, we + * calculate the down-scaling factor and multiply the plane + * load by this number. We're likely over-estimating the read + * demand, but that's better than under-estimating it. + */ + vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i], + vc4_state->crtc_h); + vc4_state->membus_load += vc4_state->src_w[i] * + vc4_state->src_h[i] * vscale_factor * + fb->format->cpp[i]; + vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w; + } + + vc4_state->hvs_load *= vrefresh; + vc4_state->hvs_load >>= hvs_load_shift; + vc4_state->membus_load *= vrefresh; +} + static int vc4_plane_allocate_lbm(struct drm_plane_state *state) { struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); @@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, */ vc4_state->dlist_initialized = 1; + vc4_plane_calc_load(state); + return 0; } @@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane, bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); - fence = reservation_object_get_excl_rcu(bo->resv); + fence = reservation_object_get_excl_rcu(bo->base.base.resv); drm_atomic_set_fence_for_plane(state, fence); if (plane->state->fb == state->fb) diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 931088014272..c0c5fadaf7e3 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -212,11 +212,11 @@ #define PV_HACT_ACT 0x30 +#define SCALER_CHANNELS_COUNT 3 + #define SCALER_DISPCTRL 0x00000000 /* Global register for clock gating the HVS */ # define SCALER_DISPCTRL_ENABLE BIT(31) -# define SCALER_DISPCTRL_DSP2EISLUR BIT(15) -# define SCALER_DISPCTRL_DSP1EISLUR BIT(14) # define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18) # define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18 @@ -224,45 +224,25 @@ * SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are * always enabled. */ -# define SCALER_DISPCTRL_DSP0EISLUR BIT(13) -# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12) -# define SCALER_DISPCTRL_DSP2EIEOF BIT(11) -# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10) -# define SCALER_DISPCTRL_DSP1EIEOF BIT(9) +# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x)) /* Enables Display 0 end-of-line-N contribution to * SCALER_DISPSTAT_IRQDISP0 */ -# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8) +# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2)) /* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */ -# define SCALER_DISPCTRL_DSP0EIEOF BIT(7) +# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2)) # define SCALER_DISPCTRL_SLVRDEIRQ BIT(6) # define SCALER_DISPCTRL_SLVWREIRQ BIT(5) # define SCALER_DISPCTRL_DMAEIRQ BIT(4) -# define SCALER_DISPCTRL_DISP2EIRQ BIT(3) -# define SCALER_DISPCTRL_DISP1EIRQ BIT(2) /* Enables interrupt generation on the enabled EOF/EOLN/EISLUR * bits and short frames.
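The parameterized SCALER_DISPCTRL macros above are pure renames of the per-display defines they replace; a spot check of the expansions (the BUILD_BUG_ON lines are illustrative, not part of the patch):

	BUILD_BUG_ON(SCALER_DISPCTRL_DSPEISLUR(0) != BIT(13)); /* was DSP0EISLUR */
	BUILD_BUG_ON(SCALER_DISPCTRL_DSPEISLUR(2) != BIT(15)); /* was DSP2EISLUR */
	BUILD_BUG_ON(SCALER_DISPCTRL_DSPEIEOLN(1) != BIT(10)); /* was DSP1EIEOLN */
	BUILD_BUG_ON(SCALER_DISPCTRL_DSPEIEOF(2) != BIT(11));  /* was DSP2EIEOF */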
*/ -# define SCALER_DISPCTRL_DISP0EIRQ BIT(1) +# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x)) /* Enables interrupt generation on scaler profiler interrupt. */ # define SCALER_DISPCTRL_SCLEIRQ BIT(0) #define SCALER_DISPSTAT 0x00000004 -# define SCALER_DISPSTAT_COBLOW2 BIT(29) -# define SCALER_DISPSTAT_EOLN2 BIT(28) -# define SCALER_DISPSTAT_ESFRAME2 BIT(27) -# define SCALER_DISPSTAT_ESLINE2 BIT(26) -# define SCALER_DISPSTAT_EUFLOW2 BIT(25) -# define SCALER_DISPSTAT_EOF2 BIT(24) - -# define SCALER_DISPSTAT_COBLOW1 BIT(21) -# define SCALER_DISPSTAT_EOLN1 BIT(20) -# define SCALER_DISPSTAT_ESFRAME1 BIT(19) -# define SCALER_DISPSTAT_ESLINE1 BIT(18) -# define SCALER_DISPSTAT_EUFLOW1 BIT(17) -# define SCALER_DISPSTAT_EOF1 BIT(16) - # define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14) # define SCALER_DISPSTAT_RESP_SHIFT 14 # define SCALER_DISPSTAT_RESP_OKAY 0 @@ -270,23 +250,26 @@ # define SCALER_DISPSTAT_RESP_SLVERR 2 # define SCALER_DISPSTAT_RESP_DECERR 3 -# define SCALER_DISPSTAT_COBLOW0 BIT(13) +# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8)) /* Set when the DISPEOLN line is done compositing. */ -# define SCALER_DISPSTAT_EOLN0 BIT(12) +# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8)) /* Set when VSTART is seen but there are still pixels in the current * output line. */ -# define SCALER_DISPSTAT_ESFRAME0 BIT(11) +# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8)) /* Set when HSTART is seen but there are still pixels in the current * output line. */ -# define SCALER_DISPSTAT_ESLINE0 BIT(10) +# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8)) /* Set when the downstream tries to read from the display FIFO * while it's empty. */ -# define SCALER_DISPSTAT_EUFLOW0 BIT(9) +# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8)) /* Set when the display mode changes from RUN to EOF */ -# define SCALER_DISPSTAT_EOF0 BIT(8) +# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8)) + +# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \ + 8 + ((x) * 8)) /* Set on AXI invalid DMA ID error. */ # define SCALER_DISPSTAT_DMA_ERROR BIT(7) @@ -298,12 +281,10 @@ * SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY. */ # define SCALER_DISPSTAT_IRQDMA BIT(4) -# define SCALER_DISPSTAT_IRQDISP2 BIT(3) -# define SCALER_DISPSTAT_IRQDISP1 BIT(2) /* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their * corresponding interrupt bit is enabled in DISPCTRL. */ -# define SCALER_DISPSTAT_IRQDISP0 BIT(1) +# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x)) /* On read, the profiler interrupt. On write, clear *all* interrupt bits. */ # define SCALER_DISPSTAT_IRQSCL BIT(0) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 5dabd91f2d7e..cc2888dd7171 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -249,7 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, struct drm_connector_state *conn_state) { struct drm_crtc_state *crtc_state; - struct drm_gem_cma_object *gem; struct drm_framebuffer *fb; int i; @@ -275,8 +274,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, if (i == ARRAY_SIZE(drm_fmts)) return -EINVAL; - gem = drm_fb_cma_get_gem_obj(fb, 0); - /* Pitch must be aligned to 16 bytes.
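The SCALER_DISPSTAT macros in the hunk above follow the same scheme, with each channel's status bits repeating every 8 bits (again, illustrative spot checks only):

	BUILD_BUG_ON(SCALER_DISPSTAT_EUFLOW(0) != BIT(9));  /* was EUFLOW0 */
	BUILD_BUG_ON(SCALER_DISPSTAT_EUFLOW(1) != BIT(17)); /* was EUFLOW1 */
	BUILD_BUG_ON(SCALER_DISPSTAT_EUFLOW(2) != BIT(25)); /* was EUFLOW2 */

SCALER_DISPSTAT_IRQMASK(0) accordingly expands to VC4_MASK(13, 8), the COBLOW..EOF span of channel 0, which is what vc4_hvs_irq_handler() writes back to clear every per-channel flag. In the vc4_txp check below, GENMASK(3, 0) is 0xf, so the pitch test is equivalent to requiring fb->pitches[0] % 16 == 0.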
*/ if (fb->pitches[0] & GENMASK(3, 0)) return -EINVAL; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b996ac1d4fcc..7c2893181ba4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -205,10 +205,10 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_pin = virtgpu_gem_prime_pin, - .gem_prime_unpin = virtgpu_gem_prime_unpin, + .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3238fdf58eb4..86a264cee362 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -352,8 +352,7 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj); -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index c59ec34c80a5..22ef151410e0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -28,15 +28,16 @@ * device that might share buffers with virtgpu */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj) +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) { - WARN_ONCE(1, "not implemented"); - return -ENODEV; -} + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); + if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) + /* should not happen */ + return ERR_PTR(-EINVAL); + + return drm_prime_pages_to_sg(bo->tbo.ttm->pages, + bo->tbo.ttm->num_pages); } void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) @@ -56,7 +57,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) } int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *area) + struct vm_area_struct *vma) { - return -ENODEV; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start; + return drm_gem_prime_mmap(obj, vma); } diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 3e78a832d7f9..84aa4d61dc42 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info) drm_kms_helper_poll_fini(dev); drm_dev_unplug(dev); + drm_dev_put(dev); front_info->drm_info = NULL;
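The xen change above follows the drm_dev_unplug()/drm_dev_put() pairing rule: unplugging only marks the device as gone, it does not drop the reference taken at allocation time. A minimal teardown sketch (the function name is hypothetical):

	static void example_drv_fini(struct drm_device *drm)
	{
		drm_kms_helper_poll_fini(drm);
		drm_dev_unplug(drm);	/* mark unplugged; new accesses fail */
		drm_dev_put(drm);	/* drop the initial reference */
	}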