Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_cec.c | 15
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 27
-rw-r--r--  drivers/gpu/drm/bridge/cdns-dsi.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 21
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 37
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 18
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 7
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 83
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 280
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 21
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 11
-rw-r--r--  drivers/gpu/drm/drm_of.c | 3
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 49
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 13
-rw-r--r--  drivers/gpu/drm/drm_print.c | 2
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 87
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 43
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 4
-rw-r--r--  drivers/gpu/drm/gud/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 16
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 2
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.c | 22
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.h | 5
-rw-r--r--  drivers/gpu/drm/kmb/kmb_plane.c | 15
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 9
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.c | 28
-rw-r--r--  drivers/gpu/drm/lima/lima_sched.h | 6
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mcde/mcde_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_aal.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 60
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 5
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_pll.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_catalog.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 35
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/client.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/driver.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/client.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 27
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 1895
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 85
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6d27a1.c | 320
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 1100
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c | 10
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 33
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 48
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.h | 5
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 57
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_regs.h | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/r128/ati_pcigart.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 6
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 14
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 33
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c | 26
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 140
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c | 62
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 199
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 18
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_csc.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 21
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/plane.c | 2
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 73
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 58
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 28
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 18
-rw-r--r--  drivers/gpu/drm/udl/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/v3d/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 5
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 6
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 129
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 44
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c | 15
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 28
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 86
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 342
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c | 32
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c | 61
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 24
-rw-r--r--  drivers/gpu/drm/zte/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/zte/Makefile | 10
-rw-r--r--  drivers/gpu/drm/zte/zx_common_regs.h | 28
-rw-r--r--  drivers/gpu/drm/zte/zx_drm_drv.c | 184
-rw-r--r--  drivers/gpu/drm/zte/zx_drm_drv.h | 34
-rw-r--r--  drivers/gpu/drm/zte/zx_hdmi.c | 760
-rw-r--r--  drivers/gpu/drm/zte/zx_hdmi_regs.h | 66
-rw-r--r--  drivers/gpu/drm/zte/zx_plane.c | 537
-rw-r--r--  drivers/gpu/drm/zte/zx_plane.h | 26
-rw-r--r--  drivers/gpu/drm/zte/zx_plane_regs.h | 120
-rw-r--r--  drivers/gpu/drm/zte/zx_tvenc.c | 400
-rw-r--r--  drivers/gpu/drm/zte/zx_tvenc_regs.h | 27
-rw-r--r--  drivers/gpu/drm/zte/zx_vga.c | 527
-rw-r--r--  drivers/gpu/drm/zte/zx_vga_regs.h | 33
-rw-r--r--  drivers/gpu/drm/zte/zx_vou.c | 921
-rw-r--r--  drivers/gpu/drm/zte/zx_vou.h | 64
-rw-r--r--  drivers/gpu/drm/zte/zx_vou_regs.h | 212
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 11
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 30
228 files changed, 4214 insertions(+), 6949 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cea777ae7fb9..b17e231ca6f7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
config DRM_GEM_SHMEM_HELPER
bool
- depends on DRM
+ depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
- depends on DRM
+ depends on DRM && MMU
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option to get a virtual graphics memory manager,
as used by Mesa's software renderer for enhanced performance.
@@ -279,7 +280,7 @@ config DRM_VGEM
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
- depends on DRM
+ depends on DRM && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
@@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
-source "drivers/gpu/drm/zte/Kconfig"
-
source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ad1112154898..0dff40bb863c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -113,7 +113,6 @@ obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
-obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 913f9eaa9cd6..0311d799a010 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
+ drm_sched_job_arm(&job->base);
+
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
* added to BOs.
@@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8d682befe0d6..a25e192e8a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
- int r;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
@@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = dma_fence_signal(fence);
- if (!r)
- DMA_FENCE_TRACE(fence, "signaled from irq context\n");
- else
- BUG();
-
+ dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
-
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 3ac39b44a211..cc2e0c9cfe0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -509,7 +509,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
adev->irq.virq[src_id]) {
- generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
+ generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index de29518673dd..4e30a09bc887 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
+ drm_sched_job_arm(&job->base);
+
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
return 0;
}
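The two amdgpu hunks above show the reworked drm_sched submission flow: drm_sched_job_init() is now followed by an explicit drm_sched_job_arm(), and drm_sched_entity_push_job() takes only the job. A minimal caller sketch (the foo_* names are hypothetical, not from this series):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

struct foo_job {
	struct drm_sched_job base;
};

static int foo_submit(struct foo_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **fence_out)
{
	int r;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	/* Arming finalises the scheduler fences and binds the job to its entity. */
	drm_sched_job_arm(&job->base);

	/* The finished fence is valid from this point on. */
	*fence_out = dma_fence_get(&job->base.s_fence->finished);

	/* The entity is already recorded in the job, so push takes only the job. */
	drm_sched_entity_push_job(&job->base);

	return 0;
}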
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 94126dc39688..820fcb24231f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1066,8 +1066,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- amdgpu_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1148,6 +1146,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
+ amdgpu_ttm_backend_unbind(bdev, ttm);
+
if (gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index a612ba6dc389..ab6bc5d79012 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -28,9 +28,9 @@
*/
#include <linux/delay.h>
+#include <linux/stdarg.h>
#include "dm_services.h"
-#include <stdarg.h>
#include "dc.h"
#include "dc_dmub_srv.h"
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 359f091e37f1..13a723395de2 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -39,7 +39,6 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <stdarg.h>
#include "atomfirmware.h"
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 8c2ab3d653b7..0562bdaac00c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
return !malidp_hw_format_is_afbc_only(format);
}
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index a20a45c0b353..28d9becc939c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/device.h>
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 14d73fb1dd15..1a871f6b6822 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
ret = sp_tx_aux_rd(ctx, 0xf1);
if (ret) {
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
} else {
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
@@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int segments_edid_read(struct anx7625_data *ctx,
@@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int sp_tx_edid_read(struct anx7625_data *ctx,
@@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
}
/* Reset aux channel */
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
+ return ret;
+ }
return (blocks_num + 1);
}
@@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_MODE_VIDEO_HSE;
if (mipi_dsi_attach(dsi) < 0) {
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index e6e331071a00..d8a15c459b42 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
struct cdns_dsi *dsi;
struct cdns_dsi_input *input;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
input = &dsi->input;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, res);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 2f2a09adb4bc..06b59b422c69 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -889,7 +889,7 @@ unlock:
static int it66121_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- u32 vendor_ids[2], device_ids[2], revision_id;
+ u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
struct it66121_ctx *ctx;
@@ -918,11 +918,26 @@ static int it66121_probe(struct i2c_client *client,
return -EINVAL;
ep = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!ep)
- return -EPROBE_DEFER;
+ if (!ep) {
+ dev_err(ctx->dev, "The endpoint is unconnected\n");
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ dev_err(ctx->dev, "The remote device is disabled\n");
+ return -ENODEV;
+ }
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
+ if (!ctx->next_bridge) {
+ dev_dbg(ctx->dev, "Next bridge not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index c916f4b8907e..b32295abd9e7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -332,3 +333,39 @@ struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
return &panel_bridge->connector;
}
EXPORT_SYMBOL(drm_panel_bridge_connector);
+
+#ifdef CONFIG_OF
+/**
+ * devm_drm_of_get_bridge - Return next bridge in the chain
+ * @dev: device to tie the bridge lifetime to
+ * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
+ *
+ * Given a DT node's port and endpoint number, finds the connected node
+ * and returns the associated bridge if any, or creates and returns a
+ * drm panel bridge instance if a panel is connected.
+ *
+ * Returns a pointer to the bridge if successful, or an error pointer
+ * otherwise.
+ */
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *np,
+ u32 port, u32 endpoint)
+{
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(np, port, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (panel)
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_drm_of_get_bridge);
+#endif
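A usage sketch for the new helper, following the devm_drm_of_get_bridge() kernel-doc above (driver name, port and endpoint values are illustrative only):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

static int foo_encoder_attach_output(struct platform_device *pdev,
				     struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;

	/*
	 * Whatever sits on port 1, endpoint 0: an existing bridge, or a
	 * panel automatically wrapped in a devm-managed panel bridge.
	 */
	bridge = devm_drm_of_get_bridge(&pdev->dev, pdev->dev.of_node, 1, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	return drm_bridge_attach(encoder, bridge, NULL, 0);
}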
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 7bd0affa057a..685e9c38b2db 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -18,16 +18,18 @@
#include <drm/drm_print.h>
#define PAGE2_GPIO_H 0xa7
-#define PS_GPIO9 BIT(1)
+#define PS_GPIO9 BIT(1)
#define PAGE2_I2C_BYPASS 0xea
-#define I2C_BYPASS_EN 0xd0
+#define I2C_BYPASS_EN 0xd0
#define PAGE2_MCS_EN 0xf3
-#define MCS_EN BIT(0)
+#define MCS_EN BIT(0)
+
#define PAGE3_SET_ADD 0xfe
-#define VDO_CTL_ADD 0x13
-#define VDO_DIS 0x18
-#define VDO_EN 0x1c
-#define DP_NUM_LANES 4
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+
+#define NUM_MIPI_LANES 4
/*
* PS8640 uses multiple addresses:
@@ -254,7 +256,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = DP_NUM_LANES;
+ dsi->lanes = NUM_MIPI_LANES;
ret = mipi_dsi_attach(dsi);
if (ret)
goto err_dsi_attach;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index a8ed66751c2d..7ee29f073857 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -51,10 +52,8 @@
*
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
- * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
- * panel with drm_panel_bridge_add_typed() (or the managed version
- * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
- * attached to the encoder with a call to drm_bridge_attach().
+ * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
+ * encoder with a call to drm_bridge_attach().
*
* Bridges are responsible for linking themselves with the next bridge in the
* chain, if any. This is done the same way as for encoders, with the call to
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2ba257b1ae20..3bc782b630b9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -65,6 +65,14 @@
* support can instead use e.g. drm_helper_hpd_irq_event().
*/
+/*
+ * Global connector list for drm_connector_find_by_fwnode().
+ * Note drm_connector_[un]register() first take connector->lock and then
+ * take the connector_list_lock.
+ */
+static DEFINE_MUTEX(connector_list_lock);
+static LIST_HEAD(connector_list);
+
struct drm_conn_prop_enum_list {
int type;
const char *name;
@@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
+ INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
@@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
+ fwnode_handle_put(connector->fwnode);
+ connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(connector->dev);
+ mutex_lock(&connector_list_lock);
+ list_add_tail(&connector->global_connector_list_entry, &connector_list);
+ mutex_unlock(&connector_list_lock);
goto unlock;
err_debugfs:
@@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector)
return;
}
+ mutex_lock(&connector_list_lock);
+ list_del_init(&connector->global_connector_list_entry);
+ mutex_unlock(&connector_list_lock);
+
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -1602,7 +1620,7 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
* connectors.
*
* Atomic drivers should use drm_connector_attach_scaling_mode_property()
- * instead to correctly assign &drm_connector_state.picture_aspect_ratio
+ * instead to correctly assign &drm_connector_state.scaling_mode
* in the atomic state.
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
@@ -1722,7 +1740,7 @@ EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
*
* This is used to add support for scaling mode to atomic drivers.
- * The scaling mode will be set to &drm_connector_state.picture_aspect_ratio
+ * The scaling mode will be set to &drm_connector_state.scaling_mode
* and can be used from &drm_connector_helper_funcs->atomic_check for validation.
*
* This is the atomic version of drm_mode_create_scaling_mode_property().
@@ -2543,6 +2561,67 @@ out:
return ret;
}
+/**
+ * drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
+ * @fwnode: fwnode for which to find the matching drm_connector
+ *
+ * This function looks up a drm_connector based on its associated fwnode. When
+ * a connector is found a reference to the connector is returned. The caller must
+ * call drm_connector_put() to release this reference when it is done with the
+ * connector.
+ *
+ * Returns: A reference to the found connector or an ERR_PTR().
+ */
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
+
+ if (!fwnode)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&connector_list_lock);
+
+ list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
+ if (connector->fwnode == fwnode ||
+ (connector->fwnode && connector->fwnode->secondary == fwnode)) {
+ drm_connector_get(connector);
+ found = connector;
+ break;
+ }
+ }
+
+ mutex_unlock(&connector_list_lock);
+
+ return found;
+}
+
+/**
+ * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
+ * @connector_fwnode: fwnode_handle to report the event on
+ *
+ * On some hardware a hotplug event notification may come from outside the display
+ * driver / device. An example of this is some USB Type-C setups where the hardware
+ * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
+ * status bit to the GPU's DP HPD pin.
+ *
+ * This function can be used to report these out-of-band events after obtaining
+ * a drm_connector reference through calling drm_connector_find_by_fwnode().
+ */
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
+{
+ struct drm_connector *connector;
+
+ connector = drm_connector_find_by_fwnode(connector_fwnode);
+ if (IS_ERR(connector))
+ return;
+
+ if (connector->funcs->oob_hotplug_event)
+ connector->funcs->oob_hotplug_event(connector);
+
+ drm_connector_put(connector);
+}
+EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
+
/**
* DOC: Tile group
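The out-of-band path added above is meant for code outside the display driver, for example a USB Type-C DisplayPort altmode handler. A minimal sketch, assuming the fwnode is the one the display driver assigned to connector->fwnode (the foo_* wrapper is hypothetical):

#include <linux/fwnode.h>
#include <drm/drm_connector.h>

static void foo_dp_altmode_hpd_notify(struct fwnode_handle *dp_fwnode)
{
	/*
	 * Resolves the connector via drm_connector_find_by_fwnode() and, if
	 * the driver implements it, invokes its ->oob_hotplug_event() hook.
	 */
	drm_connector_oob_hotplug_event(dp_fwnode);
}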
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index edb772947cb4..63279e984342 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -58,6 +58,7 @@ struct drm_property;
struct edid;
struct kref;
struct work_struct;
+struct fwnode_handle;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj,
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode);
/* IOCTL */
int drm_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6325877c5fd6..53b342c058be 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -100,122 +100,127 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
+#define EDID_QUIRK(vend, product_id, _quirks) \
+{ \
+ .panel_id = drm_edid_encode_panel_id(vend, product_id), \
+ .quirks = _quirks \
+}
+
static const struct edid_quirk {
- char vendor[4];
- int product_id;
+ u32 panel_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK("ACR", 44358, EDID_QUIRK_PREFER_LARGE_60),
/* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK("API", 0x7602, EDID_QUIRK_PREFER_LARGE_60),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("AEO", 0, EDID_QUIRK_FORCE_6BPC),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("BOE", 0x78b, EDID_QUIRK_FORCE_6BPC),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("CPT", 0x17df, EDID_QUIRK_FORCE_6BPC),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("SDC", 0x3652, EDID_QUIRK_FORCE_6BPC),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("BOE", 0x0771, EDID_QUIRK_FORCE_6BPC),
/* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK("MAX", 1516, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK("MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60),
/* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ EDID_QUIRK("EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
/* Envision EN2028 */
- { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK("EPI", 8232, EDID_QUIRK_PREFER_LARGE_60),
/* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
+ EDID_QUIRK("FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+ EDID_QUIRK("LGD", 764, EDID_QUIRK_FORCE_10BPC),
/* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ EDID_QUIRK("LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK("LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
/* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ EDID_QUIRK("SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP),
/* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK("SAM", 596, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK("SAM", 638, EDID_QUIRK_PREFER_LARGE_60),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
+ EDID_QUIRK("SNY", 0x2541, EDID_QUIRK_FORCE_12BPC),
/* ViewSonic VA2026w */
- { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+ EDID_QUIRK("VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
/* Medion MD 30217 PG */
- { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ EDID_QUIRK("MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
/* Lenovo G50 */
- { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK("SDC", 18514, EDID_QUIRK_FORCE_6BPC),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK("SEC", 0xd033, EDID_QUIRK_FORCE_8BPC),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK("ETR", 13896, EDID_QUIRK_FORCE_8BPC),
/* Valve Index Headset */
- { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91be, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP),
/* HTC Vive and Vive Pro VR Headsets */
- { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
- { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("OVR", 0x0001, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("OVR", 0x0003, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("OVR", 0x0004, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("OVR", 0x0012, EDID_QUIRK_NON_DESKTOP),
/* Windows Mixed Reality Headsets */
- { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
- { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
- { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
- { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("HPN", 0x3515, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("LEN", 0x0408, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("LEN", 0xb800, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("SEC", 0x144a, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK("AUS", 0xc102, EDID_QUIRK_NON_DESKTOP),
/* Sony PlayStation VR Headset */
- { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("SNY", 0x0704, EDID_QUIRK_NON_DESKTOP),
/* Sensics VR Headsets */
- { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("SEN", 0x1019, EDID_QUIRK_NON_DESKTOP),
/* OSVR HDK and HDK2 VR Headsets */
- { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK("SVR", 0x1019, EDID_QUIRK_NON_DESKTOP),
};
/*
@@ -1905,6 +1910,44 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
+static struct edid *drm_do_get_edid_base_block(
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data, bool *edid_corrupt, int *null_edid_counter)
+{
+ int i;
+ void *edid;
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
+ break;
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
+ if (null_edid_counter)
+ (*null_edid_counter)++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ return edid;
+
+carp:
+ kfree(edid);
+ return ERR_PTR(-EINVAL);
+
+out:
+ kfree(edid);
+ return NULL;
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1938,25 +1981,16 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (override)
return override;
- if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ edid = (u8 *)drm_do_get_edid_base_block(get_edid_block, data,
+ &connector->edid_corrupt,
+ &connector->null_edid_counter);
+ if (IS_ERR_OR_NULL(edid)) {
+ if (IS_ERR(edid))
+ connector_bad_edid(connector, edid, 1);
return NULL;
-
- /* base block fetch */
- for (i = 0; i < 4; i++) {
- if (get_edid_block(data, edid, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(edid, 0, false,
- &connector->edid_corrupt))
- break;
- if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
- connector->null_edid_counter++;
- goto carp;
- }
}
- if (i == 4)
- goto carp;
- /* if there's no extensions, we're done */
+ /* if there's no extensions or no connector, we're done */
valid_extensions = edid[0x7e];
if (valid_extensions == 0)
return (struct edid *)edid;
@@ -2010,8 +2044,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
return (struct edid *)edid;
-carp:
- connector_bad_edid(connector, edid, 1);
out:
kfree(edid);
return NULL;
@@ -2060,6 +2092,72 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+static u32 edid_extract_panel_id(const struct edid *edid)
+{
+ /*
+ * We represent the ID as a 32-bit number so it can easily be compared
+ * with "==".
+ *
+ * NOTE that we deal with endianness differently for the top half
+ * of this ID than for the bottom half. The bottom half (the product
+ * id) gets decoded as little endian by the EDID_PRODUCT_ID because
+ * that's how everyone seems to interpret it. The top half (the mfg_id)
+ * gets stored as big endian because that makes
+ * drm_edid_encode_panel_id() and drm_edid_decode_panel_id() easier
+ * to write (it's easier to extract the ASCII). It doesn't really
+ * matter, though, as long as the number here is unique.
+ */
+ return (u32)edid->mfg_id[0] << 24 |
+ (u32)edid->mfg_id[1] << 16 |
+ (u32)EDID_PRODUCT_ID(edid);
+}
+
+/**
+ * drm_edid_get_panel_id - Get a panel's ID through DDC
+ * @adapter: I2C adapter to use for DDC
+ *
+ * This function reads the first block of the EDID of a panel and (assuming
+ * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
+ * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
+ * supposed to be different for each different model of panel.
+ *
+ * This function is intended to be used during early probing on devices where
+ * more than one panel might be present. Because of its intended use it must
+ * assume that the EDID of the panel is correct, at least as far as the ID
+ * is concerned (in other words, we don't process any overrides here).
+ *
+ * NOTE: it's expected that this function and drm_do_get_edid() will both
+ * read the EDID, but there is no caching between them. Since we're only
+ * reading the first block, hopefully this extra overhead won't be too big.
+ *
+ * Return: A 32-bit ID that should be different for each make/model of panel.
+ * See the functions drm_edid_encode_panel_id() and
+ * drm_edid_decode_panel_id() for some details on the structure of this
+ * ID.
+ */
+
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ u32 panel_id;
+
+ edid = drm_do_get_edid_base_block(drm_do_probe_ddc_edid, adapter,
+ NULL, NULL);
+
+ /*
+ * There are no manufacturer IDs of 0, so if there is a problem reading
+ * the EDID then we'll just return 0.
+ */
+ if (IS_ERR_OR_NULL(edid))
+ return 0;
+
+ panel_id = edid_extract_panel_id(edid);
+ kfree(edid);
+
+ return panel_id;
+}
+EXPORT_SYMBOL(drm_edid_get_panel_id);
+
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
@@ -2104,25 +2202,6 @@ EXPORT_SYMBOL(drm_edid_duplicate);
/*** EDID parsing ***/
/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(const struct edid *edid, const char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
@@ -2130,14 +2209,13 @@ static bool edid_vendor(const struct edid *edid, const char *vendor)
*/
static u32 edid_get_quirks(const struct edid *edid)
{
+ u32 panel_id = edid_extract_panel_id(edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ if (quirk->panel_id == panel_id)
return quirk->quirks;
}
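The panel-ID helpers are intended to let panel drivers pick per-panel data before the full EDID is parsed. A sketch of such a lookup, using the two-argument drm_edid_encode_panel_id() form seen in the EDID_QUIRK() macro above (table contents and foo_* names are illustrative):

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>

struct foo_panel_entry {
	u32 panel_id;
	unsigned int hpd_absent_delay_ms;
};

static const struct foo_panel_entry foo_panel_table[] = {
	{ drm_edid_encode_panel_id("BOE", 0x0771), 200 },
};

static const struct foo_panel_entry *foo_lookup_panel(struct i2c_adapter *ddc)
{
	u32 panel_id = drm_edid_get_panel_id(ddc);
	unsigned int i;

	/* 0 means the base EDID block could not be read or validated. */
	if (!panel_id)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(foo_panel_table); i++)
		if (foo_panel_table[i].panel_id == panel_id)
			return &foo_panel_table[i];

	return NULL;
}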
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a61946374c82..0e0986dfbe0c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -162,6 +166,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
return PTR_ERR(pages);
}
+ /*
+ * TODO: Allocating WC pages which are correctly flushed is only
+ * supported on x86. Ideal solution would be a GFP_WC flag, which also
+ * ttm_pool.c could use.
+ */
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
shmem->pages = pages;
return 0;
@@ -203,6 +217,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->pages_use_count > 0)
return;
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
+
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
@@ -542,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
} else {
page = shmem->pages[page_offset];
- ret = vmf_insert_page(vma, vmf->address, page);
+ ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
mutex_unlock(&shmem->pages_lock);
@@ -612,7 +631,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return ret;
}
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 43cf7e887d1a..bfa386b98134 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
- ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d29907955ff7..5d82891c3222 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
- if (err)
- return err;
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
- return 0;
+ return err;
}
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index be4a52dc4d6f..8b8744dcf691 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-/**
- * drm_ioctl_permit - Check ioctl permissions against caller
- *
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
- *
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions.
- *
- * Returns:
- * Zero if allowed, -EACCES otherwise.
- */
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
@@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
/**
* DOC: driver specific ioctls
@@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
- return -ENOTTY;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index f933da1656eb..47e92400548d 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- /*
- * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable
- * console.
- */
- if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
- IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
- !IS_ENABLED(CONFIG_EXPERT))
- request_module_nowait("fbcon");
-
return drm_dp_aux_dev_init();
}
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 997b8827fed2..37c34146eea8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -231,6 +231,9 @@ EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
* return either the associated struct drm_panel or drm_bridge device. Either
* @panel or @bridge must not be NULL.
*
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_of_get_bridge() instead.
+ *
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_find_panel_or_bridge(const struct device_node *np,
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index f6bdec7fa925..62e8ccc7ab9c 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
+ .width = 1280,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -134,6 +140,20 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Chuwi HiBook (CWI514) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Chuwi Hi10 Pro (CWI529) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -193,6 +213,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
+ }, { /* KD Kurio Smart C15200 2-in-1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution checks makes the quirk
@@ -211,10 +238,15 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* Lenovo Ideapad D330 */
+ }, { /* Lenovo Ideapad D330-10IGM (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
@@ -225,6 +257,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* Samsung GalaxyBook 10.6 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
+ },
+ .driver_data = (void *)&lcd1280x1920_rightside_up,
+ }, { /* Valve Steam Deck */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1d009494af8b..deb23dbec8b5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg;
- struct scatterlist *sge;
size_t max_segment = 0;
+ int err;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sg)
@@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
max_segment = dma_max_mapping_size(dev->dev);
if (max_segment == 0)
max_segment = UINT_MAX;
- sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sge)) {
+ err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (err) {
kfree(sg);
- sg = ERR_CAST(sge);
+ sg = ERR_PTR(err);
}
return sg;
}
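The replacement helper returns a plain error code instead of a scatterlist pointer, so the error path no longer needs ERR_CAST(). A reduced sketch of the same pattern outside drm_prime (the foo_* name and fixed max_segment parameter are illustrative):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *foo_pages_to_sgt(struct page **pages,
					 unsigned int nr_pages,
					 unsigned int max_segment)
{
	struct sg_table *sgt;
	int err;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
						(unsigned long)nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	return sgt;
}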
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 111b932cf2a9..f783d4963d4b 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,7 +25,7 @@
#define DEBUG /* for pr_debug() */
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 968a9560b4aa..76ff6ec3421b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -10,6 +10,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
+static struct device_type drm_sysfs_device_connector = {
+ .name = "drm_connector",
+};
+
struct class *drm_class;
+#ifdef CONFIG_ACPI
+static bool drm_connector_acpi_bus_match(struct device *dev)
+{
+ return dev->type == &drm_sysfs_device_connector;
+}
+
+static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
+{
+ struct drm_connector *connector = to_drm_connector(dev);
+
+ return to_acpi_device_node(connector->fwnode);
+}
+
+static struct acpi_bus_type drm_connector_acpi_bus = {
+ .name = "drm_connector",
+ .match = drm_connector_acpi_bus_match,
+ .find_companion = drm_connector_acpi_find_companion,
+};
+
+static void drm_sysfs_acpi_register(void)
+{
+ register_acpi_bus_type(&drm_connector_acpi_bus);
+}
+
+static void drm_sysfs_acpi_unregister(void)
+{
+ unregister_acpi_bus_type(&drm_connector_acpi_bus);
+}
+#else
+static void drm_sysfs_acpi_register(void) { }
+static void drm_sysfs_acpi_unregister(void) { }
+#endif
+
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -85,6 +123,8 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
+
+ drm_sysfs_acpi_register();
return 0;
}
@@ -97,11 +137,17 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
+ drm_sysfs_acpi_unregister();
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
}
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/*
* Connector properties
*/
@@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct device *kdev;
+ int r;
if (connector->kdev)
return 0;
- connector->kdev =
- device_create_with_groups(drm_class, dev->primary->kdev, 0,
- connector, connector_dev_groups,
- "card%d-%s", dev->primary->index,
- connector->name);
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ device_initialize(kdev);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_connector;
+ kdev->parent = dev->primary->kdev;
+ kdev->groups = connector_dev_groups;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, connector);
+
+ r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
+ if (r)
+ goto err_free;
+
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
- if (IS_ERR(connector->kdev)) {
- DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
- return PTR_ERR(connector->kdev);
+ r = device_add(kdev);
+ if (r) {
+ drm_err(dev, "failed to register connector device: %d\n", r);
+ goto err_free;
}
+ connector->kdev = kdev;
+
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
return 0;
+
+err_free:
+ put_device(kdev);
+ return r;
}
void drm_sysfs_connector_remove(struct drm_connector *connector)
@@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_status_event);
-static void drm_sysfs_release(struct device *dev)
-{
- kfree(dev);
-}
-
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
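
Editor's note: the drm_sysfs_connector_add() rework above replaces device_create_with_groups() with open-coded device_initialize(), dev_set_name() and device_add(), with drm_sysfs_release() as the release callback; any failure after initialization is unwound with put_device(), so the kzalloc'd device is freed exactly once. Below is a standalone sketch of that two-phase init/refcount pattern, assuming toy names (object_initialize, object_add, object_put) in place of the kernel device model.

/*
 * Userspace sketch (not the kernel device model) of the two-phase pattern
 * adopted above: initialize an object with a refcount and a release
 * callback first, then "add" it; every failure path after initialization
 * drops the reference, and the release callback is the single place that
 * frees the memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
	void (*release)(struct object *obj);
};

static void object_initialize(struct object *obj, void (*release)(struct object *))
{
	obj->refcount = 1;
	obj->release = release;
}

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);	/* only the last reference frees */
}

static void my_release(struct object *obj)
{
	printf("releasing\n");
	free(obj);
}

/* Stand-in for device_add(): may fail after the object is initialized. */
static int object_add(struct object *obj, int simulate_failure)
{
	return simulate_failure ? -1 : 0;
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;

	object_initialize(obj, my_release);

	if (object_add(obj, 1)) {
		/* mirrors the err_free: label in drm_sysfs_connector_add() */
		object_put(obj);
		return 1;
	}

	object_put(obj);
	return 0;
}

The point of the design is that once the release callback is installed, no path needs to decide between kfree() and device teardown; dropping the reference always does the right thing.
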
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 76d38561c910..cf741c5c82d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_mmu_context) {
struct etnaviv_iommu_context *old_context = gpu->mmu_context;
- etnaviv_iommu_context_get(mmu_context);
- gpu->mmu_context = mmu_context;
+ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
etnaviv_iommu_context_put(old_context);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8f1b5af47dd6..f0b2540e60e4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- etnaviv_iommu_context_get(mmu_context);
- mapping->context = mmu_context;
+ mapping->context = etnaviv_iommu_context_get(mmu_context);
mapping->use = 1;
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 4dd7d9d541c0..486259e154af 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
submit->ctx = file->driver_priv;
- etnaviv_iommu_context_get(submit->ctx->mmu);
- submit->mmu_context = submit->ctx->mmu;
+ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index c297fffe06eb..cc5b07f86346 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
+ gpu->fe_running = false;
+ gpu->exec_state = -1;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
+
return 0;
}
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
+
+ gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping);
u16 prefetch;
+ u32 address;
/* setup the MMU */
- etnaviv_iommu_restore(gpu, gpu->mmu_context);
+ etnaviv_iommu_restore(gpu, context);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
+ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
}
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
- gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- if (!gpu->mmu_context) {
- etnaviv_iommu_context_get(submit->mmu_context);
- gpu->mmu_context = submit->mmu_context;
- etnaviv_gpu_start_fe_idleloop(gpu);
- } else {
- etnaviv_iommu_context_get(gpu->mmu_context);
- submit->prev_mmu_context = gpu->mmu_context;
- }
+ if (!gpu->fe_running)
+ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->mmu_context) {
+ if (gpu->initialized && gpu->fe_running) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
*/
etnaviv_gpu_wait_idle(gpu, 100);
- etnaviv_iommu_context_put(gpu->mmu_context);
- gpu->mmu_context = NULL;
+ gpu->fe_running = false;
}
gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+
if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
etnaviv_iommu_global_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8ea48697d132..1c75c8ed5bce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
bool initialized;
+ bool fe_running;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 1a7c89a67bea..afe5dd6a9925 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
/* set base addresses */
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f8bf488e9d71..d664ae29ae20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)v2_context->mtlb_dma,
(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
+ if (gpu->mmu_context)
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = etnaviv_iommu_context_get(context);
+
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index dab1b58006d8..9fb1a2aadbcb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
etnaviv_iommu_remove_mapping(context, m);
+ etnaviv_iommu_context_put(m->context);
m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index d1d6902fd13b..e4a0b7d09c2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
kref_get(&ctx->refcount);
+ return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
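
Editor's note: etnaviv_iommu_context_get() now returns the context it references, so callers can take the reference and assign the pointer in a single statement, as the earlier etnaviv hunks do. A tiny standalone sketch of that get-returns-its-argument idiom, with a plain int standing in for the kref:

/*
 * Standalone sketch of the "get() returns its argument" idiom used for
 * etnaviv_iommu_context_get() above; the refcount here is a plain int
 * rather than a struct kref, purely for illustration.
 */
#include <assert.h>
#include <stdio.h>

struct context {
	int refcount;
};

/* Take a reference and hand the pointer back so callers can write x = ctx_get(y). */
static struct context *ctx_get(struct context *ctx)
{
	ctx->refcount++;
	return ctx;
}

static void ctx_put(struct context *ctx)
{
	assert(ctx->refcount > 0);
	ctx->refcount--;
}

int main(void)
{
	struct context ctx = { .refcount = 1 };
	struct context *users_context;

	/* reference and assignment in a single statement, as in the diff */
	users_context = ctx_get(&ctx);

	printf("refcount=%d\n", users_context->refcount);	/* prints 2 */

	ctx_put(users_context);
	ctx_put(&ctx);
	return 0;
}

Returning the pointer keeps the reference acquisition and the pointer assignment lexically inseparable, which is exactly the mistake the refactoring guards against.
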
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index feb6da1b6ceb..180bb633d5c5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
if (ret)
goto out_unlock;
+ drm_sched_job_arm(&submit->sched_job);
+
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
@@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
- drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+ drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
mutex_unlock(&submit->gpu->fence_lock);
diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig
index 1c8601bf4d91..9c1e61f9eec3 100644
--- a/drivers/gpu/drm/gud/Kconfig
+++ b/drivers/gpu/drm/gud/Kconfig
@@ -2,7 +2,7 @@
config DRM_GUD
tristate "GUD USB Display"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 642a5b5a1b81..335ba9f43d8f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d8162951b78f..9903a78df896 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2251,6 +2251,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return BIT(PIPE_A) | BIT(PIPE_B);
+ else
+ return BIT(PIPE_A);
+}
+
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2268,8 +2277,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
if (!pipe_config->splitter.enable)
return;
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+ if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
@@ -2301,10 +2309,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
return;
if (crtc_state->splitter.enable) {
- /* Splitter enable is supported for pipe A only. */
- if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
- return;
-
dss1 |= SPLITTER_ENABLE;
dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
if (crtc_state->splitter.link_count == 2)
@@ -4663,12 +4667,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /* Splitter enable for eDP MSO is limited to certain pipes. */
- if (dig_port->dp.mso_link_count) {
- encoder->pipe_mask = BIT(PIPE_A);
- if (IS_ALDERLAKE_P(dev_priv))
- encoder->pipe_mask |= BIT(PIPE_B);
- }
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
}
/* In theory we don't need the encoder->type check, but leave it just in
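
Editor's note: the intel_ddi change above centralizes the platform-dependent set of pipes that may drive the eDP MSO splitter in intel_ddi_splitter_pipe_mask(), and both the readout WARN and encoder->pipe_mask now consume it. A minimal standalone sketch of that shared-mask-helper pattern, with a bool-style flag standing in for IS_ALDERLAKE_P():

/*
 * Small standalone sketch of the helper introduced above: one function owns
 * the platform-dependent mask of pipes that support the eDP MSO splitter,
 * and both the config readout check and the encoder pipe mask reuse it.
 */
#include <stdio.h>

#define BIT(n)	(1u << (n))

enum pipe { PIPE_A, PIPE_B, PIPE_C };

static unsigned int splitter_pipe_mask(int is_adlp)
{
	if (is_adlp)
		return BIT(PIPE_A) | BIT(PIPE_B);
	return BIT(PIPE_A);
}

int main(void)
{
	int is_adlp = 1;
	enum pipe pipe = PIPE_B;

	/* readout check: warn if the splitter is enabled on an unsupported pipe */
	if (!(splitter_pipe_mask(is_adlp) & BIT(pipe)))
		printf("splitter enabled on unsupported pipe %d\n", pipe);
	else
		printf("pipe %d may use the splitter\n", pipe);

	/* encoder init: restrict the encoder to the supported pipes */
	printf("encoder pipe_mask = 0x%x\n", splitter_pipe_mask(is_adlp));
	return 0;
}
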
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index a35424133f1b..134a6acbd8fb 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -5841,16 +5841,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_DITHER_6_BPC;
+ val |= PIPEMISC_6_BPC;
break;
case 24:
- val |= PIPEMISC_DITHER_8_BPC;
+ val |= PIPEMISC_8_BPC;
break;
case 30:
- val |= PIPEMISC_DITHER_10_BPC;
+ val |= PIPEMISC_10_BPC;
break;
case 36:
- val |= PIPEMISC_DITHER_12_BPC;
+ /* Port output 12BPC defined for ADLP+ */
+ if (DISPLAY_VER(dev_priv) > 12)
+ val |= PIPEMISC_12_BPC_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -5903,15 +5905,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
- switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
- case PIPEMISC_DITHER_6_BPC:
+ switch (tmp & PIPEMISC_BPC_MASK) {
+ case PIPEMISC_6_BPC:
return 18;
- case PIPEMISC_DITHER_8_BPC:
+ case PIPEMISC_8_BPC:
return 24;
- case PIPEMISC_DITHER_10_BPC:
+ case PIPEMISC_10_BPC:
return 30;
- case PIPEMISC_DITHER_12_BPC:
- return 36;
+ /*
+ * PORT OUTPUT 12 BPC defined for ADLP+.
+ *
+ * TODO:
+ * For previous platforms with DSI interface, bits 5:7
+ * are used for storing pipe_bpp irrespective of dithering.
+ * Since the value of 12 BPC is not defined for these bits
+ * on older platforms, need to find a workaround for 12 BPC
+ * MIPI DSI HW readout.
+ */
+ case PIPEMISC_12_BPC_ADLP:
+ if (DISPLAY_VER(dev_priv) > 12)
+ return 36;
+ fallthrough;
default:
MISSING_CASE(tmp);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 5da293369f30..cce1a926fcc1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -6329,13 +6329,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
IS_BROXTON(i915)) {
bxt_enable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6344,13 +6344,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
IS_BROXTON(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
- /* Tweaked Wa_14010685332:icp,jsp,mcc */
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
void intel_display_power_suspend(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 75d4ebc66941..abe3d61b6243 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
- sizeof(intel_dp->edp_dpcd))
+ sizeof(intel_dp->edp_dpcd)) {
drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
+ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+ }
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
@@ -3842,23 +3845,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
return;
if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
- drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
return;
- }
if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
- drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
return;
- }
if (val & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 08bceae40aa8..508a514c5e37 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
{
@@ -849,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
}
if (ret)
- intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cff72679ad7c..9ccf4b29b82e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
if (vm)
i915_vm_close(vm);
- if (ctx->syncobj)
- drm_syncobj_put(ctx->syncobj);
-
ctx->file_priv = ERR_PTR(-EBADF);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 13b217f75055..e5ae9c06510c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (is_swiotlb_active()) {
+ if (is_swiotlb_active(obj->base.dev->dev)) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 771eb2963123..f0a61a9474fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -214,7 +214,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(i915_tt);
}
@@ -382,7 +381,6 @@ i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- struct scatterlist *sg;
struct sg_table *st;
int ret;
@@ -393,13 +391,13 @@ static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
if (!st)
return ERR_PTR(-ENOMEM);
- sg = __sg_alloc_table_from_pages
- (st, ttm->pages, ttm->num_pages, 0,
- (unsigned long)ttm->num_pages << PAGE_SHIFT,
- i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
+ ret = sg_alloc_table_from_pages_segment(st,
+ ttm->pages, ttm->num_pages,
+ 0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
+ i915_sg_segment_size(), GFP_KERNEL);
+ if (ret) {
kfree(st);
- return ERR_CAST(sg);
+ return ERR_PTR(ret);
}
ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 468a7a617fbf..8ea0fa665e53 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
unsigned int sg_page_sizes;
- struct scatterlist *sg;
struct page **pvec;
int ret;
@@ -148,13 +147,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = obj->userptr.pvec;
alloc_table:
- sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
- num_pages << PAGE_SHIFT, max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+ num_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (ret)
goto err;
- }
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index ffae7df5e4d7..4a6bb64c3a35 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import != &obj->base) {
pr_err("i915_gem_prime_import created a new object!\n");
err = -EINVAL;
goto out_import;
}
- import_obj = to_intel_bo(import);
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
PTR_ERR(import));
err = PTR_ERR(import);
+ } else {
+ err = 0;
}
dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
err = PTR_ERR(import);
goto out_dmabuf;
}
+ import_obj = to_intel_bo(import);
if (import == &obj->base) {
pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_import;
}
- import_obj = to_intel_bo(import);
-
i915_gem_object_lock(import_obj, NULL);
err = __i915_gem_object_get_pages(import_obj);
if (err) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index b20f5621f62b..a2c34e5a1c54 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
return I915_MMAP_TYPE_GTT;
}
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+ unsigned long size)
+{
+ if (HAS_LMEM(i915)) {
+ struct intel_memory_region *sys_region =
+ i915->mm.regions[INTEL_REGION_SMEM];
+
+ return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+ }
+
+ return i915_gem_object_create_internal(i915, size);
+}
+
static bool assert_mmap_offset(struct drm_i915_private *i915,
unsigned long size,
int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
u64 offset;
int ret;
- obj = i915_gem_object_create_internal(i915, size);
+ obj = create_sys_or_internal(i915, size);
if (IS_ERR(obj))
return expected && expected == PTR_ERR(obj);
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_mm_node *hole, *next;
int loop, err = 0;
u64 offset;
+ int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
/* Disable background reaper */
disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
/* Too large */
- if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
}
/* Fill the hole, further allocation attempts should then fail */
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = create_sys_or_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto err_obj;
}
- if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
err = -EINVAL;
goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
bool no_map;
- if (HAS_LMEM(i915))
+ if (obj->ops->mmap_offset)
return type == I915_MMAP_TYPE_FIXED;
else if (type == I915_MMAP_TYPE_FIXED)
return false;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index 51dbe0e3294e..d2969f68dd64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
-#include <stddef.h>
+#include <linux/stddef.h>
struct intel_engine_cs;
struct intel_gt;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d812b27835f8..591a5224287e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
- return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+ return freq;
}
static u32 intel_rps_get_req(u32 pureq)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index c4a126c8caef..1257f4f11e66 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)
i915_vma_put(timeline->hwsp_ggtt);
i915_active_fini(&timeline->active);
+
+ /*
+ * A small race exists between intel_gt_retire_requests_timeout and
+ * intel_timeline_exit which could result in the syncmap not getting
+	 * free'd. Rather than work too hard to seal this race, simply clean up
+ * the syncmap on fini.
+ */
+ i915_syncmap_free(&timeline->sync);
+
kfree(timeline);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index b104fb7607eb..86c318516e14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
__uc_free_load_err_log(uc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return intel_guc_ct_enabled(&guc->ct);
-}
-
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(!guc_communication_enabled(guc));
+ GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
spin_lock_irq(&guc->irq_lock);
if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
struct drm_i915_private *i915 = gt->i915;
int ret;
- GEM_BUG_ON(guc_communication_enabled(guc));
+ GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return 0;
/* Make sure we enable communication if and only if it's disabled */
- GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+ GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
if (enable_communication)
guc_enable_communication(guc);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 06024d321a1a..cde0a477fb49 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(_MMIO(0xb110), D_BDW);
+ MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
D_BDW_PLUS, NULL, force_nonpriv_write);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ac98f8aba31..7efa386449d1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -885,7 +885,7 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int intel_vgpu_open(struct mdev_device *mdev)
+static int intel_vgpu_open_device(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
@@ -1004,7 +1004,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
vgpu->handle = 0;
}
-static void intel_vgpu_release(struct mdev_device *mdev)
+static void intel_vgpu_close_device(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1753,8 +1753,8 @@ static struct mdev_parent_ops intel_vgpu_ops = {
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
- .open = intel_vgpu_open,
- .release = intel_vgpu_release,
+ .open_device = intel_vgpu_open_device,
+ .close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
.write = intel_vgpu_write,
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index b8ac80765461..f776c470914d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+ {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0f08bcfbe964..9cf6ac575de1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) >= 12) {
int i;
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ /*
+ * SFC_DONE resides in the VD forcewake domain, so it
+ * only exists if the corresponding VCS engine is
+ * present.
+ */
+ if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ continue;
+
err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
gt->sfc_done[i]);
+ }
err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
}
@@ -1598,6 +1607,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) >= 12) {
for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ /*
+ * SFC_DONE resides in the VD forcewake domain, so it
+ * only exists if the corresponding VCS engine is
+ * present.
+ */
+ if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ continue;
+
gt->sfc_done[i] =
intel_uncore_read(uncore, GEN12_SFC_DONE(i));
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d336218b67..9bc4f4a8e12e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3079,24 +3079,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- /*
- * Wa_14010685332:cnp/cmp,tgp,adp
- * TODO: Clarify which platforms this applies to
- * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
- * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
- */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
- (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
- SBCLK_RUN_REFCLK_DIS);
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
- }
-}
-
static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3130,7 +3112,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
- cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3174,8 +3155,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
-
- cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 167eaa87501b..664970f2bc62 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6203,11 +6203,17 @@ enum {
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
-#define PIPEMISC_DITHER_8_BPC (0 << 5)
-#define PIPEMISC_DITHER_10_BPC (1 << 5)
-#define PIPEMISC_DITHER_6_BPC (2 << 5)
-#define PIPEMISC_DITHER_12_BPC (3 << 5)
+/*
+ * For Display < 13, bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of 6, 8 and 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of
+ * 6, 8, 10 and 12 BPC.
+ */
+#define PIPEMISC_BPC_MASK (7 << 5)
+#define PIPEMISC_8_BPC (0 << 5)
+#define PIPEMISC_10_BPC (1 << 5)
+#define PIPEMISC_6_BPC (2 << 5)
+#define PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */
#define PIPEMISC_DITHER_ENABLE (1 << 4)
#define PIPEMISC_DITHER_TYPE_MASK (3 << 2)
#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ef114b6aa691..846c1aae69c8 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -683,7 +683,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
width = ipu_src_rect_width(new_state);
height = drm_rect_height(&new_state->src) >> 16;
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index be192f6da9f1..1c2f4799f421 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -202,6 +202,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
unsigned long status, val, val1;
int plane_id, dma0_state, dma1_state;
struct kmb_drm_private *kmb = to_kmb(dev);
+ u32 ctrl = 0;
status = kmb_read_lcd(kmb, LCD_INT_STATUS);
@@ -226,6 +227,19 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
kmb->plane_status[plane_id].ctrl);
+ ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+ if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
+ LCD_CTRL_VL2_ENABLE |
+ LCD_CTRL_GL1_ENABLE |
+ LCD_CTRL_GL2_ENABLE))) {
+ /* If no LCD layers are using DMA,
+ * then disable DMA pipelined AXI read
+ * transactions.
+ */
+ kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
+ LCD_CTRL_PIPELINE_DMA);
+ }
+
kmb->plane_status[plane_id].disable = false;
}
}
@@ -425,10 +439,10 @@ static const struct drm_driver kmb_driver = {
.fops = &fops,
DRM_GEM_CMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
- .desc = "KEEMBAY DISPLAY DRIVER ",
- .date = "20201008",
- .major = 1,
- .minor = 0,
+ .desc = "KEEMBAY DISPLAY DRIVER",
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
};
static int kmb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index 02e806712a64..ebbaa5f422d5 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -15,6 +15,11 @@
#define KMB_MAX_HEIGHT 1080 /*Max height in pixels */
#define KMB_MIN_WIDTH 1920 /*Max width in pixels */
#define KMB_MIN_HEIGHT 1080 /*Max height in pixels */
+
+#define DRIVER_DATE "20210223"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 1
+
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index d5b6195856d1..ecee6782612d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -427,8 +427,14 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
- /* FIXME no doc on how to set output format,these values are
- * taken from the Myriadx tests
+ /* Enable pipeline AXI read transactions for the DMA
+ * after setting graphics layers. This must be done
+ * in a separate write cycle.
+ */
+ kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
+ /* FIXME no doc on how to set output format, these values are taken
+ * from the Myriadx tests
*/
out_format |= LCD_OUTF_FORMAT_RGB888;
@@ -526,6 +532,11 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
plane->id = i;
}
+ /* Disable pipeline AXI read transactions for the DMA
+ * prior to setting graphics layers
+ */
+ kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
+
return primary;
cleanup:
drmm_kfree(drm, plane);
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index de62966243cd..640acc060467 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
+ return drm_sched_job_add_implicit_dependencies(&task->base,
+ &bo->base.base,
+ write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
if (err)
return err;
- err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ err = drm_sched_job_add_dependency(&submit->task->base, fence);
if (err) {
dma_fence_put(fence);
return err;
@@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
goto err_out2;
}
- fence = lima_sched_context_queue_task(
- submit->ctx->context + submit->pipe, submit->task);
+ fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index dba8329937a3..99d5f6f1a882 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task,
return err;
}
+ drm_sched_job_arm(&task->base);
+
task->num_bos = num_bos;
task->vm = lima_vm_get(vm);
- xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
-
return 0;
}
void lima_sched_task_fini(struct lima_sched_task *task)
{
- struct dma_fence *fence;
- unsigned long index;
int i;
drm_sched_job_cleanup(&task->base);
- xa_for_each(&task->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&task->deps);
-
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
drm_gem_object_put(&task->bos[i]->base.base);
@@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
drm_sched_entity_fini(&context->base);
}
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task)
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
trace_lima_task_submit(task);
- drm_sched_entity_push_job(&task->base, &context->base);
+ drm_sched_entity_push_job(&task->base);
return fence;
}
-static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *entity)
-{
- struct lima_sched_task *task = to_lima_task(job);
-
- if (!xa_empty(&task->deps))
- return xa_erase(&task->deps, task->last_dep++);
-
- return NULL;
-}
-
static int lima_pm_busy(struct lima_device *ldev)
{
int ret;
@@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
}
static const struct drm_sched_backend_ops lima_sched_ops = {
- .dependency = lima_sched_dependency,
.run_job = lima_sched_run_job,
.timedout_job = lima_sched_timedout_job,
.free_job = lima_sched_free_job,
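
Editor's note: the lima hunks drop the driver-private xarray of dependency fences and the .dependency callback; dependencies are handed to the common scheduler through drm_sched_job_add_dependency() and drm_sched_job_add_implicit_dependencies(), and drm_sched_job_arm()/drm_sched_entity_push_job() then operate on the armed job alone. A plain-C sketch of that ownership move, using an int array as a stand-in for the fence list:

/*
 * Illustrative sketch (plain C, no DRM APIs) of moving dependency tracking
 * from the driver into a shared job structure: drivers only add fences,
 * the common scheduler pops them before running the job.
 */
#include <stdio.h>

#define MAX_DEPS 8

struct job {
	int deps[MAX_DEPS];	/* stand-ins for dma_fence pointers */
	int nr_deps;
	int next_dep;
};

/* driver side: just record the dependency on the job */
static int job_add_dependency(struct job *job, int fence)
{
	if (job->nr_deps == MAX_DEPS)
		return -1;
	job->deps[job->nr_deps++] = fence;
	return 0;
}

/* scheduler side: consume dependencies until none remain, then run */
static void scheduler_run(struct job *job)
{
	while (job->next_dep < job->nr_deps)
		printf("waiting on fence %d\n", job->deps[job->next_dep++]);
	printf("running job\n");
}

int main(void)
{
	struct job job = { {0}, 0, 0 };

	job_add_dependency(&job, 11);
	job_add_dependency(&job, 42);
	scheduler_run(&job);
	return 0;
}
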
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 90f03c48ef4a..6a11764d87b3 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -23,9 +23,6 @@ struct lima_sched_task {
struct lima_vm *vm;
void *frame;
- struct xarray deps;
- unsigned long last_dep;
-
struct lima_bo **bos;
int num_bos;
@@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index e60566a5739c..5b5afc6aaf8e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev)
struct drm_device *drm;
struct mcde *mcde;
struct component_match *match = NULL;
- struct resource *res;
u32 pid;
int irq;
int ret;
@@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev)
goto clk_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcde->regs = devm_ioremap_resource(dev, res);
+ mcde->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcde->regs)) {
dev_err(dev, "no MCDE regs\n");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 180ebbccbeda..5651734ce977 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mcde_dsi *d;
struct mipi_dsi_host *host;
- struct resource *res;
u32 dsi_id;
int ret;
@@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev)
return PTR_ERR(d->prcmu);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- d->regs = devm_ioremap_resource(dev, res);
+ d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(d->regs))
return PTR_ERR(d->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 64b45284766a..f46d4ab73d6a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -18,6 +18,7 @@
#define DISP_AAL_EN 0x0000
#define AAL_EN BIT(0)
#define DISP_AAL_SIZE 0x0030
+#define DISP_AAL_OUTPUT_SIZE 0x04d8
struct mtk_disp_aal_data {
@@ -57,6 +58,7 @@ void mtk_aal_config(struct device *dev, unsigned int w,
struct mtk_disp_aal *aal = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_OUTPUT_SIZE);
}
void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6f4c80bbc0eb..473f5bb5cbad 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index fa9d79963cd3..5326989d5206 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
+ component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 96d35770c844..4554e2de1430 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct mtk_dpi *dpi = bridge->driver_private;
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
unsigned int out_bus_format;
out_bus_format = bridge_state->output_bus_cfg.format;
+ if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+ if (dpi->conf->num_output_fmts)
+ out_bus_format = dpi->conf->output_fmts[0];
+
dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
bridge_state->input_bus_cfg.format,
bridge_state->output_bus_cfg.format);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c5fb8fa914b0..5f81489fc60c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -627,13 +627,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- const struct drm_plane_helper_funcs *plane_helper_funcs =
- plane->helper_private;
if (!mtk_crtc->enabled)
return;
- plane_helper_funcs->atomic_update(plane, state);
mtk_drm_crtc_update_config(mtk_crtc, false);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 1667a7e7de38..734a1fb052df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
true, true);
}
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ struct mtk_plane_state *mtk_plane_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_gem_object *gem;
+ struct mtk_drm_gem_obj *mtk_gem;
+ unsigned int pitch, format;
+ dma_addr_t addr;
+
+ gem = fb->obj[0];
+ mtk_gem = to_mtk_gem_obj(gem);
+ addr = mtk_gem->dma_addr;
+ pitch = fb->pitches[0];
+ format = fb->format->format;
+
+ addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+ addr += (new_state->src.y1 >> 16) * pitch;
+
+ mtk_plane_state->pending.enable = true;
+ mtk_plane_state->pending.pitch = pitch;
+ mtk_plane_state->pending.format = format;
+ mtk_plane_state->pending.addr = addr;
+ mtk_plane_state->pending.x = new_state->dst.x1;
+ mtk_plane_state->pending.y = new_state->dst.y1;
+ mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+ mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+ mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
static void mtk_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);
- new_plane_state->pending.async_dirty = true;
+ mtk_plane_update_new_state(new_state, new_plane_state);
+ wmb(); /* Make sure the above parameters are set before update */
+ new_plane_state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, state);
}
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
- struct drm_crtc *crtc = new_state->crtc;
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_object *gem;
- struct mtk_drm_gem_obj *mtk_gem;
- unsigned int pitch, format;
- dma_addr_t addr;
- if (!crtc || WARN_ON(!fb))
+ if (!new_state->crtc || WARN_ON(!new_state->fb))
return;
if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
return;
}
- gem = fb->obj[0];
- mtk_gem = to_mtk_gem_obj(gem);
- addr = mtk_gem->dma_addr;
- pitch = fb->pitches[0];
- format = fb->format->format;
-
- addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
- addr += (new_state->src.y1 >> 16) * pitch;
-
- mtk_plane_state->pending.enable = true;
- mtk_plane_state->pending.pitch = pitch;
- mtk_plane_state->pending.format = format;
- mtk_plane_state->pending.addr = addr;
- mtk_plane_state->pending.x = new_state->dst.x1;
- mtk_plane_state->pending.y = new_state->dst.y1;
- mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
- mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
- mtk_plane_state->pending.rotation = new_state->rotation;
+ mtk_plane_update_new_state(new_state, mtk_plane_state);
wmb(); /* Make sure the above parameters are set before update */
mtk_plane_state->pending.dirty = true;
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bc0d60df04ae..7f41a33592c8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->compat = match->compat;
priv->afbcd.ops = match->afbcd_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto free_drm;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 2ed87cfdd735..0afbd1e70bfc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct dw_hdmi_plat_data *dw_plat_data;
struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
- struct resource *res;
int irq;
int ret;
@@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
+ meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 446e7961da48..0f3cafab8860 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -634,6 +634,11 @@
#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16)
+
/* osd2 scaler */
#define OSD2_VSC_PHASE_STEP 0x3d00
#define OSD2_VSC_INI_PHASE 0x3d01
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index aede0c67a57f..259f3e6bec90 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
- else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
+ /* fix green/pink color distortion from vendor u-boot */
+ writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+ OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+ priv->io_base + _REG(OSD1_HDR2_CTRL));
+ }
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index 7c903cf19c0d..e9ae22b4f813 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -124,6 +124,7 @@ static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clo
unsigned int computed;
m = n = p = s = 0;
+ delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;
for (testp = 8; testp > 0; testp /= 2) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 2e482cdd7b3c..b131fd376192 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
static const struct dpu_mdp_cfg sm8250_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
- .base = 0x0, .len = 0x45C,
+ .base = 0x0, .len = 0x494,
.features = 0,
.highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 6b0a7bc87eb7..b466784d9822 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -45,20 +45,13 @@ static void dpu_mdss_irq(struct irq_desc *desc)
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
- unsigned int mapping;
int rc;
- mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
- hwirq);
- if (mapping == 0) {
- DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
- break;
- }
-
- rc = generic_handle_irq(mapping);
+ rc = generic_handle_domain_irq(dpu_mdss->irq_controller.domain,
+ hwirq);
if (rc < 0) {
- DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
- hwirq, mapping, rc);
+ DRM_ERROR("handle irq fail: irq=%lu rc=%d\n",
+ hwirq, rc);
break;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 09bd46ad820b..2f4895bcb0b0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -50,8 +50,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
while (intr) {
irq_hw_number_t hwirq = fls(intr) - 1;
- generic_handle_irq(irq_find_mapping(
- mdp5_mdss->irqcontroller.domain, hwirq));
+ generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
intr &= ~(1 << hwirq);
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index c22b07f68670..4c619307612c 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/kthread.h>
#include <linux/devcoredump.h>
-#include <stdarg.h>
#include "msm_kms.h"
#define MSM_DISP_SNAPSHOT_MAX_BLKS 10
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 21b6ac857710..cc2bb8295329 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -775,6 +775,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
dp_catalog->width_blanking);
dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 419fd4a14cbf..fbe4c2cd52a3 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -216,6 +216,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
+ dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
@@ -1314,6 +1315,9 @@ static int dp_pm_resume(struct device *dev)
else
dp->dp_display.is_connected = false;
+ dp_display_handle_plugged_change(g_dp_display,
+ dp->dp_display.is_connected);
+
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
dp->link->sink_count, dp->dp_display.is_connected,
dp->core_initialized, dp_display->power_on);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index e39a8e7ad843..54ca0817d807 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -309,11 +309,6 @@ struct msm_gem_submit {
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* Hw fence, which is created when the scheduler executes the job, and
* is signaled when the hw finishes (via seqno write from cmdstream)
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fdc5367aecaa..924b01b9c105 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
-
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
@@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
- unsigned long index;
- struct dma_fence *fence;
unsigned i;
if (submit->fence_id) {
@@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
mutex_unlock(&submit->queue->lock);
}
- xa_for_each (&submit->deps, index, fence) {
- dma_fence_put(fence);
- }
-
- xa_destroy(&submit->deps);
-
dma_fence_put(submit->user_fence);
dma_fence_put(submit->hw_fence);
@@ -340,11 +330,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
return ret;
}
- if (no_implicit)
+ /* exclusive fences must be ordered */
+ if (no_implicit && !write)
continue;
- ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
- write);
+ ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+ obj,
+ write);
if (ret)
break;
}
@@ -588,7 +580,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (ret)
break;
- ret = drm_gem_fence_array_add(&submit->deps, fence);
+ ret = drm_sched_job_add_dependency(&submit->base, fence);
if (ret)
break;
@@ -798,7 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = drm_gem_fence_array_add(&submit->deps, in_fence);
+ ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
goto out_unlock;
}
@@ -878,6 +870,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ drm_sched_job_arm(&submit->base);
+
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
/*
@@ -889,17 +883,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->fence_id < 0) {
ret = submit->fence_id = 0;
submit->fence_id = 0;
- goto out;
}
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto out;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
}
submit_attach_object_fences(submit);
@@ -907,7 +900,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
- drm_sched_entity_push_job(&submit->base, &queue->entity);
+ drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index eed2a762e9dd..bcaddbba564d 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg);
+
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
if (!ttbr1_cfg)
return ERR_PTR(-ENODEV);
+ /*
+ * Defer setting the fault handler until we have a valid adreno_smmu
+ * to avoid accidentally installing a GPU specific fault handler for
+ * the display's iommu
+ */
+ iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
if (!pagetable)
return ERR_PTR(-ENOMEM);
@@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- iommu_set_fault_handler(domain, msm_fault_handler, iommu);
atomic_set(&iommu->pagetables, 0);
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bd54c1412649..652b1dedd7c1 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
-static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *s_entity)
-{
- struct msm_gem_submit *submit = to_msm_submit(job);
-
- if (!xa_empty(&submit->deps))
- return xa_erase(&submit->deps, submit->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
@@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
}
const struct drm_sched_backend_ops msm_sched_ops = {
- .dependency = msm_job_dependency,
.run_job = msm_job_run,
.free_job = msm_job_free
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index bb5d1cacd97e..d7b9f7f8c9e3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2268,6 +2268,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
+ /* Finish updating head(s)...
+ *
+ * NVD is rather picky about both where window assignments can change,
+ * *and* about certain core and window channel states matching.
+ *
+ * The EFI GOP driver on newer GPUs configures window channels with a
+ * different output format to what we do, and the core channel update
+ * in the assign_windows case above would result in a state mismatch.
+ *
+ * Delay some of the head update until after that point to work around
+ * the issue. This only affects the initial modeset.
+ *
+ * TODO: handle this better when adding flexible window mapping
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+ struct nv50_head *head = nv50_head(crtc);
+
+ NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+ asyh->set.mask, asyh->clr.mask);
+
+ if (asyh->set.mask) {
+ nv50_head_flush_set_wndw(head, asyh);
+ interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+ }
+ }
+
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index ec361d17e900..d66f97280282 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
}
void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- if (asyh->set.view ) head->func->view (head, asyh);
- if (asyh->set.mode ) head->func->mode (head, asyh);
- if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.load);
head->func->olut_set(head, asyh);
}
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ if (asyh->set.view ) head->func->view (head, asyh);
+ if (asyh->set.mode ) head->func->mode (head, asyh);
+ if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index dae841dc05fd..0bac6be9ba34 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head {
struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
void nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 0b86c44878e0..59759c4fb62e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -4,7 +4,8 @@
struct nv_device_v0 {
__u8 version;
- __u8 pad01[7];
+ __u8 priv;
+ __u8 pad02[6];
__u64 device; /* device identifier, ~0 for client default */
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index ba2c28ea43d2..c68cc957248e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -61,8 +61,6 @@
#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 347d2c020bd1..5d9395e651b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -9,7 +9,6 @@ struct nvif_client {
const struct nvif_driver *driver;
u64 version;
u8 route;
- bool super;
};
int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 8e85b936eaa0..7a3af05f7f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -11,7 +11,7 @@ struct nvif_driver {
void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
- int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+ int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
bool keep;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 5d7017fe5039..2f86606e708c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -13,7 +13,6 @@ struct nvkm_client {
struct nvkm_client_notify *notify[32];
struct rb_root objroot;
- bool super;
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index 71ed147ad077..f52918a43246 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -4,5 +4,5 @@
#include <core/os.h>
struct nvkm_client;
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0911e73f7424..70e7887ef4b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -15,7 +15,6 @@ struct nvkm_vma {
u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
bool used:1; /* Region allocated. */
bool part:1; /* Region was split from an allocated region by map(). */
- bool user:1; /* Region user-allocated. */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b45ec3086285..4107b7006539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
}
client->route = NVDRM_OBJECT_ABI16;
- client->super = true;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->super = false;
client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6d07e653f82d..33dca2565cca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1277,6 +1277,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
if (slave)
return;
+ nouveau_ttm_tt_unbind(bdev, ttm);
+
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
@@ -1290,8 +1292,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 40362600eed2..80099ef75702 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
if (chan) {
struct nouveau_cli *cli = (void *)chan->user.client;
- bool super;
-
- if (cli) {
- super = cli->base.super;
- cli->base.super = true;
- }
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
-
- if (cli)
- cli->base.super = super;
}
*pchan = NULL;
}
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- bool super;
int ret;
/* hack until fencenv50 is fixed, and agp access relaxed */
- super = cli->base.super;
- cli->base.super = true;
-
ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
- goto done;
+ return ret;
}
}
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
if (ret) {
NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
- goto done;
+ return ret;
}
ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
if (ret)
nouveau_channel_del(pchan);
-done:
- cli->base.super = super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5e1ff870823b..1f828c9f691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
&(struct nv_device_v0) {
.device = ~0,
+ .priv = true,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
@@ -1083,8 +1084,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (ret)
goto done;
- cli->base.super = false;
-
fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 0de6549fb875..2ca3207c13fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
- bool super;
- int ret;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
return -ENOSYS;
}
- super = vmm->object.client->super;
- vmm->object.client->super = true;
- ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
- &mem->mem, 0);
- vmm->object.client->super = super;
- return ret;
+ return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
}
void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_mem_ram_v0 args = {};
- bool super = cli->base.super;
u8 type;
int ret;
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
reg->num_pages << PAGE_SHIFT,
&args, sizeof(args), &mem->mem);
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
return ret;
}
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
- bool super = cli->base.super;
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
int ret;
mutex_lock(&drm->master.lock);
- cli->base.super = true;
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- cli->base.super = super;
mutex_unlock(&drm->master.lock);
reg->start = mem->mem.addr >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index b3f29b1ce9ea..52f5793b7274 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
}
static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
{
- return nvkm_ioctl(priv, super, data, size, hack);
+ return nvkm_ioctl(priv, data, size, hack);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 256ec5b35473..85c03c83259b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
- nouveau_sgdma_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 82b583f5fca8..b0c3422cb01f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -237,14 +237,11 @@ void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
if (limit > start) {
- bool super = svmm->vmm->vmm.object.client->super;
- svmm->vmm->vmm.object.client->super = true;
nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
&(struct nvif_vmm_pfnclr_v0) {
.addr = start,
.size = limit - start,
}, sizeof(struct nvif_vmm_pfnclr_v0));
- svmm->vmm->vmm.object.client->super = super;
}
}
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
NVIF_VMM_PFNMAP_V0_A |
NVIF_VMM_PFNMAP_V0_HOST;
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
nouveau_hmm_convert_pfn(drm, &range, args);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
mutex_lock(&svmm->mutex);
- svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
npages * sizeof(args->p.phys[0]), NULL);
- svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f4c2e46b6fe1..2ca9d9a9e5d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -276,7 +276,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = is_swiotlb_active();
+ need_swiotlb = is_swiotlb_active(dev->dev);
#endif
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 9dc10b17ad34..5da1f4d223d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -32,6 +32,9 @@
#include <nvif/event.h>
#include <nvif/ioctl.h>
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
struct usif_notify_p {
struct drm_pending_event base;
struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
}
static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
{
struct nouveau_cli *cli = nouveau_cli(f);
struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
struct usif_object *object;
int ret = -ENOSYS;
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.oclass) {
+ case NV_DMA_FROM_MEMORY:
+ case NV_DMA_TO_MEMORY:
+ case NV_DMA_IN_MEMORY:
+ return -EINVAL;
+ case NV_DEVICE: {
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
+
+ if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+ return ret;
+
+ args->v0.priv = false;
+ break;
+ }
+ default:
+ if (!parent_abi16)
+ return -EINVAL;
+ break;
+ }
+
if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
list_add(&object->head, &cli->objects);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- args->v0.token = object->token;
- args->v0.route = object->route;
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ args->v0.route = NVDRM_OBJECT_USIF;
+ args->v0.token = (unsigned long)(void *)object;
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret) {
+ usif_object_dtor(object);
+ return ret;
}
- if (ret)
- usif_object_dtor(object);
- return ret;
+ args->v0.token = object->token;
+ args->v0.route = object->route;
+ return 0;
}
int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
struct nvif_ioctl_v0 v0;
} *argv = data;
struct usif_object *object;
+ bool abi16 = false;
u8 owner;
int ret;
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
mutex_unlock(&cli->mutex);
goto done;
}
+
+ abi16 = true;
}
switch (argv->v0.type) {
case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc);
+ ret = usif_object_new(filp, data, size, argv, argc, abi16);
break;
case NVIF_IOCTL_V0_NTFY_NEW:
ret = usif_notify_new(filp, data, size, argv, argc);
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 12644f811b3e..a3264a0e933a 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -32,7 +32,7 @@
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
- return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+ return client->driver->ioctl(client->object.priv, data, size, NULL);
}
int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
- client->super = true;
client->driver = parent->driver;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 671a5c0199e0..dce1ecee2af5 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
} else
return -ENOSYS;
- return client->driver->ioctl(client->object.priv, client->super,
- data, size, hack);
+ return client->driver->ioctl(client->object.priv, data, size, hack);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index d777df5a64e6..735cb6816f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
}
int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
- void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
{
struct nvkm_object *object = &client->object;
union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
} *args = data;
int ret = -ENOSYS;
- client->super = supervisor;
nvif_ioctl(object, "size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b930f539feec..93ddf63d1114 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2624,6 +2624,26 @@ nv174_chipset = {
.dma = { 0x00000001, gv100_dma_new },
};
+static const struct nvkm_device_chip
+nv177_chipset = {
+ .name = "GA107",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index fea9d8f2b10c..f28894fdede9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
return ret;
/* give privileged clients register access */
- if (client->super)
+ if (args->v0.priv)
func = &nvkm_udevice_super;
else
func = &nvkm_udevice;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 55fbfe28c6dc..9669472a2749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}
-static void
+void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 428b3f488f03..e484d0c3b0d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dffcac249211..129982fef7ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
+#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
+
+ /* The EFI GOP driver on Ampere can leave unused DP links routed,
+ * which we don't expect. The DisableLT IED script *should* get
+ * us back to where we need to be.
+ */
+ if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+ nvkm_dp_disable(outp, ior);
+
return;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index d20cc0681a88..797131ed7d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -26,7 +26,6 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
-#include <subdev/instmem.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
union {
struct nv_dma_v0 v0;
} *args = *pdata;
- struct nvkm_device *device = dma->engine.subdev.device;
- struct nvkm_client *client = oclass->client;
struct nvkm_object *parent = oclass->parent;
- struct nvkm_instmem *instmem = device->imem;
- struct nvkm_fb *fb = device->fb;
void *data = *pdata;
u32 size = *psize;
int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_V0_TARGET_VRAM:
- if (!client->super) {
- if (dmaobj->limit >= fb->ram->size - instmem->reserved)
- return -EACCES;
- if (device->card_type >= NV_50)
- return -EACCES;
- }
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_V0_TARGET_PCI:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_V0_TARGET_PCI_US:
case NV_DMA_V0_TARGET_AGP:
- if (!client->super)
- return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 90e9a0972a44..3209eb7af65f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
nvkm-y += nvkm/engine/fifo/dmanv10.o
nvkm-y += nvkm/engine/fifo/dmanv17.o
nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
nvkm-y += nvkm/engine/fifo/gpfifonv50.o
nvkm-y += nvkm/engine/fifo/gpfifog84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index af8bdf275552..3a95730d7ff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644
index fc34cddcd2f5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct g82_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
- nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
- .base.oclass = G82_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644
index 8043718ad150..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_object *parent = oclass->parent;
- union {
- struct nv50_channel_dma_v0 v0;
- } *args = data;
- struct nv50_fifo *fifo = nv50_fifo(base);
- struct nv50_fifo_chan *chan;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create channel dma size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
- "pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vmm, args->v0.pushbuf,
- args->v0.offset);
- if (!args->v0.pushbuf)
- return -EINVAL;
- } else
- return ret;
-
- if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
- return -ENOMEM;
- *pobject = &chan->base.object;
-
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
- oclass, chan);
- if (ret)
- return ret;
-
- args->v0.chid = chan->base.chid;
-
- nvkm_kmap(chan->ramfc);
- nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
- nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
- nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
- nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
- nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
- nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
- nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
- nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
- nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nvkm_done(chan->ramfc);
- return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
- .base.oclass = NV50_CHANNEL_DMA,
- .base.minver = 0,
- .base.maxver = 0,
- .ctor = nv50_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index c0a7d0f21dac..3885c3830b94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -119,7 +119,6 @@ g84_fifo = {
.uevent_init = g84_fifo_uevent_init,
.uevent_fini = g84_fifo_uevent_fini,
.chan = {
- &g84_fifo_dma_oclass,
&g84_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index b6900a52bcce..ae6c4d846eb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gk104_fifo_gpfifo_new_(fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index ee4967b706a7..743791c514fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
index abef7fb6e2d3..99aafa103a31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
"runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.runlist, args->v0.priv);
- if (args->v0.priv && !oclass->client->super)
- return -EINVAL;
return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index be94156ea248..a08742cf425a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -136,7 +136,6 @@ nv50_fifo = {
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.chan = {
- &nv50_fifo_dma_oclass,
&nv50_fifo_gpfifo_oclass,
NULL
},
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
index fac2f9a45ea6..e530bb8b3b17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
object = nvkm_object_search(client, handle, &nvkm_umem);
if (IS_ERR(object)) {
- if (client->super && client != master) {
+ if (client != master) {
spin_lock(&master->lock);
list_for_each_entry(umem, &master->umem, head) {
if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
}
} else {
umem = nvkm_umem(object);
- if (!umem->priv || client->super)
- memory = nvkm_memory_ref(umem->memory);
+ memory = nvkm_memory_ref(umem->memory);
}
return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
umem->mmu = mmu;
umem->type = mmu->type[type].type;
- umem->priv = oclass->client->super;
INIT_LIST_HEAD(&umem->head);
*pobject = &umem->object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
index 85cf692d620a..d56a594016cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -8,7 +8,6 @@ struct nvkm_umem {
struct nvkm_object object;
struct nvkm_mmu *mmu;
u8 type:8;
- bool priv:1;
bool mappable:1;
bool io:1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
index 0e4b8941da37..6870fda4b188 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
{
struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
- if (mmu->func->mem.user.oclass && oclass->client->super) {
+ if (mmu->func->mem.user.oclass) {
if (index-- == 0) {
oclass->base = mmu->func->mem.user;
oclass->ctor = nvkm_umem_new;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index c43b8248c682..d6a1f8d04c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnclr_v0 v0;
} *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_pfnmap_v0 v0;
} *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (!client->super)
- return -ENOENT;
-
if (size) {
mutex_lock(&vmm->mutex);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_unmap_v0 v0;
} *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto fail;
}
@@ -230,7 +219,6 @@ fail:
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_put_v0 v0;
} *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
- VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
- vma->user, !client->super, vma->busy);
+ if (ret = -ENOENT, vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
@@ -268,7 +255,6 @@ done:
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
- struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_get_v0 v0;
} *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return ret;
args->v0.addr = vma->addr;
- vma->user = !client->super;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 710f3f8dc7c9..8bf00b396ec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
new->refd = vma->refd;
new->used = vma->used;
new->part = vma->part;
- new->user = vma->user;
new->busy = vma->busy;
new->mapped = vma->mapped;
list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
static void
nvkm_vma_dump(struct nvkm_vma *vma)
{
- printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+ printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
vma->addr, (u64)vma->size,
vma->used ? '-' : 'F',
vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
vma->part ? 'P' : '-',
- vma->user ? 'U' : '-',
vma->busy ? 'B' : '-',
vma->mapped ? 'M' : '-',
vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
vma->mapref = true;
vma->sparse = false;
vma->used = true;
- vma->user = true;
nvkm_vmm_node_insert(vmm, vma);
list_add_tail(&vma->head, &vmm->list);
return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
vma->page = NVKM_VMA_PAGE_NONE;
vma->refd = NVKM_VMA_PAGE_NONE;
vma->used = false;
- vma->user = false;
nvkm_vmm_put_region(vmm, vma);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index f02abd9cb4dd..b5e733783b5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -534,15 +534,13 @@ int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
- if (client->super) {
- switch (mthd) {
- case GP100_VMM_VN_FAULT_REPLAY:
- return gp100_vmm_fault_replay(vmm, argv, argc);
- case GP100_VMM_VN_FAULT_CANCEL:
- return gp100_vmm_fault_cancel(vmm, argv, argc);
- default:
- break;
- }
+ switch (mthd) {
+ case GP100_VMM_VN_FAULT_REPLAY:
+ return gp100_vmm_fault_replay(vmm, argv, argc);
+ case GP100_VMM_VN_FAULT_CANCEL:
+ return gp100_vmm_fault_cancel(vmm, argv, argc);
+ default:
+ break;
}
return -EINVAL;
}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index e7281da5bc6a..d6e4df291d6f 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -3,7 +3,6 @@ config DRM_OMAP
tristate "OMAP DRM"
depends on DRM
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
- select OMAP2_DSS
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index beb581b96ecd..369cb76512fe 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -77,14 +77,26 @@ config DRM_PANEL_LVDS
backlight handling if the panel is attached to a backlight controller.
config DRM_PANEL_SIMPLE
- tristate "support for simple panels"
+ tristate "support for simple panels (other than eDP ones)"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ help
+ DRM panel driver for dumb non-eDP panels that need at most a regulator
+ and a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_EDP
+ tristate "support for simple Embedded DisplayPort panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
help
- DRM panel driver for dumb panels that need at most a regulator and
+ DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
that it can be automatically turned off when the panel goes into a
low power state.
@@ -392,6 +404,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
depends on DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6D27A1
+ tristate "Samsung S6D27A1 DPI panel driver"
+ depends on OF && SPI && GPIOLIB
+ select DRM_MIPI_DBI
+ help
+ Say Y here if you want to enable support for the Samsung
+ S6D27A1 DPI 480x800 panel.
+
+ This panel can be found in Samsung Galaxy Ace 2
+ GT-I8160 mobile phone.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c8132050bcec..6e30640b9099 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_EDP) += panel-edp.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
new file mode 100644
index 000000000000..4c37c4f6d26c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -0,0 +1,1895 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_aux_bus.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+
+/**
+ * struct panel_delay - Describes delays for a simple panel.
+ */
+struct panel_delay {
+ /**
+ * @hpd_reliable: Time for HPD to be reliable
+ *
+ * The time (in milliseconds) that it takes after powering the panel
+ * before the HPD signal is reliable. Ideally this is 0 but some panels,
+ * board designs, or bad pulldown configs can cause a glitch here.
+ *
+	 * NOTE: on some old panel data this number appears to be much too big.
+	 * Presumably some old panels simply didn't have HPD hooked up and the
+	 * hpd_absent value was put here because this field predates
+	 * hpd_absent. While that works, it's non-ideal.
+ */
+ unsigned int hpd_reliable;
+
+ /**
+ * @hpd_absent: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect isn't used.
+ *
+ * This is T3-max on eDP timing diagrams or the delay from power on
+ * until HPD is guaranteed to be asserted.
+ */
+ unsigned int hpd_absent;
+
+ /**
+ * @prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when prepare finished and enable may begin. If at
+ * enable time less time has passed since prepare finished,
+ * the driver waits for the remaining time.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ *
+ * This is not specified in a standard way on eDP timing diagrams.
+ * It is effectively the time from HPD going high till you can
+ * turn on the backlight.
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ *
+ * This is (T6-min + max(T7-max, T8-min)) on eDP timing diagrams or
+ * the delay after link training finishes until we can turn the
+ * backlight on and see valid data.
+ */
+ unsigned int enable;
+
+ /**
+ * @disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ *
+ * This is T9-min (delay from backlight off to end of valid video
+ * data) on eDP timing diagrams. It is not common to set.
+ */
+ unsigned int disable;
+
+ /**
+ * @unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ *
+ * This is T12-min on eDP timing diagrams.
+ */
+ unsigned int unprepare;
+};
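As an illustration of how these fields combine, a panel needing 200 ms from power-on until HPD is guaranteed high, 80 ms from HPD until the backlight may be enabled, and 500 ms of off-time before the next power-on could be described as below (hypothetical values, not taken from any datasheet):

static const struct panel_delay example_delay = {
	.hpd_absent = 200,		/* T3-max: power on -> HPD guaranteed asserted */
	.prepare_to_enable = 80,	/* HPD asserted -> backlight may be turned on */
	.unprepare = 500,		/* T12-min: power off -> next power on */
};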
+
+/**
+ * struct panel_desc - Describes a simple panel.
+ */
+struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ */
+ const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
+ unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
+ const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
+ unsigned int num_timings;
+
+ /** @bpc: Bits per color. */
+ unsigned int bpc;
+
+ /** @size: Structure containing the physical size of this panel. */
+ struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
+ unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
+ unsigned int height;
+ } size;
+
+ /** @delay: Structure containing various delay values for this panel. */
+ struct panel_delay delay;
+};
+
+/**
+ * struct edp_panel_entry - Maps panel ID to delay / panel name.
+ */
+struct edp_panel_entry {
+ /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
+ u32 panel_id;
+
+	/** @delay: The power sequencing delays needed for this panel. */
+ const struct panel_delay *delay;
+
+	/** @name: Name of this panel (for printing to logs). */
+ const char *name;
+};
+
+struct panel_edp {
+ struct drm_panel base;
+ bool enabled;
+ bool no_hpd;
+
+ bool prepared;
+
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
+ const struct panel_desc *desc;
+
+ struct regulator *supply;
+ struct i2c_adapter *ddc;
+ struct drm_dp_aux *aux;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
+
+ struct edid *edid;
+
+ struct drm_display_mode override_mode;
+
+ enum drm_panel_orientation orientation;
+};
+
+static inline struct panel_edp *to_panel_edp(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_edp, base);
+}
+
+static unsigned int panel_edp_get_timings_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ bool has_override = panel->override_mode.type;
+ unsigned int num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ if (has_override) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel->override_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num = 1;
+ } else {
+ dev_err(panel->base.dev, "failed to add override mode\n");
+ }
+ }
+
+ /* Only add timings if override was not there or failed to validate */
+ if (num == 0 && panel->desc->num_timings)
+ num = panel_edp_get_timings_modes(panel, connector);
+
+ /*
+ * Only add fixed modes if timings/override added no mode.
+ *
+ * We should only ever have either the display timings specified
+ * or a fixed mode. Anything else is rather bogus.
+ */
+ WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
+ if (num == 0)
+ num = panel_edp_get_display_modes(panel, connector);
+
+ connector->display_info.bpc = panel->desc->bpc;
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+
+ return num;
+}
+
+static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
+static int panel_edp_disable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_edp_suspend(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return 0;
+}
+
+static int panel_edp_unprepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Unpreparing when already unprepared is a no-op */
+ if (!p->prepared)
+ return 0;
+
+ pm_runtime_mark_last_busy(panel->dev);
+ ret = pm_runtime_put_autosuspend(panel->dev);
+ if (ret < 0)
+ return ret;
+ p->prepared = false;
+
+ return 0;
+}
+
+static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int panel_edp_prepare_once(struct panel_edp *p)
+{
+ struct device *dev = p->base.dev;
+ unsigned int delay;
+ int err;
+ int hpd_asserted;
+ unsigned long hpd_wait_us;
+
+ panel_edp_wait(p->unprepared_time, p->desc->delay.unprepare);
+
+ err = regulator_enable(p->supply);
+ if (err < 0) {
+ dev_err(dev, "failed to enable supply: %d\n", err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
+
+ delay = p->desc->delay.hpd_reliable;
+ if (p->no_hpd)
+ delay = max(delay, p->desc->delay.hpd_absent);
+ if (delay)
+ msleep(delay);
+
+ if (p->hpd_gpio) {
+ if (p->desc->delay.hpd_absent)
+ hpd_wait_us = p->desc->delay.hpd_absent * 1000UL;
+ else
+ hpd_wait_us = 2000000;
+
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, hpd_wait_us);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ if (err != -ETIMEDOUT)
+ dev_err(dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ goto error;
+ }
+ }
+
+ p->prepared_time = ktime_get();
+
+ return 0;
+
+error:
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return err;
+}
+
+/*
+ * Some panels simply don't always come up and need to be power cycled to
+ * work properly. We'll allow for a handful of retries.
+ */
+#define MAX_PANEL_PREPARE_TRIES 5
+
+static int panel_edp_resume(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+ int ret;
+ int try;
+
+ for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
+ ret = panel_edp_prepare_once(p);
+ if (ret != -ETIMEDOUT)
+ break;
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "Prepare timeout after %d tries\n", try);
+ else if (try)
+ dev_warn(dev, "Prepare needed %d retries\n", try);
+
+ return ret;
+}
+
+static int panel_edp_prepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Preparing when already prepared is a no-op */
+ if (p->prepared)
+ return 0;
+
+ ret = pm_runtime_get_sync(panel->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(panel->dev);
+ return ret;
+ }
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_edp_enable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int delay;
+
+ if (p->enabled)
+ return 0;
+
+ delay = p->desc->delay.enable;
+
+ /*
+ * If there is a "prepare_to_enable" delay then that's supposed to be
+ * the delay from HPD going high until we can turn the backlight on.
+ * However, we can only count this if HPD is handled by the panel
+ * driver, not if it goes to a dedicated pin on the controller.
+ * If we aren't handling the HPD pin ourselves then the best we
+ * can do is assume that HPD went high immediately before we were
+ * called (and link training took zero time).
+ *
+ * NOTE: if we ever end up in this "if" statement then we're
+ * guaranteed that the panel_edp_wait() call below will do no delay.
+ * It already handles that case, though, so we don't need any special
+ * code for it.
+ */
+ if (p->desc->delay.prepare_to_enable && !p->hpd_gpio && !p->no_hpd)
+ delay = max(delay, p->desc->delay.prepare_to_enable);
+
+ if (delay)
+ msleep(delay);
+
+ panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int panel_edp_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int num = 0;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+ pm_runtime_get_sync(panel->dev);
+
+ if (!p->edid)
+ p->edid = drm_get_edid(connector, p->ddc);
+
+ if (p->edid)
+ num += drm_add_edid_modes(connector, p->edid);
+
+ pm_runtime_mark_last_busy(panel->dev);
+ pm_runtime_put_autosuspend(panel->dev);
+ }
+
+ /*
+ * Add hard-coded panel modes. Don't call this if there are no timings
+ * and no modes (the generic edp-panel case) because it will clobber
+ * the display_info that was already set by drm_add_edid_modes().
+ */
+ if (p->desc->num_timings || p->desc->num_modes)
+ num += panel_edp_get_non_edid_modes(p, connector);
+ else if (!num)
+ dev_warn(p->base.dev, "No display modes\n");
+
+ /* set up connector's "panel orientation" property */
+ drm_connector_set_panel_orientation(connector, p->orientation);
+
+ return num;
+}
+
+static int panel_edp_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
+static const struct drm_panel_funcs panel_edp_funcs = {
+ .disable = panel_edp_disable,
+ .unprepare = panel_edp_unprepare,
+ .prepare = panel_edp_prepare,
+ .enable = panel_edp_enable,
+ .get_modes = panel_edp_get_modes,
+ .get_timings = panel_edp_get_timings,
+};
+
+#define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \
+ (to_check->field.typ >= bounds->field.min && \
+ to_check->field.typ <= bounds->field.max)
+static void panel_edp_parse_panel_timing_node(struct device *dev,
+ struct panel_edp *panel,
+ const struct display_timing *ot)
+{
+ const struct panel_desc *desc = panel->desc;
+ struct videomode vm;
+ unsigned int i;
+
+ if (WARN_ON(desc->num_modes)) {
+ dev_err(dev, "Reject override mode: panel has a fixed mode\n");
+ return;
+ }
+ if (WARN_ON(!desc->num_timings)) {
+ dev_err(dev, "Reject override mode: no timings specified\n");
+ return;
+ }
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+
+ if (!PANEL_EDP_BOUNDS_CHECK(ot, dt, hactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hsync_len) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vsync_len))
+ continue;
+
+ if (ot->flags != dt->flags)
+ continue;
+
+ videomode_from_timing(ot, &vm);
+ drm_display_mode_from_videomode(&vm, &panel->override_mode);
+ panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
+ DRM_MODE_TYPE_PREFERRED;
+ break;
+ }
+
+ if (WARN_ON(!panel->override_mode.type))
+ dev_err(dev, "Reject override mode: No display_timing found\n");
+}
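For reference, PANEL_EDP_BOUNDS_CHECK(ot, dt, hactive) above expands to

	(ot->hactive.typ >= dt->hactive.min && ot->hactive.typ <= dt->hactive.max)

so the device tree override is only accepted when every typical value lies within the range allowed by one of the driver's display_timing entries and the flags match.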
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
+
+static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
+{
+ const struct edp_panel_entry *edp_panel;
+ struct panel_desc *desc;
+ u32 panel_id;
+ char vend[4];
+ u16 product_id;
+ u32 reliable_ms = 0;
+ u32 absent_ms = 0;
+ int ret;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ panel->desc = desc;
+
+ /*
+ * Read the dts properties for the initial probe. These are used by
+ * the runtime resume code which will get called by the
+ * pm_runtime_get_sync() call below.
+ */
+ of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
+ desc->delay.hpd_reliable = reliable_ms;
+ of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
+	desc->delay.hpd_absent = absent_ms;
+
+ /* Power the panel on so we can read the EDID */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
+ goto exit;
+ }
+
+ panel_id = drm_edid_get_panel_id(panel->ddc);
+ if (!panel_id) {
+ dev_err(dev, "Couldn't identify panel via EDID\n");
+ ret = -EIO;
+ goto exit;
+ }
+ drm_edid_decode_panel_id(panel_id, vend, &product_id);
+
+ edp_panel = find_edp_panel(panel_id);
+
+ /*
+ * We're using non-optimized timings and want it really obvious that
+ * someone needs to add an entry to the table, so we'll do a WARN_ON
+ * splat.
+ */
+ if (WARN_ON(!edp_panel)) {
+ dev_warn(dev,
+ "Unknown panel %s %#06x, using conservative timings\n",
+ vend, product_id);
+
+ /*
+ * It's highly likely that the panel will work if we use very
+ * conservative timings, so let's do that. We already know that
+ * the HPD-related delays must have worked since we got this
+ * far, so we really just need the "unprepare" / "enable"
+ * delays. We don't need "prepare_to_enable" since that
+ * overlaps the "enable" delay anyway.
+ *
+		 * Nearly all panels have an "unprepare" delay of 500 ms though
+ * there are a few with 1000. Let's stick 2000 in just to be
+ * super conservative.
+ *
+ * An "enable" delay of 80 ms seems the most common, but we'll
+ * throw in 200 ms to be safe.
+ */
+ desc->delay.unprepare = 2000;
+ desc->delay.enable = 200;
+ } else {
+ dev_info(dev, "Detected %s %s (%#06x)\n",
+ vend, edp_panel->name, product_id);
+
+ /* Update the delay; everything else comes from EDID */
+ desc->delay = *edp_panel->delay;
+ }
+
+ ret = 0;
+exit:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
+ struct drm_dp_aux *aux)
+{
+ struct panel_edp *panel;
+ struct display_timing dt;
+ struct device_node *ddc;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->prepared_time = 0;
+ panel->desc = desc;
+ panel->aux = aux;
+
+ panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_edp_get_hpd_gpio(dev, panel);
+ if (err)
+ return err;
+ }
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return PTR_ERR(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio)) {
+ err = PTR_ERR(panel->enable_gpio);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return err;
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return -EPROBE_DEFER;
+ } else if (aux) {
+ panel->ddc = &aux->ddc;
+ }
+
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_edp_parse_panel_timing_node(dev, panel, &dt);
+
+ dev_set_drvdata(dev, panel);
+
+ drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ goto err_finished_ddc_init;
+
+ /*
+ * We use runtime PM for prepare / unprepare since those power the panel
+ * on and off and those can be very slow operations. This is important
+ * to optimize powering the panel on briefly to read the EDID before
+ * fully enabling the panel.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ if (of_device_is_compatible(dev->of_node, "edp-panel")) {
+ err = generic_edp_panel_probe(dev, panel);
+ if (err) {
+ dev_err_probe(dev, err,
+ "Couldn't detect panel nor find a fallback\n");
+ goto err_finished_pm_runtime;
+ }
+ /* generic_edp_panel_probe() replaces desc in the panel */
+ desc = panel->desc;
+ } else if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10) {
+ dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
+ }
+
+ if (!panel->base.backlight && panel->aux) {
+ pm_runtime_get_sync(dev);
+ err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ if (err)
+ goto err_finished_pm_runtime;
+ }
+
+ drm_panel_add(&panel->base);
+
+ return 0;
+
+err_finished_pm_runtime:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+err_finished_ddc_init:
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ return err;
+}
+
+static int panel_edp_remove(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_remove(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ kfree(panel->edid);
+ panel->edid = NULL;
+
+ return 0;
+}
+
+static void panel_edp_shutdown(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+}
+
+static const struct display_timing auo_b101ean01_timing = {
+ .pixelclock = { 65300000, 72500000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 119, 119 },
+ .hback_porch = { 21, 21, 21 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 8, 8, 8 },
+ .vsync_len = { 18, 20, 20 },
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .timings = &auo_b101ean01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
+static const struct drm_display_mode auo_b116xak01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 10,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 6,
+ .vtotal = 768 + 4 + 6 + 15,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+ .modes = &auo_b116xak01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .enable = 400,
+ },
+};
+
+static const struct drm_display_mode auo_b133han05_mode = {
+ .clock = 142600,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 58,
+ .hsync_end = 1920 + 58 + 42,
+ .htotal = 1920 + 58 + 42 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 54,
+};
+
+static const struct panel_desc auo_b133han05 = {
+ .modes = &auo_b133han05_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133xtn01_mode = {
+ .clock = 69500,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 13,
+};
+
+static const struct panel_desc auo_b133xtn01 = {
+ .modes = &auo_b133xtn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+};
+
+static const struct drm_display_mode auo_b140han06_mode = {
+ .clock = 141000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 152,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 14,
+ .vtotal = 1080 + 3 + 14 + 19,
+};
+
+static const struct panel_desc auo_b140han06 = {
+ .modes = &auo_b140han06_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
+ {
+ .clock = 71900,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+ {
+ .clock = 57500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+};
+
+static const struct panel_desc boe_nv101wxmn51 = {
+ .modes = boe_nv101wxmn51_modes,
+ .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .enable = 50,
+ .unprepare = 500,
+ },
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ /*
+ * When power is first given to the panel there's a short
+ * spike on the HPD line. It was explained that this spike
+ * was until the TCON data download was complete. On
+ * one system this was measured at 8 ms. We'll put 15 ms
+ * in the prepare delay just to be safe. That means:
+ * - If HPD isn't hooked up you still have 200 ms delay.
+ * - If HPD is hooked up we won't try to look at it for the
+ * first 15 ms.
+ */
+ .hpd_reliable = 15,
+ .hpd_absent = 200,
+
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+ {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 2200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1125,
+ },
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+ .modes = boe_nv140fhmn49_modes,
+ .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+ .bpc = 6,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bca_ea1_mode = {
+ .clock = 76420,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 136,
+ .hsync_end = 1366 + 136 + 30,
+ .htotal = 1366 + 136 + 30 + 60,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 12,
+ .vtotal = 768 + 8 + 12 + 12,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bca_ea1 = {
+ .modes = &innolux_n116bca_ea1_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+};
+
+/*
+ * Datasheet specifies that at 60 Hz refresh rate:
+ * - total horizontal time: { 1506, 1592, 1716 }
+ * - total vertical time: { 788, 800, 868 }
+ *
+ * ...but doesn't go into exactly how that should be split into a front
+ * porch, back porch, or sync length. For now we'll leave a single setting
+ * here which allows a bit of tweaking of the pixel clock at the expense of
+ * refresh rate.
+ */
+static const struct display_timing innolux_n116bge_timing = {
+ .pixelclock = { 72600000, 76420000, 80240000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 136, 136, 136 },
+ .hback_porch = { 60, 60, 60 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 8, 8, 8 },
+ .vback_porch = { 12, 12, 12 },
+ .vsync_len = { 12, 12, 12 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .timings = &innolux_n116bge_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n125hce_gn1_mode = {
+ .clock = 162000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 4,
+ .vtotal = 1080 + 4 + 4 + 24,
+};
+
+static const struct panel_desc innolux_n125hce_gn1 = {
+ .modes = &innolux_n125hce_gn1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 276,
+ .height = 155,
+ },
+};
+
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_p120zdg_bf1 = {
+ .modes = &innolux_p120zdg_bf1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 254,
+ .height = 169,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
+ .clock = 81000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 32,
+ .htotal = 1366 + 40 + 32 + 62,
+ .vdisplay = 768,
+ .vsync_start = 768 + 5,
+ .vsync_end = 768 + 5 + 5,
+ .vtotal = 768 + 5 + 5 + 122,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
+ .modes = &kingdisplay_kd116n21_30nv_a010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
+static const struct drm_display_mode lg_lp120up1_mode = {
+ .clock = 162300,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 4,
+ .vtotal = 1280 + 4 + 4 + 12,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+ .modes = &lg_lp120up1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 267,
+ .height = 183,
+ },
+};
+
+static const struct drm_display_mode lg_lp129qe_mode = {
+ .clock = 285250,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1700,
+ .vsync_start = 1700 + 3,
+ .vsync_end = 1700 + 3 + 10,
+ .vtotal = 1700 + 3 + 10 + 36,
+};
+
+static const struct panel_desc lg_lp129qe = {
+ .modes = &lg_lp129qe_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 272,
+ .height = 181,
+ },
+};
+
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
+ .clock = 168480,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 3,
+ .vsync_end = 1280 + 3 + 10,
+ .vtotal = 1280 + 3 + 10 + 57,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc sharp_ld_d5116z01b = {
+ .modes = &sharp_ld_d5116z01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 260,
+ .height = 120,
+ },
+};
+
+static const struct display_timing sharp_lq123p1jx31_timing = {
+ .pixelclock = { 252750000, 252750000, 266604720 },
+ .hactive = { 2400, 2400, 2400 },
+ .hfront_porch = { 48, 48, 48 },
+ .hback_porch = { 80, 80, 84 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1600, 1600, 1600 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 33, 33, 120 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .timings = &sharp_lq123p1jx31_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 50,
+ .unprepare = 550,
+ },
+};
+
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 10 + 200,
+ .enable = 50,
+ .unprepare = 10 + 500,
+ },
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ /* Must be first */
+ .compatible = "edp-panel",
+ }, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
+ .compatible = "auo,b116xa01",
+ .data = &auo_b116xak01,
+ }, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
+ .compatible = "auo,b133han05",
+ .data = &auo_b133han05,
+ }, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
+ .compatible = "auo,b133xtn01",
+ .data = &auo_b133xtn01,
+ }, {
+ .compatible = "auo,b140han06",
+ .data = &auo_b140han06,
+ }, {
+ .compatible = "boe,nv101wxmn51",
+ .data = &boe_nv101wxmn51,
+ }, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv140fhmn49",
+ .data = &boe_nv140fhmn49,
+ }, {
+ .compatible = "innolux,n116bca-ea1",
+ .data = &innolux_n116bca_ea1,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n125hce-gn1",
+ .data = &innolux_n125hce_gn1,
+ }, {
+ .compatible = "innolux,p120zdg-bf1",
+ .data = &innolux_p120zdg_bf1,
+ }, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
+ .compatible = "kingdisplay,kd116n21-30nv-a010",
+ .data = &kingdisplay_kd116n21_30nv_a010,
+ }, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
+ }, {
+ .compatible = "lg,lp120up1",
+ .data = &lg_lp120up1,
+ }, {
+ .compatible = "lg,lp129qe",
+ .data = &lg_lp129qe,
+ }, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
+ }, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "sharp,ld-d5116z01b",
+ .data = &sharp_ld_d5116z01b,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
+ }, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static const struct panel_delay delay_200_500_p2e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 80,
+};
+
+static const struct panel_delay delay_200_500_p2e100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 100,
+};
+
+static const struct panel_delay delay_200_500_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+};
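The shared delay structures appear to be named after their values; a sketch of the apparent convention (an inference, not stated in the patch):

/*
 * delay_<hpd_absent>_<unprepare>[_p2e<prepare_to_enable>][_e<enable>]
 * e.g. delay_200_500_e50: hpd_absent = 200 ms, unprepare = 500 ms, enable = 50 ms.
 */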
+
+#define EDP_PANEL_ENTRY(vend, product_id, _delay, _name) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend, product_id), \
+ .delay = _delay \
+}
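For illustration, the first AUO entry in the table below expands to roughly:

	{
		.name = "B116XAK01",
		.panel_id = drm_edid_encode_panel_id("AUO", 0x405c),
		.delay = &auo_b116xak01.delay,
	},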
+
+/*
+ * This table is used to figure out power sequencing delays for panels that
+ * are detected by EDID. Entries here may point to entries in the
+ * platform_of_match table (if a panel is listed in both places).
+ *
+ * Sort first by vendor, then by product ID.
+ */
+static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY("AUO", 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY("AUO", 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+
+ EDP_PANEL_ENTRY("BOE", 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY("BOE", 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY("BOE", 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY("BOE", 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+
+ EDP_PANEL_ENTRY("CMN", 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+
+ EDP_PANEL_ENTRY("KDB", 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+
+ EDP_PANEL_ENTRY("SHP", 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+
+	{ /* sentinel */ }
+};
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
+{
+ const struct edp_panel_entry *panel;
+
+ if (!panel_id)
+ return NULL;
+
+ for (panel = edp_panels; panel->panel_id; panel++)
+ if (panel->panel_id == panel_id)
+ return panel;
+
+ return NULL;
+}
+
+static int panel_edp_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ /* Skip one since "edp-panel" is only supported on DP AUX bus */
+ id = of_match_node(platform_of_match + 1, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&pdev->dev, id->data, NULL);
+}
+
+static int panel_edp_platform_remove(struct platform_device *pdev)
+{
+ return panel_edp_remove(&pdev->dev);
+}
+
+static void panel_edp_platform_shutdown(struct platform_device *pdev)
+{
+ panel_edp_shutdown(&pdev->dev);
+}
+
+static const struct dev_pm_ops panel_edp_pm_ops = {
+ SET_RUNTIME_PM_OPS(panel_edp_suspend, panel_edp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver panel_edp_platform_driver = {
+ .driver = {
+ .name = "panel-edp",
+ .of_match_table = platform_of_match,
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_platform_probe,
+ .remove = panel_edp_platform_remove,
+ .shutdown = panel_edp_platform_shutdown,
+};
+
+static int panel_edp_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, aux_ep->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&aux_ep->dev, id->data, aux_ep->aux);
+}
+
+static void panel_edp_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_remove(&aux_ep->dev);
+}
+
+static void panel_edp_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_shutdown(&aux_ep->dev);
+}
+
+static struct dp_aux_ep_driver panel_edp_dp_aux_ep_driver = {
+ .driver = {
+ .name = "panel-simple-dp-aux",
+ .of_match_table = platform_of_match, /* Same as platform one! */
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_dp_aux_ep_probe,
+ .remove = panel_edp_dp_aux_ep_remove,
+ .shutdown = panel_edp_dp_aux_ep_shutdown,
+};
+
+static int __init panel_edp_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&panel_edp_platform_driver);
+ if (err < 0)
+ return err;
+
+ err = dp_aux_dp_driver_register(&panel_edp_dp_aux_ep_driver);
+ if (err < 0)
+ goto err_did_platform_register;
+
+ return 0;
+
+err_did_platform_register:
+ platform_driver_unregister(&panel_edp_platform_driver);
+
+ return err;
+}
+module_init(panel_edp_init);
+
+static void __exit panel_edp_exit(void)
+{
+ dp_aux_dp_driver_unregister(&panel_edp_dp_aux_ep_driver);
+ platform_driver_unregister(&panel_edp_platform_driver);
+}
+module_exit(panel_edp_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple eDP Panels");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index f80b44a8a700..dfb43b1374e7 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -60,6 +60,9 @@
#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
+#define OTM8009A_HDISPLAY 480
+#define OTM8009A_VDISPLAY 800
+
struct otm8009a {
struct device *dev;
struct drm_panel panel;
@@ -70,19 +73,35 @@ struct otm8009a {
bool enabled;
};
-static const struct drm_display_mode default_mode = {
- .clock = 29700,
- .hdisplay = 480,
- .hsync_start = 480 + 98,
- .hsync_end = 480 + 98 + 32,
- .htotal = 480 + 98 + 32 + 98,
- .vdisplay = 800,
- .vsync_start = 800 + 15,
- .vsync_end = 800 + 15 + 10,
- .vtotal = 800 + 15 + 10 + 14,
- .flags = 0,
- .width_mm = 52,
- .height_mm = 86,
+static const struct drm_display_mode modes[] = {
+ { /* 50 Hz, preferred */
+ .clock = 29700,
+ .hdisplay = 480,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
+ { /* 60 Hz */
+ .clock = 33000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 70,
+ .hsync_end = 480 + 70 + 32,
+ .htotal = 480 + 70 + 32 + 72,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 16,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
};
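As a quick sanity check of the two timings above, refresh = clock / (htotal * vtotal):

/*
 * 29700 kHz / (708 * 839) ~= 50.0 Hz   (708 = 480+98+32+98, 839 = 800+15+10+14)
 * 33000 kHz / (654 * 841) ~= 60.0 Hz   (654 = 480+70+32+72, 841 = 800+15+10+16)
 */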
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
@@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Default portrait 480x800 rgb24 */
dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_column_address(dsi, 0,
- default_mode.hdisplay - 1);
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
if (ret)
return ret;
- ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
if (ret)
return ret;
@@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &default_mode);
- if (!mode) {
- dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
- return -ENOMEM;
+ unsigned int num_modes = ARRAY_SIZE(modes);
+ unsigned int i;
+
+ for (i = 0; i < num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev, &modes[i]);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ modes[i].hdisplay,
+ modes[i].vdisplay,
+ drm_mode_vrefresh(&modes[i]));
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ /* Setting first mode as preferred */
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
}
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
- return 1;
+ return num_modes;
}
static const struct drm_panel_funcs otm8009a_drm_funcs = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
new file mode 100644
index 000000000000..1696ceb36aa0
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
+ * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
+ */
+
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
+#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
+#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
+#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
+#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
+#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
+#define S6D27A1_DISPCTL 0xF2 /* Display Control */
+#define S6D27A1_MANPWR 0xF3 /* Manual Control */
+#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
+#define S6D27A1_SRCCTL 0xF6 /* Source Control */
+#define S6D27A1_PANELCTL	0xF7	/* Panel Control */
+
+static const u8 s6d27a1_dbi_read_commands[] = {
+ S6D27A1_READID1,
+ S6D27A1_READID2,
+ S6D27A1_READID3,
+ 0, /* sentinel */
+};
+
+struct s6d27a1 {
+ struct device *dev;
+ struct mipi_dbi dbi;
+ struct drm_panel panel;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data regulators[2];
+};
+
+static const struct drm_display_mode s6d27a1_480_800_mode = {
+ /*
+ * The vendor driver states that the S6D27A1 panel
+ * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
+ */
+ .clock = 24960,
+ .hdisplay = 480,
+ .hsync_start = 480 + 63,
+ .hsync_end = 480 + 63 + 2,
+ .htotal = 480 + 63 + 2 + 63,
+ .vdisplay = 800,
+ .vsync_start = 800 + 11,
+ .vsync_end = 800 + 11 + 2,
+ .vtotal = 800 + 11 + 2 + 10,
+ .width_mm = 50,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
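For reference, the refresh rate implied by the mode above:

/* 24960 kHz / (608 * 823) ~= 49.9 Hz   (608 = 480+63+2+63, 823 = 800+11+2+10) */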
+
+static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6d27a1, panel);
+}
+
+static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 1\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 2\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 3\n");
+ return;
+ }
+ dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
+}
+
+static int s6d27a1_power_on(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ int ret;
+
+ /* Power up */
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+
+ /* Assert reset >=1 ms */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ usleep_range(1000, 5000);
+ /* De-assert reset */
+ gpiod_set_value_cansleep(ctx->reset, 0);
+ /* Wait >= 10 ms */
+ msleep(20);
+
+ /*
+ * Exit sleep mode and initialize display - some hammering is
+ * necessary.
+ */
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(120);
+
+ /* Magic to unlock level 2 control of the display */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
+
+ /* Configure resolution to 480RGBx800 */
+ mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
+
+ mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
+
+ mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
+
+ mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x44, 0x05, 0x88, 0x4B, 0x50);
+
+ mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
+ 0x01, 0x09, 0x0D, 0x0A, 0x0E,
+ 0x0B, 0x0F, 0x0C, 0x10, 0x01,
+ 0x11, 0x12, 0x13, 0x14, 0x05,
+ 0x06, 0x07, 0x08, 0x01, 0x09,
+ 0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
+ 0x0C, 0x10, 0x01, 0x11, 0x12,
+ 0x13, 0x14);
+
+ /* lock the level 2 control */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
+
+ s6d27a1_read_mtp_id(ctx);
+
+ return 0;
+}
+
+static int s6d27a1_power_off(struct s6d27a1 *ctx)
+{
+ /* Go into RESET and disable regulators */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+}
+
+static int s6d27a1_unprepare(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(120);
+	return s6d27a1_power_off(ctx);
+}
+
+static int s6d27a1_disable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(25);
+
+ return 0;
+}
+
+static int s6d27a1_prepare(struct drm_panel *panel)
+{
+ return s6d27a1_power_on(to_s6d27a1(panel));
+}
+
+static int s6d27a1_enable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+
+ return 0;
+}
+
+static int s6d27a1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
+ if (!mode) {
+ dev_err(ctx->dev, "failed to add mode\n");
+ return -ENOMEM;
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bus_flags =
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6d27a1_drm_funcs = {
+ .disable = s6d27a1_disable,
+ .unprepare = s6d27a1_unprepare,
+ .prepare = s6d27a1_prepare,
+ .enable = s6d27a1_enable,
+ .get_modes = s6d27a1_get_modes,
+};
+
+static int s6d27a1_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct s6d27a1 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ /*
+ * VCI is the analog voltage supply
+ * VCCIO is the digital I/O voltage supply
+ */
+ ctx->regulators[0].supply = "vci";
+ ctx->regulators[1].supply = "vccio";
+ ret = devm_regulator_bulk_get(dev,
+ ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset)) {
+ ret = PTR_ERR(ctx->reset);
+ return dev_err_probe(dev, ret, "no RESET GPIO\n");
+ }
+
+ ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
+
+ ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
+
+ drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add backlight\n");
+
+ spi_set_drvdata(spi, ctx);
+
+ drm_panel_add(&ctx->panel);
+
+ return 0;
+}
+
+static int s6d27a1_remove(struct spi_device *spi)
+{
+ struct s6d27a1 *ctx = spi_get_drvdata(spi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id s6d27a1_match[] = {
+ { .compatible = "samsung,s6d27a1", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6d27a1_match);
+
+static struct spi_driver s6d27a1_driver = {
+ .probe = s6d27a1_probe,
+ .remove = s6d27a1_remove,
+ .driver = {
+ .name = "s6d27a1-panel",
+ .of_match_table = s6d27a1_match,
+ },
+};
+module_spi_driver(s6d27a1_driver);
+
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 0e4f3cac0fef..7f3e1b84b5f5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -36,8 +35,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -95,44 +92,6 @@ struct panel_desc {
unsigned int prepare;
/**
- * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
- *
- * Add this to the prepare delay if we know Hot Plug Detect
- * isn't used.
- */
- unsigned int hpd_absent_delay;
-
- /**
- * @delay.prepare_to_enable: Time between prepare and enable.
- *
- * The minimum time, in milliseconds, that needs to have passed
- * between when prepare finished and enable may begin. If at
- * enable time less time has passed since prepare finished,
- * the driver waits for the remaining time.
- *
- * If a fixed enable delay is also specified, we'll start
- * counting before delaying for the fixed delay.
- *
- * If a fixed prepare delay is also specified, we won't start
- * counting until after the fixed delay. We can't overlap this
- * fixed delay with the min time because the fixed delay
- * doesn't happen at the end of the function if a HPD GPIO was
- * specified.
- *
- * In other words:
- * prepare()
- * ...
- * // do fixed prepare delay
- * // wait for HPD GPIO if applicable
- * // start counting for prepare_to_enable
- *
- * enable()
- * // do fixed enable delay
- * // enforce prepare_to_enable min time
- */
- unsigned int prepare_to_enable;
-
- /**
* @delay.enable: Time for the panel to display a valid frame.
*
* The time (in milliseconds) that it takes for the panel to
@@ -176,7 +135,6 @@ struct panel_desc {
struct panel_simple {
struct drm_panel base;
bool enabled;
- bool no_hpd;
bool prepared;
@@ -187,10 +145,8 @@ struct panel_simple {
struct regulator *supply;
struct i2c_adapter *ddc;
- struct drm_dp_aux *aux;
struct gpio_desc *enable_gpio;
- struct gpio_desc *hpd_gpio;
struct edid *edid;
@@ -374,30 +330,10 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
-static int panel_simple_get_hpd_gpio(struct device *dev, struct panel_simple *p)
-{
- int err;
-
- p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
-
- return 0;
-}
-
-static int panel_simple_prepare_once(struct panel_simple *p)
+static int panel_simple_resume(struct device *dev)
{
- struct device *dev = p->base.dev;
- unsigned int delay;
+ struct panel_simple *p = dev_get_drvdata(dev);
int err;
- int hpd_asserted;
- unsigned long hpd_wait_us;
panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
@@ -409,68 +345,12 @@ static int panel_simple_prepare_once(struct panel_simple *p)
gpiod_set_value_cansleep(p->enable_gpio, 1);
- delay = p->desc->delay.prepare;
- if (p->no_hpd)
- delay += p->desc->delay.hpd_absent_delay;
- if (delay)
- msleep(delay);
-
- if (p->hpd_gpio) {
- if (p->desc->delay.hpd_absent_delay)
- hpd_wait_us = p->desc->delay.hpd_absent_delay * 1000UL;
- else
- hpd_wait_us = 2000000;
-
- err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
- hpd_asserted, hpd_asserted,
- 1000, hpd_wait_us);
- if (hpd_asserted < 0)
- err = hpd_asserted;
-
- if (err) {
- if (err != -ETIMEDOUT)
- dev_err(dev,
- "error waiting for hpd GPIO: %d\n", err);
- goto error;
- }
- }
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
p->prepared_time = ktime_get();
return 0;
-
-error:
- gpiod_set_value_cansleep(p->enable_gpio, 0);
- regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
-
- return err;
-}
-
-/*
- * Some panels simply don't always come up and need to be power cycled to
- * work properly. We'll allow for a handful of retries.
- */
-#define MAX_PANEL_PREPARE_TRIES 5
-
-static int panel_simple_resume(struct device *dev)
-{
- struct panel_simple *p = dev_get_drvdata(dev);
- int ret;
- int try;
-
- for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
- ret = panel_simple_prepare_once(p);
- if (ret != -ETIMEDOUT)
- break;
- }
-
- if (ret == -ETIMEDOUT)
- dev_err(dev, "Prepare timeout after %d tries\n", try);
- else if (try)
- dev_warn(dev, "Prepare needed %d retries\n", try);
-
- return ret;
}
static int panel_simple_prepare(struct drm_panel *panel)
@@ -503,8 +383,6 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
-
p->enabled = true;
return 0;
@@ -660,8 +538,7 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
-static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
- struct drm_dp_aux *aux)
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct panel_simple *panel;
struct display_timing dt;
@@ -677,14 +554,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
panel->enabled = false;
panel->prepared_time = 0;
panel->desc = desc;
- panel->aux = aux;
-
- panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
- if (!panel->no_hpd) {
- err = panel_simple_get_hpd_gpio(dev, panel);
- if (err)
- return err;
- }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -712,8 +581,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (!panel->ddc)
return -EPROBE_DEFER;
- } else if (aux) {
- panel->ddc = &aux->ddc;
}
if (desc == &panel_dpi) {
@@ -749,9 +616,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
- if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10)
- dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
- break;
+ dev_warn(dev, "eDP panels moved to panel-edp\n");
+ err = -EINVAL;
+ goto free_ddc;
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -798,15 +665,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (err)
goto disable_pm_runtime;
- if (!panel->base.backlight && panel->aux) {
- pm_runtime_get_sync(dev);
- err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- if (err)
- goto disable_pm_runtime;
- }
-
drm_panel_add(&panel->base);
return 0;
@@ -815,7 +673,7 @@ disable_pm_runtime:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
free_ddc:
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return err;
@@ -831,7 +689,7 @@ static int panel_simple_remove(struct device *dev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return 0;
@@ -970,28 +828,6 @@ static const struct panel_desc auo_b101aw03 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing auo_b101ean01_timing = {
- .pixelclock = { 65300000, 72500000, 75000000 },
- .hactive = { 1280, 1280, 1280 },
- .hfront_porch = { 18, 119, 119 },
- .hback_porch = { 21, 21, 21 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 800, 800, 800 },
- .vfront_porch = { 4, 4, 4 },
- .vback_porch = { 8, 8, 8 },
- .vsync_len = { 18, 20, 20 },
-};
-
-static const struct panel_desc auo_b101ean01 = {
- .timings = &auo_b101ean01_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 217,
- .height = 136,
- },
-};
-
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -1015,172 +851,6 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
-static const struct drm_display_mode auo_b116xak01_mode = {
- .clock = 69300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 10,
- .vdisplay = 768,
- .vsync_start = 768 + 4,
- .vsync_end = 768 + 4 + 6,
- .vtotal = 768 + 4 + 6 + 15,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xak01 = {
- .modes = &auo_b116xak01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
- .bus_flags = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133xtn01_mode = {
- .clock = 69500,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 20,
- .vdisplay = 768,
- .vsync_start = 768 + 3,
- .vsync_end = 768 + 3 + 6,
- .vtotal = 768 + 3 + 6 + 13,
-};
-
-static const struct panel_desc auo_b133xtn01 = {
- .modes = &auo_b133xtn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
-};
-
-static const struct drm_display_mode auo_b133han05_mode = {
- .clock = 142600,
- .hdisplay = 1920,
- .hsync_start = 1920 + 58,
- .hsync_end = 1920 + 58 + 42,
- .htotal = 1920 + 58 + 42 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 54,
-};
-
-static const struct panel_desc auo_b133han05 = {
- .modes = &auo_b133han05_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133htn01_mode = {
- .clock = 150660,
- .hdisplay = 1920,
- .hsync_start = 1920 + 172,
- .hsync_end = 1920 + 172 + 80,
- .htotal = 1920 + 172 + 80 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 25,
- .vsync_end = 1080 + 25 + 10,
- .vtotal = 1080 + 25 + 10 + 10,
-};
-
-static const struct panel_desc auo_b133htn01 = {
- .modes = &auo_b133htn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 105,
- .enable = 20,
- .unprepare = 50,
- },
-};
-
-static const struct drm_display_mode auo_b140han06_mode = {
- .clock = 141000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 152,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 14,
- .vtotal = 1080 + 3 + 14 + 19,
-};
-
-static const struct panel_desc auo_b140han06 = {
- .modes = &auo_b140han06_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -1524,169 +1194,6 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
- {
- .clock = 71900,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
- {
- .clock = 57500,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
-};
-
-static const struct panel_desc boe_nv101wxmn51 = {
- .modes = boe_nv101wxmn51_modes,
- .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
- .bpc = 8,
- .size = {
- .width = 217,
- .height = 136,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
-};
-
-static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
- {
- .clock = 207800,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
- {
- .clock = 138500,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
-};
-
-static const struct panel_desc boe_nv110wtm_n61 = {
- .modes = boe_nv110wtm_n61_modes,
- .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
- .bpc = 8,
- .size = {
- .width = 233,
- .height = 155,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .enable = 50,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct drm_display_mode boe_nv133fhm_n61_modes = {
- .clock = 147840,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 6,
- .vtotal = 1080 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct panel_desc boe_nv133fhm_n61 = {
- .modes = &boe_nv133fhm_n61_modes,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- /*
- * When power is first given to the panel there's a short
- * spike on the HPD line. It was explained that this spike
- * was until the TCON data download was complete. On
- * one system this was measured at 8 ms. We'll put 15 ms
- * in the prepare delay just to be safe and take it away
- * from the hpd_absent_delay (which would otherwise be 200 ms)
- * to handle this. That means:
- * - If HPD isn't hooked up you still have 200 ms delay.
- * - If HPD is hooked up we won't try to look at it for the
- * first 15 ms.
- */
- .prepare = 15,
- .hpd_absent_delay = 185,
-
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
- {
- .clock = 148500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 2200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1125,
- },
-};
-
-static const struct panel_desc boe_nv140fhmn49 = {
- .modes = boe_nv140fhmn49_modes,
- .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
- .bpc = 6,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2609,96 +2116,6 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
-static const struct drm_display_mode innolux_n116bca_ea1_mode = {
- .clock = 76420,
- .hdisplay = 1366,
- .hsync_start = 1366 + 136,
- .hsync_end = 1366 + 136 + 30,
- .htotal = 1366 + 136 + 30 + 60,
- .vdisplay = 768,
- .vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 12,
- .vtotal = 768 + 8 + 12 + 12,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-static const struct panel_desc innolux_n116bca_ea1 = {
- .modes = &innolux_n116bca_ea1_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/*
- * Datasheet specifies that at 60 Hz refresh rate:
- * - total horizontal time: { 1506, 1592, 1716 }
- * - total vertical time: { 788, 800, 868 }
- *
- * ...but doesn't go into exactly how that should be split into a front
- * porch, back porch, or sync length. For now we'll leave a single setting
- * here which allows a bit of tweaking of the pixel clock at the expense of
- * refresh rate.
- */
-static const struct display_timing innolux_n116bge_timing = {
- .pixelclock = { 72600000, 76420000, 80240000 },
- .hactive = { 1366, 1366, 1366 },
- .hfront_porch = { 136, 136, 136 },
- .hback_porch = { 60, 60, 60 },
- .hsync_len = { 30, 30, 30 },
- .vactive = { 768, 768, 768 },
- .vfront_porch = { 8, 8, 8 },
- .vback_porch = { 12, 12, 12 },
- .vsync_len = { 12, 12, 12 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc innolux_n116bge = {
- .timings = &innolux_n116bge_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode innolux_n125hce_gn1_mode = {
- .clock = 162000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 4,
- .vsync_end = 1080 + 4 + 4,
- .vtotal = 1080 + 4 + 4 + 24,
-};
-
-static const struct panel_desc innolux_n125hce_gn1 = {
- .modes = &innolux_n125hce_gn1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 276,
- .height = 155,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -2724,33 +2141,6 @@ static const struct panel_desc innolux_n156bge_l21 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
- .clock = 206016,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 80,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 10,
- .vtotal = 1440 + 3 + 10 + 27,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc innolux_p120zdg_bf1 = {
- .modes = &innolux_p120zdg_bf1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 254,
- .height = 169,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
-};
-
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -2773,64 +2163,6 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
-static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
- .clock = 138778,
- .hdisplay = 1920,
- .hsync_start = 1920 + 24,
- .hsync_end = 1920 + 24 + 48,
- .htotal = 1920 + 24 + 48 + 88,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 12,
- .vtotal = 1080 + 3 + 12 + 17,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc ivo_m133nwf4_r0 = {
- .modes = &ivo_m133nwf4_r0_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
- .clock = 81000,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 32,
- .htotal = 1366 + 40 + 32 + 62,
- .vdisplay = 768,
- .vsync_start = 768 + 5,
- .vsync_end = 768 + 5 + 5,
- .vtotal = 768 + 5 + 5 + 122,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
- .modes = &kingdisplay_kd116n21_30nv_a010_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2982,94 +2314,6 @@ static const struct panel_desc lg_lb070wv8 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
- .clock = 200000,
- .hdisplay = 1536,
- .hsync_start = 1536 + 12,
- .hsync_end = 1536 + 12 + 16,
- .htotal = 1536 + 12 + 16 + 48,
- .vdisplay = 2048,
- .vsync_start = 2048 + 8,
- .vsync_end = 2048 + 8 + 4,
- .vtotal = 2048 + 8 + 4 + 8,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc lg_lp079qx1_sp0v = {
- .modes = &lg_lp079qx1_sp0v_mode,
- .num_modes = 1,
- .size = {
- .width = 129,
- .height = 171,
- },
-};
-
-static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
- .clock = 205210,
- .hdisplay = 2048,
- .hsync_start = 2048 + 150,
- .hsync_end = 2048 + 150 + 5,
- .htotal = 2048 + 150 + 5 + 5,
- .vdisplay = 1536,
- .vsync_start = 1536 + 3,
- .vsync_end = 1536 + 3 + 1,
- .vtotal = 1536 + 3 + 1 + 9,
-};
-
-static const struct panel_desc lg_lp097qx1_spa1 = {
- .modes = &lg_lp097qx1_spa1_mode,
- .num_modes = 1,
- .size = {
- .width = 208,
- .height = 147,
- },
-};
-
-static const struct drm_display_mode lg_lp120up1_mode = {
- .clock = 162300,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40+ 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 4,
- .vsync_end = 1280 + 4 + 4,
- .vtotal = 1280 + 4 + 4 + 12,
-};
-
-static const struct panel_desc lg_lp120up1 = {
- .modes = &lg_lp120up1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 267,
- .height = 183,
- },
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode lg_lp129qe_mode = {
- .clock = 285250,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1700,
- .vsync_start = 1700 + 3,
- .vsync_end = 1700 + 3 + 10,
- .vtotal = 1700 + 3 + 10 + 36,
-};
-
-static const struct panel_desc lg_lp129qe = {
- .modes = &lg_lp129qe_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 272,
- .height = 181,
- },
-};
-
static const struct display_timing logictechno_lt161010_2nh_timing = {
.pixelclock = { 26400000, 33300000, 46800000 },
.hactive = { 800, 800, 800 },
@@ -3158,19 +2402,6 @@ static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
- .clock = 30400,
- .hdisplay = 800,
- .hsync_start = 800 + 0,
- .hsync_end = 800 + 1,
- .htotal = 800 + 0 + 1 + 160,
- .vdisplay = 480,
- .vsync_start = 480 + 0,
- .vsync_end = 480 + 48 + 1,
- .vtotal = 480 + 48 + 1 + 0,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
static const struct drm_display_mode logicpd_type_28_mode = {
.clock = 9107,
.hdisplay = 480,
@@ -3205,6 +2436,19 @@ static const struct panel_desc logicpd_type_28 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
+ .clock = 30400,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 1,
+ .htotal = 800 + 0 + 1 + 160,
+ .vdisplay = 480,
+ .vsync_start = 480 + 0,
+ .vsync_end = 480 + 48 + 1,
+ .vtotal = 480 + 48 + 1 + 0,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,
@@ -3330,49 +2574,6 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
-static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
- {
- .clock = 138500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }, {
- .clock = 110920,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }
-};
-
-static const struct panel_desc neweast_wjfh116008a = {
- .modes = neweast_wjfh116008a_modes,
- .num_modes = 2,
- .bpc = 6,
- .size = {
- .width = 260,
- .height = 150,
- },
- .delay = {
- .prepare = 110,
- .enable = 20,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -3783,27 +2984,6 @@ static const struct panel_desc rocktech_rk101ii01d_ct = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
- .clock = 271560,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1600,
- .vsync_start = 1600 + 2,
- .vsync_end = 1600 + 2 + 5,
- .vtotal = 1600 + 2 + 5 + 57,
-};
-
-static const struct panel_desc samsung_lsn122dl01_c01 = {
- .modes = &samsung_lsn122dl01_c01_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
-};
-
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -3829,28 +3009,6 @@ static const struct panel_desc samsung_ltn101nt05 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_ltn140at29_301_mode = {
- .clock = 76300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 64,
- .hsync_end = 1366 + 64 + 48,
- .htotal = 1366 + 64 + 48 + 128,
- .vdisplay = 768,
- .vsync_start = 768 + 2,
- .vsync_end = 768 + 2 + 5,
- .vtotal = 768 + 2 + 5 + 17,
-};
-
-static const struct panel_desc samsung_ltn140at29_301 = {
- .modes = &samsung_ltn140at29_301_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 320,
- .height = 187,
- },
-};
-
static const struct display_timing satoz_sat050at40h12r2_timing = {
.pixelclock = {33300000, 33300000, 50000000},
.hactive = {800, 800, 800},
@@ -3875,31 +3033,6 @@ static const struct panel_desc satoz_sat050at40h12r2 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
- .clock = 168480,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 3,
- .vsync_end = 1280 + 3 + 10,
- .vtotal = 1280 + 3 + 10 + 57,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc sharp_ld_d5116z01b = {
- .modes = &sharp_ld_d5116z01b_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 260,
- .height = 120,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
-};
-
static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -3974,34 +3107,6 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing sharp_lq123p1jx31_timing = {
- .pixelclock = { 252750000, 252750000, 266604720 },
- .hactive = { 2400, 2400, 2400 },
- .hfront_porch = { 48, 48, 48 },
- .hback_porch = { 80, 80, 84 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 1600, 1600, 1600 },
- .vfront_porch = { 3, 3, 3 },
- .vback_porch = { 33, 33, 120 },
- .vsync_len = { 10, 10, 10 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc sharp_lq123p1jx31 = {
- .timings = &sharp_lq123p1jx31_timing,
- .num_timings = 1,
- .bpc = 8,
- .size = {
- .width = 259,
- .height = 173,
- },
- .delay = {
- .prepare = 110,
- .enable = 50,
- .unprepare = 550,
- },
-};
-
static const struct drm_display_mode sharp_ls020b1dd01d_modes[] = {
{ /* 50 Hz */
.clock = 3000,
@@ -4090,33 +3195,6 @@ static const struct panel_desc starry_kr070pe2t = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode starry_kr122ea0sra_mode = {
- .clock = 147000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 32,
- .vdisplay = 1200,
- .vsync_start = 1200 + 15,
- .vsync_end = 1200 + 15 + 2,
- .vtotal = 1200 + 15 + 2 + 18,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc starry_kr122ea0sra = {
- .modes = &starry_kr122ea0sra_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
- .delay = {
- .prepare = 10 + 200,
- .enable = 50,
- .unprepare = 10 + 500,
- },
-};
-
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.clock = 30000,
.hdisplay = 800,
@@ -4428,7 +3506,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode
static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
.modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
.num_modes = 1,
- .bpc = 6,
+ .bpc = 8,
.size = {
.width = 154,
.height = 90,
@@ -4484,30 +3562,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
- .compatible = "auo,b101ean01",
- .data = &auo_b101ean01,
- }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
- .compatible = "auo,b116xa01",
- .data = &auo_b116xak01,
- }, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
- .compatible = "auo,b133han05",
- .data = &auo_b133han05,
- }, {
- .compatible = "auo,b133htn01",
- .data = &auo_b133htn01,
- }, {
- .compatible = "auo,b140han06",
- .data = &auo_b140han06,
- }, {
- .compatible = "auo,b133xtn01",
- .data = &auo_b133xtn01,
- }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
@@ -4547,21 +3604,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
- .compatible = "boe,nv101wxmn51",
- .data = &boe_nv101wxmn51,
- }, {
- .compatible = "boe,nv110wtm-n61",
- .data = &boe_nv110wtm_n61,
- }, {
- .compatible = "boe,nv133fhm-n61",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv133fhm-n62",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv140fhmn49",
- .data = &boe_nv140fhmn49,
- }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -4673,30 +3715,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
- .compatible = "innolux,n116bca-ea1",
- .data = &innolux_n116bca_ea1,
- }, {
- .compatible = "innolux,n116bge",
- .data = &innolux_n116bge,
- }, {
- .compatible = "innolux,n125hce-gn1",
- .data = &innolux_n125hce_gn1,
- }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
- .compatible = "innolux,p120zdg-bf1",
- .data = &innolux_p120zdg_bf1,
- }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
- .compatible = "ivo,m133nwf4-r0",
- .data = &ivo_m133nwf4_r0,
- }, {
- .compatible = "kingdisplay,kd116n21-30nv-a010",
- .data = &kingdisplay_kd116n21_30nv_a010,
- }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -4715,18 +3739,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
- .compatible = "lg,lp079qx1-sp0v",
- .data = &lg_lp079qx1_sp0v,
- }, {
- .compatible = "lg,lp097qx1-spa1",
- .data = &lg_lp097qx1_spa1,
- }, {
- .compatible = "lg,lp120up1",
- .data = &lg_lp120up1,
- }, {
- .compatible = "lg,lp129qe",
- .data = &lg_lp129qe,
- }, {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
@@ -4757,9 +3769,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
- .compatible = "neweast,wjfh116008a",
- .data = &neweast_wjfh116008a,
- }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -4808,21 +3817,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk101ii01d-ct",
.data = &rocktech_rk101ii01d_ct,
}, {
- .compatible = "samsung,lsn122dl01-c01",
- .data = &samsung_lsn122dl01_c01,
- }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "samsung,ltn140at29-301",
- .data = &samsung_ltn140at29_301,
- }, {
.compatible = "satoz,sat050at40h12r2",
.data = &satoz_sat050at40h12r2,
}, {
- .compatible = "sharp,ld-d5116z01b",
- .data = &sharp_ld_d5116z01b,
- }, {
.compatible = "sharp,lq035q7db03",
.data = &sharp_lq035q7db03,
}, {
@@ -4832,9 +3832,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
- .compatible = "sharp,lq123p1jx31",
- .data = &sharp_lq123p1jx31,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -4844,9 +3841,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr070pe2t",
.data = &starry_kr070pe2t,
}, {
- .compatible = "starry,kr122ea0sra",
- .data = &starry_kr122ea0sra,
- }, {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
@@ -4918,7 +3912,7 @@ static int panel_simple_platform_probe(struct platform_device *pdev)
if (!id)
return -ENODEV;
- return panel_simple_probe(&pdev->dev, id->data, NULL);
+ return panel_simple_probe(&pdev->dev, id->data);
}
static int panel_simple_platform_remove(struct platform_device *pdev)
@@ -5198,7 +4192,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
desc = id->data;
- err = panel_simple_probe(&dsi->dev, &desc->desc, NULL);
+ err = panel_simple_probe(&dsi->dev, &desc->desc);
if (err < 0)
return err;
@@ -5243,38 +4237,6 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
.shutdown = panel_simple_dsi_shutdown,
};
-static int panel_simple_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
-{
- const struct of_device_id *id;
-
- id = of_match_node(platform_of_match, aux_ep->dev.of_node);
- if (!id)
- return -ENODEV;
-
- return panel_simple_probe(&aux_ep->dev, id->data, aux_ep->aux);
-}
-
-static void panel_simple_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_remove(&aux_ep->dev);
-}
-
-static void panel_simple_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_shutdown(&aux_ep->dev);
-}
-
-static struct dp_aux_ep_driver panel_simple_dp_aux_ep_driver = {
- .driver = {
- .name = "panel-simple-dp-aux",
- .of_match_table = platform_of_match, /* Same as platform one! */
- .pm = &panel_simple_pm_ops,
- },
- .probe = panel_simple_dp_aux_ep_probe,
- .remove = panel_simple_dp_aux_ep_remove,
- .shutdown = panel_simple_dp_aux_ep_shutdown,
-};
-
static int __init panel_simple_init(void)
{
int err;
@@ -5283,21 +4245,14 @@ static int __init panel_simple_init(void)
if (err < 0)
return err;
- err = dp_aux_dp_driver_register(&panel_simple_dp_aux_ep_driver);
- if (err < 0)
- goto err_did_platform_register;
-
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
if (err < 0)
- goto err_did_aux_ep_register;
+ goto err_did_platform_register;
}
return 0;
-err_did_aux_ep_register:
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
-
err_did_platform_register:
platform_driver_unregister(&panel_simple_platform_driver);
@@ -5310,7 +4265,6 @@ static void __exit panel_simple_exit(void)
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
platform_driver_unregister(&panel_simple_platform_driver);
}
module_exit(panel_simple_exit);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index bd9b7be63b0f..7f51a4682ccb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -198,7 +198,6 @@ err:
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
- struct resource *res;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
@@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_reset;
- res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
- pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;
@@ -400,8 +398,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
@@ -411,8 +408,7 @@ int panfrost_device_resume(struct device *dev)
int panfrost_device_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
if (!panfrost_job_is_idle(pfdev))
return -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ffaef5ec5ff..82ad9a67f251 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
if (ret)
goto fail;
- ret = drm_gem_fence_array_add(&job->deps, fence);
+ ret = drm_sched_job_add_dependency(&job->base, fence);
if (ret)
goto fail;
@@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
- int ret = 0;
+ int ret = 0, slot;
if (!args->jc)
return -EINVAL;
@@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto fail_out_sync;
+ goto out_put_syncout;
}
kref_init(&job->refcount);
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
job->pfdev = pfdev;
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->file_priv = file->driver_priv;
+ slot = panfrost_job_get_slot(job);
+
+ ret = drm_sched_job_init(&job->base,
+ &job->file_priv->sched_entity[slot],
+ NULL);
+ if (ret)
+ goto out_put_job;
+
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_job_push(job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
/* Update the return sync object for the job */
if (sync_out)
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
-fail_job:
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&job->base);
+out_put_job:
panfrost_job_put(job);
-fail_out_sync:
+out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
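/*
 * A minimal sketch of the resulting submit path, as assembled from the
 * hunks in this patch (ordering only, error handling omitted):
 *
 *   panfrost_ioctl_submit()
 *     drm_sched_job_init()                          bind the job to its entity
 *     panfrost_copy_in_sync()
 *       drm_sched_job_add_dependency()              explicit syncobj fences
 *     panfrost_job_push()
 *       drm_sched_job_arm()
 *       drm_sched_job_add_implicit_dependencies()   per-BO reservation fences
 *       drm_sched_entity_push_job()                 hand the job to the scheduler
 *
 * The per-job xarray of dependencies and the ->dependency() callback go
 * away because the scheduler core tracks dependencies itself now.
 */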
@@ -629,8 +638,8 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
-const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
.supply_names = mediatek_mt8183_supplies,
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 71a72fb50e6b..908d79520853 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
return &fence->base;
}
-static int panfrost_job_get_slot(struct panfrost_job *job)
+int panfrost_job_get_slot(struct panfrost_job *job)
{
/* JS0: fragment jobs.
* JS1: vertex/tiler jobs
@@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
*/
affinity = pfdev->features.shader_present;
- job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
- job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
static u32
@@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
- job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
- job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
panfrost_job_write_affinity(pfdev, job->requirements, js);
@@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int bo_count,
- struct xarray *deps)
+ struct drm_sched_job *job)
{
int i, ret;
for (i = 0; i < bo_count; i++) {
/* panfrost always uses write mode in its current uapi */
- ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ true);
if (ret)
return ret;
}
@@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int panfrost_job_push(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
- int slot = panfrost_job_get_slot(job);
- struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
-
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
&acquire_ctx);
if (ret)
return ret;
mutex_lock(&pfdev->sched_lock);
-
- ret = drm_sched_job_init(&job->base, entity, NULL);
- if (ret) {
- mutex_unlock(&pfdev->sched_lock);
- goto unlock;
- }
+ drm_sched_job_arm(&job->base);
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
- &job->deps);
+ &job->base);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
@@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job)
kref_get(&job->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
mutex_unlock(&pfdev->sched_lock);
@@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
{
struct panfrost_job *job = container_of(ref, struct panfrost_job,
refcount);
- struct dma_fence *fence;
- unsigned long index;
unsigned int i;
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
@@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
panfrost_job_put(job);
}
-static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct panfrost_job *job = to_panfrost_job(sched_job);
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
- .dependency = panfrost_job_dependency,
.run_job = panfrost_job_run,
.timedout_job = panfrost_job_timedout,
.free_job = panfrost_job_free
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 82306a03b57e..77e6d0e6f612 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -19,10 +19,6 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_file_priv *file_priv;
- /* Contains both explicit and implicit fences */
- struct xarray deps;
- unsigned long last_dep;
-
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 0da5b3100ab1..f51d3f791a17 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,36 +58,43 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size)
+ u64 region_start, u64 size)
{
u8 region_width;
- u64 region = iova & PAGE_MASK;
+ u64 region;
+ u64 region_end = region_start + size;
+
+ if (!size)
+ return;
+
/*
- * fls returns:
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
+	 * The locked region is a naturally aligned power-of-2 block, encoded as
+	 * log2 minus 1.
+	 * Calculate the desired start/end and look for the highest bit which
+	 * differs. The smallest naturally aligned block must include this bit
+	 * change; the desired region starts with this bit (and all less
+	 * significant bits) zeroed and ends with them set to one.
*/
+ region_width = max(fls64(region_start ^ (region_end - 1)),
+ const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
- size = round_up(size, PAGE_SIZE);
+ /*
+ * Mask off the low bits of region_start (which would be ignored by
+ * the hardware anyway)
+ */
+ region_start &= GENMASK_ULL(63, region_width);
- region_width = 10 + fls(size >> PAGE_SHIFT);
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
- region |= region_width;
+ region = region_width | region_start;
/* Lock the region that needs to be updated */
- mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
- mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
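/*
 * Worked example of the encoding above (illustrative numbers only, with
 * AS_LOCK_REGION_MIN_SIZE = 1 << 15 from panfrost_regs.h):
 *
 *   region_start = 0x11000, size = 0x3000     =>  region_end = 0x14000
 *   region_start ^ (region_end - 1)           =   0x11000 ^ 0x13fff = 0x2fff
 *   region_width = max(fls64(0x2fff), 15) - 1 =   max(14, 15) - 1 = 14
 *   region_start &= GENMASK_ULL(63, 14)       =>  0x10000
 *
 * so the hardware locks the naturally aligned 32 KiB block at 0x10000,
 * which covers the requested [0x11000, 0x14000) range.
 */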
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
@@ -104,7 +111,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
int ret;
@@ -121,23 +128,23 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
- mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
- mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
/* Need to revisit mem attrs.
* NC is the default, Mali driver is inner WT.
*/
- mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
- mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -251,7 +258,7 @@ static size_t get_pgsize(u64 addr, size_t size)
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
if (mmu->as < 0)
return;
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 5ab03d605f57..e116a4d9b8e5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
- gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
- gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
+ gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
+ gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 1940ff86e49a..6c5a11ef1ee8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -316,6 +316,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
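For reference, the reworked lock_region() above encodes the locked area as the base address with its low bits masked off, OR'd with log2(region size) minus 1, clamped to AS_LOCK_REGION_MIN_SIZE (32 KiB). A standalone user-space sketch of the same arithmetic, not part of the patch (fls64_sketch stands in for the kernel's fls64()):

#include <stdint.h>
#include <stdio.h>

#define MIN_LOCK_LOG2 15 /* matches AS_LOCK_REGION_MIN_SIZE = 1ULL << 15 */

/* illustrative stand-in for the kernel's fls64() */
static int fls64_sketch(uint64_t v)
{
        int n = 0;

        while (v) {
                n++;
                v >>= 1;
        }
        return n;
}

int main(void)
{
        uint64_t region_start = 0x10000, size = 0x2000;
        uint64_t region_end = region_start + size;
        int region_width;

        /* highest bit that differs between start and end-1 sets the block size */
        region_width = fls64_sketch(region_start ^ (region_end - 1));
        if (region_width < MIN_LOCK_LOG2)
                region_width = MIN_LOCK_LOG2;
        region_width -= 1;

        /* drop the address bits the hardware ignores (GENMASK_ULL(63, width)) */
        region_start &= ~((1ULL << region_width) - 1);

        printf("AS_LOCKADDR = 0x%llx, locked size = 2^%d bytes\n",
               (unsigned long long)(region_start | region_width),
               region_width + 1);
        return 0;
}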
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b19f2f00b215..469979cd0341 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -36,10 +36,10 @@
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
-#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
-#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 37a1b6a6ad6d..b2e33d5ba5d0 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(ttm);
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 0ecccf25a3c7..26001c2de9e9 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
- pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, entry->busaddr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
+ if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
+ entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
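The ati_pcigart changes above are a mechanical conversion from the legacy pci_map_page()/pci_unmap_page()/pci_set_dma_mask() wrappers to the generic DMA API. A minimal sketch of the resulting pattern (function and variable names are illustrative, error handling trimmed):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static dma_addr_t map_gart_page(struct device *dev, struct page *page)
{
        dma_addr_t busaddr;

        busaddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, busaddr))
                return 0; /* caller treats 0 as "not mapped" */

        return busaddr;
}

static void unmap_gart_page(struct device *dev, dma_addr_t busaddr)
{
        if (busaddr)
                dma_unmap_page(dev, busaddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}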
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index e9c47ec28ade..73e3117420bf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = dma_fence_signal_locked(&fence->base);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
-
+ dma_fence_signal_locked(&fence->base);
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
dma_fence_put(&fence->base);
- } else
- DMA_FENCE_TRACE(&fence->base, "pending\n");
+ }
return 0;
}
@@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
dma_fence_get(f);
-
- DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return true;
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
- int ret;
-
- ret = dma_fence_signal(&fence->base);
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ dma_fence_signal(&fence->base);
return true;
}
return false;
@@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
- int r_sig;
/*
* This function should not be called on !radeon fences.
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = dma_fence_signal(&fence->base);
- if (!r_sig)
- DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ dma_fence_signal(&fence->base);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a06d4cc2fb1c..7793249bc549 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
{
struct radeon_ttm_tt *gtt = (void *)ttm;
- radeon_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
-
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -574,6 +571,8 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ radeon_ttm_tt_unbind(bdev, ttm);
+
if (gtt && gtt->userptr) {
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
@@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct radeon_device *rdev = radeon_get_rdev(bdev);
if (rdev->flags & RADEON_IS_AGP) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 558f1b58bd69..9f1ecefc3933 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -9,7 +9,6 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
- select DRM_RGB if ROCKCHIP_RGB
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ade2327a10e2..8abb5ac26807 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -467,6 +467,6 @@ struct platform_driver rockchip_dp_driver = {
.driver = {
.name = "rockchip-dp",
.pm = &rockchip_dp_pm_ops,
- .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+ .of_match_table = rockchip_dp_dt_ids,
},
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8ab3247dbc4a..16497c31d9f9 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -697,7 +697,6 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
struct device *dev = dp->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
@@ -705,8 +704,7 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return PTR_ERR(dp->grf);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->regs = devm_ioremap_resource(dev, res);
+ dp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dp->regs)) {
DRM_DEV_ERROR(dev, "ioremap reg failed\n");
return PTR_ERR(dp->regs);
@@ -1123,7 +1121,7 @@ static int cdn_dp_suspend(struct device *dev)
return ret;
}
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
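cdn-dp, and several drivers later in this diff (inno_hdmi, rockchip_lvds, the sun4i/sun8i sub-drivers), drop the open-coded platform_get_resource() plus devm_ioremap_resource() pair in favour of devm_platform_ioremap_resource(). The pattern, as a hedged sketch with placeholder names:

#include <linux/platform_device.h>
#include <linux/err.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **regs)
{
        /*
         * Previously:
         *   struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         *   *regs = devm_ioremap_resource(&pdev->dev, res);
         */
        *regs = devm_platform_ioremap_resource(pdev, 0);

        return PTR_ERR_OR_ZERO(*regs);
}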
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index a2262bee5aa4..a9acbcc420d0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
-#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <video/mipi_display.h>
@@ -643,7 +642,7 @@ struct hstt {
}
/* Table A-3 High-Speed Transition Times */
-struct hstt hstt_table[] = {
+static struct hstt hstt_table[] = {
HSTT( 90, 32, 20, 26, 13),
HSTT( 100, 35, 23, 28, 14),
HSTT( 110, 32, 22, 26, 13),
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7afdc54eb3ec..046e8ec2a71c 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -810,7 +810,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct inno_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -821,8 +820,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index bfba9793d238..e4ebe60b3cc1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -138,9 +138,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_dev->dev_private = private;
- INIT_LIST_HEAD(&private->psr_list);
- mutex_init(&private->psr_list_lock);
-
ret = rockchip_drm_init_iommu(drm_dev);
if (ret)
goto err_free;
@@ -275,10 +272,17 @@ int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
return -ENODEV;
/* status disabled will prevent creation of platform-devices */
+ if (!of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+
pdev = of_find_device_by_node(node);
of_node_put(node);
+
+ /* enabled non-platform-devices can immediately return here */
if (!pdev)
- return -ENODEV;
+ return false;
/*
* All rockchip subdrivers have probed at this point, so
@@ -370,7 +374,7 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
}
iommu = of_parse_phandle(port->parent, "iommus", 0);
- if (!iommu || !of_device_is_available(iommu->parent)) {
+ if (!iommu || !of_device_is_available(iommu)) {
DRM_DEV_DEBUG(dev,
"no iommu attached for %pOF, using non-iommu buffers\n",
port->parent);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index e33c2dcd0d4b..aa0909e8edf9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -48,8 +48,6 @@ struct rockchip_drm_private {
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
- struct list_head psr_list;
- struct mutex psr_list_lock;
};
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 551653940e39..be74c87a8be4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -439,11 +440,9 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->regs = devm_ioremap_resource(lvds->dev, res);
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
@@ -612,9 +611,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
+ connector = &lvds->connector;
if (lvds->panel) {
- connector = &lvds->connector;
connector->dpms = DRM_MODE_DPMS_OFF;
ret = drm_connector_init(drm_dev, connector,
&rockchip_lvds_connector_funcs,
@@ -627,17 +626,27 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
- }
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+
+ connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index d691d9bef8e7..09be9678f2bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -27,6 +28,7 @@ struct rockchip_rgb {
struct drm_device *drm_dev;
struct drm_bridge *bridge;
struct drm_encoder encoder;
+ struct drm_connector connector;
int output_mode;
};
@@ -80,6 +82,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
int ret = 0, child_count = 0;
struct drm_panel *panel;
struct drm_bridge *bridge;
+ struct drm_connector *connector;
rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
@@ -142,12 +145,32 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+ connector = &rgb->connector;
+ connector = drm_bridge_connector_init(rgb->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
+ }
+
return rgb;
+err_free_connector:
+ drm_connector_cleanup(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
return ERR_PTR(ret);
@@ -157,6 +180,7 @@ EXPORT_SYMBOL_GPL(rockchip_rgb_init);
void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
+ drm_connector_cleanup(&rgb->connector);
drm_encoder_cleanup(&rgb->encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
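Both rockchip_lvds and rockchip_rgb now attach their bridge with DRM_BRIDGE_ATTACH_NO_CONNECTOR and create the connector themselves from the bridge chain. A condensed sketch of that flow (placeholder function name, unwind paths trimmed):

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

static int foo_attach_bridge(struct drm_device *drm,
                             struct drm_encoder *encoder,
                             struct drm_bridge *bridge)
{
        struct drm_connector *connector;
        int ret;

        /* the bridge no longer registers a connector of its own */
        ret = drm_bridge_attach(encoder, bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
        if (ret)
                return ret;

        /* build one connector covering the whole bridge chain instead */
        connector = drm_bridge_connector_init(drm, encoder);
        if (IS_ERR(connector))
                return PTR_ERR(connector);

        return drm_connector_attach_encoder(connector, encoder);
}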
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index ca7cc82125cb..1f7353f0684a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1124,6 +1124,6 @@ struct platform_driver vop_platform_driver = {
.remove = vop_remove,
.driver = {
.name = "rockchip-vop",
- .of_match_table = of_match_ptr(vop_driver_dt_match),
+ .of_match_table = vop_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 79554aa4dbb1..27e1573af96e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -45,8 +45,14 @@
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the sched_list should have at least one element to schedule
- * the entity
+ * Note that the &sched_list must have at least one element to schedule the entity.
+ *
+ * For changing @priority later on at runtime see
+ * drm_sched_entity_set_priority(). For changing the set of schedulers
+ * @sched_list at runtime see drm_sched_entity_modify_sched().
+ *
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
+ * drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
*/
@@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);
* @sched_list: the list of new drm scheds which will replace
* existing entity->sched_list
* @num_sched_list: number of drm sched in sched_list
+ *
+ * Note that this must be called under the same common lock for @entity as
+ * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
+ * guarantee through some other means that this is never called while new jobs
+ * can be pushed to @entity.
*/
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
@@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
-/**
- * drm_sched_entity_is_idle - Check if entity is idle
- *
- * @entity: scheduler entity
- *
- * Returns true if the entity does not have any unscheduled jobs.
- */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb(); /* for list_empty to work without lock */
@@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_is_ready - Check if entity is ready
- *
- * @entity: scheduler entity
- *
- * Return true if entity could provide a job.
- */
+/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
@@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-/**
- * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
- *
- * @f: signaled fence
- * @cb: our callback structure
- *
- * Signal the scheduler finished fence when the entity in question is killed.
- */
+/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
job->sched->ops->free_job(job);
}
-/**
- * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
- *
- * @entity: entity which is cleaned up
- *
- * Makes sure that all remaining jobs in an entity are killed before it is
- * destroyed.
- */
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ if (!xa_empty(&job->dependencies))
+ return xa_erase(&job->dependencies, job->last_dependency++);
+
+ if (job->sched->ops->dependency)
+ return job->sched->ops->dependency(job, entity);
+
+ return NULL;
+}
+
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
@@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
- while ((f = job->sched->ops->dependency(job, entity)))
+ while ((f = drm_sched_job_dependency(job, entity)))
dma_fence_wait(f, false);
drm_sched_fence_scheduled(s_fence);
@@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*
* @entity: scheduler entity
*
- * This should be called after @drm_sched_entity_do_release. It goes over the
- * entity and signals all jobs with an error code if the process was killed.
+ * Cleans up @entity which has been initialized by drm_sched_entity_init().
*
+ * If there are potentially jobs still in flight or getting newly queued,
+ * drm_sched_entity_flush() must be called first. This function then goes over
+ * the entity and signals all jobs with an error code if the process was killed.
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
@@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
/**
* drm_sched_entity_destroy - Destroy a context entity
- *
* @entity: scheduler entity
*
- * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
+ * convenience wrapper.
*/
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
@@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency
- */
+/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
-/**
- * drm_sched_entity_add_dependency_cb - add callback for the entities dependency
- *
- * @entity: entity with dependency
- *
+/*
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
@@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
- *
- * @entity: entity to get the job from
- *
- * Process all dependencies and try to get one job from the entities queue.
- */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job;
sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
@@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
return NULL;
while ((entity->dependency =
- sched->ops->dependency(sched_job, entity))) {
+ drm_sched_job_dependency(sched_job, entity))) {
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
if (drm_sched_entity_add_dependency_cb(entity))
@@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_put(entity->last_scheduled);
+
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+ /*
+ * If the queue is empty we allow drm_sched_entity_select_rq() to
+ * locklessly access ->last_scheduled. This only works if we set the
+ * pointer before we dequeue and if we add a write barrier here.
+ */
+ smp_wmb();
+
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
-/**
- * drm_sched_entity_select_rq - select a new rq for the entity
- *
- * @entity: scheduler entity
- *
- * Check all prerequisites and select a new rq for the entity for load
- * balancing.
- */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
+ /* single possible engine and already selected */
+ if (!entity->sched_list)
return;
- fence = READ_ONCE(entity->last_scheduled);
+ /* queue non-empty, stay on the same engine */
+ if (spsc_queue_count(&entity->job_queue))
+ return;
+
+ /*
+ * Only when the queue is empty are we guaranteed that the scheduler
+ * thread cannot change ->last_scheduled. To enforce ordering we need
+ * a read barrier here. See drm_sched_entity_pop_job() for the other
+ * side.
+ */
+ smp_rmb();
+
+ fence = entity->last_scheduled;
+
+ /* stay on the same engine if the previous job hasn't finished */
if (fence && !dma_fence_is_signaled(fence))
return;
@@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
/**
* drm_sched_entity_push_job - Submit a job to the entity's job queue
- *
* @sched_job: job to submit
- * @entity: scheduler entity
*
- * Note: To guarantee that the order of insertion to queue matches
- * the job's fence sequence number this function should be
- * called with drm_sched_job_init under common lock.
+ * Note: To guarantee that the order of insertion to queue matches the job's
+ * fence sequence number this function should be called with drm_sched_job_arm()
+ * under common lock for the struct drm_sched_entity that was set up for
+ * @sched_job in drm_sched_job_init().
*
* Returns 0 for success, negative error code otherwise.
*/
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
+ struct drm_sched_entity *entity = sched_job->entity;
bool first;
trace_drm_sched_job(sched_job, entity);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 69de2c76731f..7fd869520ef2 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->scheduled);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->scheduled,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->scheduled,
- "was already signaled\n");
+ dma_fence_signal(&fence->scheduled);
}
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->finished);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->finished,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->finished,
- "was already signaled\n");
+ dma_fence_signal(&fence->finished);
}
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
@@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
+static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ if (!WARN_ON_ONCE(!fence))
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
/**
- * drm_sched_fence_free - free up the fence memory
+ * drm_sched_fence_free - free up an uninitialized fence
*
- * @rcu: RCU callback head
+ * @fence: fence to free
*
- * Free up the fence memory after the RCU grace period.
+ * Free up the fence memory. Should only be used if drm_sched_fence_init()
+ * has not been called yet.
*/
-static void drm_sched_fence_free(struct rcu_head *rcu)
+void drm_sched_fence_free(struct drm_sched_fence *fence)
{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct drm_sched_fence *fence = to_drm_sched_fence(f);
-
- kmem_cache_free(sched_fence_slab, fence);
+ /* This function should not be called if the fence has been initialized. */
+ if (!WARN_ON_ONCE(fence->sched))
+ kmem_cache_free(sched_fence_slab, fence);
}
/**
@@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
- call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
/**
@@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
-struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
- void *owner)
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
+ void *owner)
{
struct drm_sched_fence *fence = NULL;
- unsigned seq;
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
+ return fence;
+}
+
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity)
+{
+ unsigned seq;
+
+ fence->sched = entity->rq->sched;
seq = atomic_inc_return(&entity->fence_seq);
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
&fence->lock, entity->fence_context + 1, seq);
-
- return fence;
}
module_init(drm_sched_fence_slab_init);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 67382621b429..042c16b5d54a 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,9 +48,11 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
/**
* drm_sched_job_init - init a scheduler job
- *
* @job: scheduler job to init
* @entity: scheduler entity to use
* @owner: job owner for debugging
@@ -572,43 +573,193 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
*
+ * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
+ * successfully, even when @job is aborted before drm_sched_job_arm() is called.
+ *
+ * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
+ * has died, which can mean that there's no valid runqueue for a @entity.
+ * This function returns -ENOENT in this case (which probably should be -EIO as
+ * a more meanigful return value).
+ *
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- struct drm_gpu_scheduler *sched;
-
drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
- sched = entity->rq->sched;
-
- job->sched = sched;
job->entity = entity;
- job->s_priority = entity->rq - sched->sched_rq;
- job->s_fence = drm_sched_fence_create(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->id = atomic64_inc_return(&sched->job_id_count);
INIT_LIST_HEAD(&job->list);
+ xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
+
return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
- * drm_sched_job_cleanup - clean up scheduler job resources
+ * drm_sched_job_arm - arm a scheduler job for execution
+ * @job: scheduler job to arm
+ *
+ * This arms a scheduler job for execution. Specifically it initializes the
+ * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
+ * or other places that need to track the completion of this job.
+ *
+ * Refer to drm_sched_entity_push_job() documentation for locking
+ * considerations.
+ *
+ * This can only be called if drm_sched_job_init() succeeded.
+ */
+void drm_sched_job_arm(struct drm_sched_job *job)
+{
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_entity *entity = job->entity;
+
+ BUG_ON(!entity);
+
+ sched = entity->rq->sched;
+
+ job->sched = sched;
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->id = atomic64_inc_return(&sched->job_id_count);
+
+ drm_sched_fence_init(job->s_fence, job->entity);
+}
+EXPORT_SYMBOL(drm_sched_job_arm);
+
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(&job->dependencies, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(&job->dependencies, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_dependency);
+
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
*
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
+
+ return drm_sched_job_add_dependency(job, fence);
+ }
+
+ ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_sched_job_add_dependency(job, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
+
+
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
+ *
+ * Cleans up the resources allocated with drm_sched_job_init().
+ *
+ * Drivers should call this from their error unwind code if @job is aborted
+ * before drm_sched_job_arm() is called.
+ *
+ * After that point of no return @job is committed to be executed by the
+ * scheduler, and this function should be called from the
+ * &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
- dma_fence_put(&job->s_fence->finished);
+ struct dma_fence *fence;
+ unsigned long index;
+
+ if (kref_read(&job->s_fence->finished.refcount)) {
+ /* drm_sched_job_arm() has been called */
+ dma_fence_put(&job->s_fence->finished);
+ } else {
+ /* aborted job before committing to run it */
+ drm_sched_fence_free(job->s_fence);
+ }
+
job->s_fence = NULL;
+
+ xa_for_each(&job->dependencies, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&job->dependencies);
+
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
@@ -676,15 +827,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
- /*
- * Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch pending_list
- */
- if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr)) ||
- kthread_should_park())
- return NULL;
-
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
@@ -693,17 +835,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
+
+ /* cancel this job's TO timer */
+ cancel_delayed_work(&sched->work_tdr);
/* make the scheduled timestamp more accurate */
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
- if (next)
+
+ if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
-
+ /* start TO timer for next job */
+ drm_sched_start_timeout(sched);
+ }
} else {
job = NULL;
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
}
spin_unlock(&sched->job_list_lock);
@@ -791,11 +937,8 @@ static int drm_sched_main(void *param)
(entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
- if (cleanup_job) {
+ if (cleanup_job)
sched->ops->free_job(cleanup_job);
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
- }
if (!entity)
continue;
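Taken together, the scheduler changes split job setup into drm_sched_job_init() (allocation, still abortable) and drm_sched_job_arm() (point of no return), and move dependency tracking into the job via an xarray. A hedged sketch of what a driver submit path looks like with the new API (placeholder names; the locking around arm/push is left to the driver, as the comments above require):

#include <drm/gpu_scheduler.h>
#include <drm/drm_gem.h>

static int foo_submit(struct drm_sched_job *job,
                      struct drm_sched_entity *entity,
                      struct drm_gem_object *bo, bool writes_bo,
                      void *owner)
{
        int ret;

        ret = drm_sched_job_init(job, entity, owner);
        if (ret)
                return ret;

        /* pull the BO's implicit fences into the job's dependency xarray */
        ret = drm_sched_job_add_implicit_dependencies(job, bo, writes_bo);
        if (ret)
                goto err_cleanup;

        /* point of no return: s_fence is initialized and may be published */
        drm_sched_job_arm(job);

        drm_sched_entity_push_job(job);
        return 0;

err_cleanup:
        /* aborted before arm: frees the unused fence and any dependencies */
        drm_sched_job_cleanup(job);
        return ret;
}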
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 195de30eb90c..dbdee954692a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
/* Specifies the constant alpha value */
- val = CONSTA_MAX;
+ val = newstate->alpha >> 8;
reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
@@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
+ drm_plane_create_alpha_property(plane);
+
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
return plane;
@@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
+ drm_plane_create_zpos_immutable_property(primary, 0);
+
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
@@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
+ drm_plane_create_zpos_immutable_property(overlay, i);
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf8cfefa0365..f52ff4e6c662 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
const struct sun4i_backend_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int i, ret;
@@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(backend->frontend))
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index edb60ae0a9b7..56ae38389db0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
struct sun4i_frontend *frontend;
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
- struct resource *res;
void __iomem *regs;
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
@@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
if (!frontend->data)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 2f2c9f0a1071..3799a745b7dd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
- struct resource *res;
u32 reg;
int ret;
@@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
if (!hdmi->variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->base = devm_ioremap_resource(dev, res);
+ hdmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 9f06dec0fc61..88db2d2a9336 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index cb91bc11a0c7..94883abe0dfd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
- struct resource *res;
void __iomem *regs;
int ret;
@@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
tv->drv = drv;
dev_set_drvdata(dev, tv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 4f5efcace68e..527c7b2474da 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
- struct resource *res;
void __iomem *base;
int ret;
@@ -1120,18 +1119,16 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
"allwinner,sun6i-a31-mipi-dsi"))
bus_clk_name = "bus";
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Couldn't map the DSI encoder registers\n");
return PTR_ERR(base);
}
dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
- if (IS_ERR(dsi->regulator)) {
- dev_err(dev, "Couldn't get VCC-DSI supply\n");
- return PTR_ERR(dsi->regulator);
- }
+ if (IS_ERR(dsi->regulator))
+ return dev_err_probe(dev, PTR_ERR(dsi->regulator),
+ "Couldn't get VCC-DSI supply\n");
dsi->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(dsi->reset)) {
@@ -1146,10 +1143,9 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
}
dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
- if (IS_ERR(dsi->bus_clk)) {
- dev_err(dev, "Couldn't get the DSI bus clock\n");
- return PTR_ERR(dsi->bus_clk);
- }
+ if (IS_ERR(dsi->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->bus_clk),
+ "Couldn't get the DSI bus clock\n");
ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
if (ret)
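The sun6i DSI probe path above, and the sun8i HDMI path below, switch from dev_err() plus return to dev_err_probe(), which keeps the message, returns the error, and stays quiet on -EPROBE_DEFER while recording the deferral reason. A minimal sketch of the pattern (placeholder function and clock names):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_get_clk(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, "bus");
        if (IS_ERR(*clk))
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "Couldn't get the bus clock\n");

        return 0;
}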
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index a55a38ad849c..022cafa6c06c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -16,8 +16,8 @@ struct sun8i_mixer;
#define CCSC10_OFFSET 0xA0000
#define CCSC11_OFFSET 0xF0000
-#define SUN8I_CSC_CTRL(base) (base + 0x0)
-#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
+#define SUN8I_CSC_CTRL(base) ((base) + 0x0)
+#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i))
#define SUN8I_CSC_CTRL_EN BIT(0)
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index f75fb157f2ff..21d473deb757 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -153,22 +153,19 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return -EPROBE_DEFER;
hdmi->rst_ctrl = devm_reset_control_get(dev, "ctrl");
- if (IS_ERR(hdmi->rst_ctrl)) {
- dev_err(dev, "Could not get ctrl reset control\n");
- return PTR_ERR(hdmi->rst_ctrl);
- }
+ if (IS_ERR(hdmi->rst_ctrl))
+ return dev_err_probe(dev, PTR_ERR(hdmi->rst_ctrl),
+ "Could not get ctrl reset control\n");
hdmi->clk_tmds = devm_clk_get(dev, "tmds");
- if (IS_ERR(hdmi->clk_tmds)) {
- dev_err(dev, "Couldn't get the tmds clock\n");
- return PTR_ERR(hdmi->clk_tmds);
- }
+ if (IS_ERR(hdmi->clk_tmds))
+ return dev_err_probe(dev, PTR_ERR(hdmi->clk_tmds),
+ "Couldn't get the tmds clock\n");
hdmi->regulator = devm_regulator_get(dev, "hvcc");
- if (IS_ERR(hdmi->regulator)) {
- dev_err(dev, "Couldn't get regulator\n");
- return PTR_ERR(hdmi->regulator);
- }
+ if (IS_ERR(hdmi->regulator))
+ return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
+ "Couldn't get regulator\n");
ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
if (!ret) {
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 5b42cf25cc86..f5e8aeaa3cdf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
- struct resource *res;
void __iomem *regs;
unsigned int base;
int plane_cnt;
@@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
if (!mixer->cfg)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 75d8e60c149d..1b9b8b48f4a7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
const struct sun8i_tcon_top_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int ret, i;
@@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon_top->bus);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
tcon_top->regs = regs;
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index cae8b8cbe9dd..c04dda8353fd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index e00ec3f40ec8..16a1cdc28657 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
return true;
/* check for the sector layout bit */
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index d31be274a2bd..1ceb93fbdc50 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
- depends on DRM
+ depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 73415fa9ae0f..2ce3bd903b70 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(defy, "default y resolution");
enum bochs_types {
BOCHS_QEMU_STDVGA,
+ BOCHS_SIMICS,
BOCHS_UNKNOWN,
};
@@ -695,6 +696,13 @@ static const struct pci_device_id bochs_pci_tbl[] = {
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_UNKNOWN,
},
+ {
+ .vendor = 0x4321,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_SIMICS,
+ },
{ /* end of list */ }
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea4add2b9717..3b22c0013dbf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
+{
+ struct ttm_device *bdev = bo->bdev;
+
+ list_move_tail(&bo->lru, &bdev->pinned);
+
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
+}
+
+static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
return;
}
@@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -914,57 +924,11 @@ out:
return ret;
}
-static bool ttm_bo_places_compat(const struct ttm_place *places,
- unsigned num_placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- unsigned i;
-
- if (mem->placement & TTM_PL_FLAG_TEMPORARY)
- return false;
-
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
-
- if ((mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
-
- *new_flags = heap->flags;
- if ((mem->mem_type == heap->mem_type) &&
- (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
- }
- return false;
-}
-
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- if (ttm_bo_places_compat(placement->placement, placement->num_placement,
- mem, new_flags))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_bo_places_compat(placement->busy_placement,
- placement->num_busy_placement,
- mem, new_flags))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ttm_bo_mem_compat);
-
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
int ret;
- uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
@@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
+ if (!ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -1160,12 +1124,12 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
}
if (bo->deleted) {
- ttm_bo_cleanup_refs(bo, false, false, locked);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
- return 0;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
@@ -1216,7 +1180,7 @@ out:
if (locked)
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- return ret;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
@@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 763fa6f4e07d..1c5ffe2935af 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *src_mem = bo->resource;
struct ttm_resource_manager *src_man =
ttm_manager_type(bdev, src_mem->mem_type);
- struct ttm_resource src_copy = *src_mem;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- src_copy = *src_mem;
- ttm_bo_move_sync_cleanup(bo, dst_mem);
if (!src_iter->ops->maps_tt)
- ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+
out_src_iter:
if (!dst_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 519deea8e39b..be24bb6cefd0 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
+struct dentry *ttm_debugfs_root;
+
static void ttm_global_release(void)
{
struct ttm_global *glob = &ttm_glob;
@@ -53,6 +55,7 @@ static void ttm_global_release(void)
goto out;
ttm_pool_mgr_fini();
+ debugfs_remove(ttm_debugfs_root);
__free_page(glob->dummy_read_page);
memset(glob, 0, sizeof(*glob));
@@ -73,6 +76,11 @@ static int ttm_global_init(void)
si_meminfo(&si);
+ ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+ if (IS_ERR(ttm_debugfs_root)) {
+ ttm_debugfs_root = NULL;
+ }
+
/* Limit the number of pages in the pool to about 50% of the total
* system memory.
*/
@@ -100,6 +108,8 @@ static int ttm_global_init(void)
debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
&glob->bo_count);
out:
+ if (ret && ttm_debugfs_root)
+ debugfs_remove(ttm_debugfs_root);
if (ret)
--ttm_glob_use_count;
mutex_unlock(&ttm_global_mutex);
@@ -210,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
+ INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -247,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ unsigned int i, j;
+
+ spin_lock(&bdev->lru_lock);
+ while (!list_empty(&bdev->pinned)) {
+ bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+ /* Take ref against racing releases once lru_lock is unlocked */
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ while (!list_empty(&man->lru[j])) {
+ bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+ }
+ }
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
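
The new ttm_device_clear_dma_mappings() export above walks the device's pinned list and the per-manager LRU lists and unpopulates every remaining TT, so no DMA mappings survive past the point where the driver calls it. A minimal sketch of a teardown path using it; the my_device structure and my_device_unplug() are hypothetical, only the two TTM calls are taken from this series:

#include <drm/ttm/ttm_device.h>

struct my_device {
	struct ttm_device ttm_dev;
	/* driver-private state ... */
};

/* Hypothetical unplug path: drop all DMA mappings while the underlying
 * device is still reachable, then tear the TTM device down as usual.
 */
static void my_device_unplug(struct my_device *mdev)
{
	ttm_device_clear_dma_mappings(&mdev->ttm_dev);
	ttm_device_fini(&mdev->ttm_dev);
}
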
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 997c458f68a9..0037eefe3239 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -40,6 +40,18 @@
#include "ttm_module.h"
/**
+ * DOC: TTM
+ *
+ * TTM is a memory manager for accelerator devices with dedicated memory.
+ *
+ * The basic idea is that resources are grouped together in buffer objects of
+ * a certain size, and TTM handles the lifetime, movement and CPU mappings of
+ * those objects.
+ *
+ * TODO: Add more design background and information here.
+ */
+
+/**
* ttm_prot_from_caching - Modify the page protection according to the
* ttm caching mode
* @caching: The ttm caching mode
@@ -72,22 +84,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
return tmp;
}
-struct dentry *ttm_debugfs_root;
-
-static int __init ttm_init(void)
-{
- ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
- return 0;
-}
-
-static void __exit ttm_exit(void)
-{
- debugfs_remove(ttm_debugfs_root);
-}
-
-module_init(ttm_init);
-module_exit(ttm_exit);
-
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cb38b1a17b09..c961a788b519 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
- unsigned int num_freed;
+ unsigned int num_pages;
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
- num_freed = 1 << pt->order;
+ num_pages = 1 << pt->order;
} else {
- num_freed = 0;
+ num_pages = 0;
}
- list_move_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
-
- return num_freed;
+ return num_pages;
}
/* Return the allocation order based for a page */
@@ -383,7 +382,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
bool apply_caching = false;
struct ttm_pool_type *pt;
@@ -530,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
+
+ /* We removed the pool types from the LRU, but we also need to make sure
+ * that no shrinker is concurrently freeing pages from the pool.
+ */
+ synchronize_shrinkers();
}
/* As long as pages are available make sure to release at least one */
@@ -604,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
@@ -613,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
@@ -640,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -656,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
@@ -693,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- mutex_init(&shrinker_lock);
+ spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index f4b08a8705b3..67d68a4a8640 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
ttm_resource_manager_set_used(man, true);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_init);
+EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
* ttm_range_man_fini
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
@@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
kfree(rman);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_fini);
+EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2431717376e7..035d71332d18 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+static bool ttm_resource_places_compat(struct ttm_resource *res,
+ const struct ttm_place *places,
+ unsigned num_placement)
+{
+ unsigned i;
+
+ if (res->placement & TTM_PL_FLAG_TEMPORARY)
+ return false;
+
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
+
+ if (res->start < heap->fpfn || (heap->lpfn &&
+ (res->start + res->num_pages) > heap->lpfn))
+ continue;
+
+ if ((res->mem_type == heap->mem_type) &&
+ (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ttm_resource_compat - check if resource is compatible with placement
+ *
+ * @res: the resource to check
+ * @placement: the placement to check against
+ *
+ * Returns true if the placement is compatible.
+ */
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement)
+{
+ if (ttm_resource_places_compat(res, placement->placement,
+ placement->num_placement))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_resource_places_compat(res, placement->busy_placement,
+ placement->num_busy_placement))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ttm_resource_compat);
+
/**
* ttm_resource_manager_init
*
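
ttm_resource_compat(), exported above, replaces the old ttm_bo_mem_compat() check; the vmwgfx hunks further down in this diff use it to skip validation of buffers that are already pinned. A condensed sketch of that pattern, with my_validate_pinned() as a hypothetical wrapper around calls that all appear in this series:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

static int my_validate_pinned(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	/* A pinned buffer cannot be moved, so only check whether its
	 * current resource already satisfies the requested placement.
	 */
	if (bo->pin_count > 0)
		return ttm_resource_compat(bo->resource, placement) ? 0 : -EINVAL;

	return ttm_bo_validate(bo, placement, ctx);
}
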
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 24031a8acd2d..dae52433beeb 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
@@ -123,17 +122,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- ttm_tt_unpopulate(bdev, ttm);
-
- if (ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_destroy_common);
-
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@@ -168,6 +156,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
+ WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
+
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
+
if (ttm->pages)
kvfree(ttm->pages);
else
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 1f497d8f1ae5..c744175c6992 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,6 +4,7 @@ config DRM_UDL
depends on DRM
depends on USB
depends on USB_ARCH_HAS_HCD
+ depends on MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 9a5c44606337..e973ec487484 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_V3D
tristate "Broadcom V3D 3.x and newer"
- depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
+ depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
depends on DRM
depends on COMMON_CLK
depends on MMU
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 9403c3b36aca..c1deab2cf38d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -206,10 +206,7 @@ MODULE_DEVICE_TABLE(of, v3d_of_match);
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
- struct resource *res =
- platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
-
- *regs = devm_ioremap_resource(v3d->drm.dev, res);
+ *regs = devm_platform_ioremap_resource_byname(v3d_to_pdev(v3d), name);
return PTR_ERR_OR_ZERO(*regs);
}
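
The map_regs() change above folds platform_get_resource_byname() plus devm_ioremap_resource() into the single devm_platform_ioremap_resource_byname() helper. A minimal sketch of the same idiom in a hypothetical probe function; the "core" resource name is made up for illustration:

#include <linux/io.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* Look up the named MEM resource and ioremap it with device-managed
	 * lifetime; failures come back as an ERR_PTR value.
	 */
	regs = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the hardware through regs ... */
	return 0;
}
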
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 270134779073..b900a050d5e2 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -234,11 +234,6 @@ struct v3d_job {
struct drm_gem_object **bo;
u32 bo_count;
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* v3d fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *irq_fence;
@@ -379,6 +374,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5689da118197..ead0be8d48a7 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -197,8 +197,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
- V3D_L2TCACTL_L2TFLS), 100)) {
- DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+ V3D_L2TCACTL_TMUWCF), 100)) {
+ DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
@@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
- ret = drm_gem_fence_array_add_implicit(&job->deps,
- job->bo[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
if (ret) {
drm_gem_unlock_reservations(job->bo, job->bo_count,
acquire_ctx);
@@ -356,8 +356,6 @@ static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- unsigned long index;
- struct dma_fence *fence;
int i;
for (i = 0; i < job->bo_count; i++) {
@@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
}
kvfree(job->bo);
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -397,6 +390,12 @@ v3d_render_job_free(struct kref *ref)
v3d_job_free(ref);
}
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
@@ -438,9 +437,10 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync)
+ u32 in_sync, enum v3d_queue queue)
{
struct dma_fence *in_fence = NULL;
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
int ret;
job->v3d = v3d;
@@ -450,44 +450,40 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
if (ret < 0)
return ret;
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ v3d_priv);
+ if (ret)
+ goto fail;
ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
if (ret == -EINVAL)
- goto fail;
+ goto fail_job;
- ret = drm_gem_fence_array_add(&job->deps, in_fence);
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
- goto fail;
+ goto fail_job;
kref_init(&job->refcount);
return 0;
+fail_job:
+ drm_sched_job_cleanup(&job->base);
fail:
- xa_destroy(&job->deps);
pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
-static int
-v3d_push_job(struct v3d_file_priv *v3d_priv,
- struct v3d_job *job, enum v3d_queue queue)
+static void
+v3d_push_job(struct v3d_job *job)
{
- int ret;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- v3d_priv);
- if (ret)
- return ret;
+ drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
/* put by scheduler job completion */
kref_get(&job->refcount);
- drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
-
- return 0;
+ drm_sched_entity_push_job(&job->base);
}
static void
@@ -562,7 +558,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&render->unref_list);
ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl);
+ v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
if (ret) {
kfree(render);
return ret;
@@ -571,14 +567,14 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->bcl_start != args->bcl_end) {
bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
if (!bin) {
- v3d_job_put(&render->base);
+ v3d_job_cleanup(&render->base);
return -ENOMEM;
}
ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl);
+ v3d_job_free, args->in_sync_bcl, V3D_BIN);
if (ret) {
- v3d_job_put(&render->base);
+ v3d_job_cleanup(&render->base);
kfree(bin);
return ret;
}
@@ -598,7 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
}
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
kfree(clean_job);
clean_job = NULL;
@@ -633,31 +629,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
- ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&bin->base);
- ret = drm_gem_fence_array_add(&render->base.deps,
- dma_fence_get(bin->base.done_fence));
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
- ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
- ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
@@ -682,10 +673,10 @@ fail_unreserve:
last_job->bo_count, &acquire_ctx);
fail:
if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
+ v3d_job_cleanup(&bin->base);
+ v3d_job_cleanup(&render->base);
if (clean_job)
- v3d_job_put(clean_job);
+ v3d_job_cleanup(clean_job);
return ret;
}
@@ -704,7 +695,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_tfu *args = data;
struct v3d_tfu_job *job;
struct ww_acquire_ctx acquire_ctx;
@@ -717,7 +707,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
+ v3d_job_free, args->in_sync, V3D_TFU);
if (ret) {
kfree(job);
return ret;
@@ -726,7 +716,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
if (!job->base.bo) {
- v3d_job_put(&job->base);
+ v3d_job_cleanup(&job->base);
return -ENOMEM;
}
@@ -761,9 +751,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
goto fail;
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@@ -775,12 +763,8 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return 0;
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
- &acquire_ctx);
fail:
- v3d_job_put(&job->base);
+ v3d_job_cleanup(&job->base);
return ret;
}
@@ -818,7 +802,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
+ v3d_job_free, args->in_sync, V3D_CSD);
if (ret) {
kfree(job);
return ret;
@@ -826,14 +810,13 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
if (!clean_job) {
- v3d_job_put(&job->base);
- kfree(job);
+ v3d_job_cleanup(&job->base);
return -ENOMEM;
}
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
- v3d_job_put(&job->base);
+ v3d_job_cleanup(&job->base);
kfree(clean_job);
return ret;
}
@@ -859,18 +842,14 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
- ret = drm_gem_fence_array_add(&clean_job->deps,
- dma_fence_get(job->base.done_fence));
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@@ -889,8 +868,8 @@ fail_unreserve:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
+ v3d_job_cleanup(&job->base);
+ v3d_job_cleanup(clean_job);
return ret;
}
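
The v3d rework above moves dependency handling from the per-job xarray into the DRM scheduler itself: drm_sched_job_init() is called when the job is created, fences are attached with drm_sched_job_add_dependency() and drm_sched_job_add_implicit_dependencies(), and submission becomes drm_sched_job_arm() followed by drm_sched_entity_push_job(). A condensed sketch of that flow; my_submit() and its parameters are placeholders, the scheduler calls are the ones used in this diff:

#include <drm/gpu_scheduler.h>

static int my_submit(struct drm_sched_job *job, struct drm_sched_entity *entity,
		     struct dma_fence *in_fence, struct drm_gem_object *bo,
		     void *owner)
{
	int ret;

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	/* Explicit dependency, e.g. a fence found through a syncobj ... */
	ret = drm_sched_job_add_dependency(job, in_fence);
	if (ret)
		goto err_cleanup;

	/* ... plus implicit dependencies taken from the BO's reservation. */
	ret = drm_sched_job_add_implicit_dependencies(job, bo, true);
	if (ret)
		goto err_cleanup;

	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
	return 0;

err_cleanup:
	drm_sched_job_cleanup(job);
	return ret;
}
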
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index dd7fcc36d726..e0cb7d0697a7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -13,7 +13,7 @@
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_add_dependency() to manage the dependency between bin and
* render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
}
static void
-v3d_job_free(struct drm_sched_job *sched_job)
+v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- drm_sched_job_cleanup(sched_job);
- v3d_job_put(job);
+ v3d_job_cleanup(job);
}
static void
@@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct v3d_job *job = to_v3d_job(sched_job);
-
- /* XXX: Wait on a fence for switching the GMP if necessary,
- * and then do so.
- */
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
@@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
int
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index a90f2545baee..c180eb60bee8 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -229,26 +229,19 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
- int ret;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &bridge);
- if (ret) {
+ bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
*/
- if (ret == -ENODEV)
+ if (PTR_ERR(bridge) == -ENODEV)
return 0;
else
- return ret;
+ return PTR_ERR(bridge);
}
- if (panel)
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
-
return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
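
vc4_dpi (and vc4_dsi below) now use devm_drm_of_get_bridge(), which resolves either a bridge or a panel behind the given DT port and endpoint and wraps panels in a panel bridge automatically. A minimal sketch of the lookup, treating a missing downstream device as non-fatal exactly like the DPI code does; my_get_bridge() is a placeholder name:

#include <drm/drm_bridge.h>

static struct drm_bridge *my_get_bridge(struct device *dev)
{
	struct drm_bridge *bridge;

	/* Port 0, endpoint 0 of dev->of_node; a panel is wrapped for us. */
	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge) && PTR_ERR(bridge) == -ENODEV)
		return NULL;	/* nothing connected in the DT is not an error */

	return bridge;
}
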
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f6c16c5aee68..16abc3a3d601 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -50,13 +50,11 @@
#define DRIVER_PATCHLEVEL 0
/* Helper function for mapping the regs on a platform device. */
-void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
+void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
{
- struct resource *res;
void __iomem *map;
- res = platform_get_resource(dev, IORESOURCE_MEM, index);
- map = devm_ioremap_resource(&dev->dev, res);
+ map = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(map))
return map;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a185027911ce..a229da58962a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1497,7 +1497,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
- struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
@@ -1609,27 +1608,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->bridge);
- if (ret) {
- /* If the bridge or panel pointed by dev->of_node is not
- * enabled, just return 0 here so that we don't prevent the DRM
- * dev from being registered. Of course that means the DSI
- * encoder won't be exposed, but that's not a problem since
- * nothing is connected to it.
- */
- if (ret == -ENODEV)
- return 0;
-
- return ret;
- }
-
- if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(dsi->bridge))
- return PTR_ERR(dsi->bridge);
- }
+ dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->bridge))
+ return PTR_ERR(dsi->bridge);
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@@ -1667,8 +1648,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- if (dsi->bridge)
- pm_runtime_disable(dev);
+ pm_runtime_disable(dev);
/*
* Restore the bridge_chain so the bridge detach procedure can happen
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b7dc32a0c9bb..b4b4653fe301 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
- struct drm_crtc *crtc = cstate->crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_connector_state *cstate = connector->state;
+ struct drm_crtc *crtc = encoder->crtc;
/*
- * At boot, connector->state will be NULL. Since we don't know the
+ * At boot, encoder->crtc will be NULL. Since we don't know the
* state of the scrambler and in order to avoid any
* inconsistency, let's disable it all the time.
*/
- if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
return;
- if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
return;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_connector_state *conn_state =
- vc4_hdmi_encoder_get_connector_state(encoder, state);
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(state, conn_state->crtc);
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
- struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_crtc *crtc = connector->state->crtc;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 n, cts;
u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
- if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+ if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
@@ -1462,7 +1454,7 @@ static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.audio_startup = vc4_hdmi_audio_startup,
};
-struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
+static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
- return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret)
- return ret;
-
- return 0;
-}
-#endif
-
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
- vc4_hdmi_runtime_resume,
- NULL)
-};
-
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
- .pm = &vc4_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index bf38a7e319d1..a87eafa89e9f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
@@ -50,87 +51,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static const struct drm_gem_object_funcs vgem_gem_object_funcs;
-
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
-static void vgem_gem_free_object(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
-
- kvfree(vgem_obj->pages);
- mutex_destroy(&vgem_obj->pages_lock);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, vgem_obj->table);
-
- drm_gem_object_release(obj);
- kfree(vgem_obj);
-}
-
-static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_vgem_gem_object *obj = vma->vm_private_data;
- /* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_SIGBUS;
- loff_t num_pages;
- pgoff_t page_offset;
- page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset >= num_pages)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&obj->pages_lock);
- if (obj->pages) {
- get_page(obj->pages[page_offset]);
- vmf->page = obj->pages[page_offset];
- ret = 0;
- }
- mutex_unlock(&obj->pages_lock);
- if (ret) {
- struct page *page;
-
- page = shmem_read_mapping_page(
- file_inode(obj->base.filp)->i_mapping,
- page_offset);
- if (!IS_ERR(page)) {
- vmf->page = page;
- ret = 0;
- } else switch (PTR_ERR(page)) {
- case -ENOSPC:
- case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
- case -EBUSY:
- ret = VM_FAULT_RETRY;
- break;
- case -EFAULT:
- case -EINVAL:
- ret = VM_FAULT_SIGBUS;
- break;
- default:
- WARN_ON(PTR_ERR(page));
- ret = VM_FAULT_SIGBUS;
- break;
- }
-
- }
- return ret;
-}
-
-static const struct vm_operations_struct vgem_gem_vm_ops = {
- .fault = vgem_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
@@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
kfree(vfile);
}
-static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- obj->base.funcs = &vgem_gem_object_funcs;
-
- ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
- if (ret) {
- kfree(obj);
- return ERR_PTR(ret);
- }
-
- mutex_init(&obj->pages_lock);
-
- return obj;
-}
-
-static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
-{
- drm_gem_object_release(&obj->base);
- kfree(obj);
-}
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
- struct drm_file *file,
- unsigned int *handle,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = __vgem_gem_create(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, handle);
- if (ret) {
- drm_gem_object_put(&obj->base);
- return ERR_PTR(ret);
- }
-
- return &obj->base;
-}
-
-static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gem_object;
- u64 pitch, size;
-
- pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- size = args->height * pitch;
- if (size == 0)
- return -EINVAL;
-
- gem_object = vgem_gem_create(dev, file, &args->handle, size);
- if (IS_ERR(gem_object))
- return PTR_ERR(gem_object);
-
- args->size = gem_object->size;
- args->pitch = pitch;
-
- drm_gem_object_put(gem_object);
-
- DRM_DEBUG("Created object of size %llu\n", args->size);
-
- return 0;
-}
-
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
-static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long flags = vma->vm_flags;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- /* Keep the WC mmaping set by drm_gem_mmap() but our pages
- * are ordinary and not special.
- */
- vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
-}
+DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
-static const struct file_operations vgem_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = vgem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
-
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (bo->pages_pin_count++ == 0) {
- struct page **pages;
-
- pages = drm_gem_get_pages(&bo->base);
- if (IS_ERR(pages)) {
- bo->pages_pin_count--;
- mutex_unlock(&bo->pages_lock);
- return pages;
- }
-
- bo->pages = pages;
- }
- mutex_unlock(&bo->pages_lock);
-
- return bo->pages;
-}
-
-static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (--bo->pages_pin_count == 0) {
- drm_gem_put_pages(&bo->base, bo->pages, true, true);
- bo->pages = NULL;
- }
- mutex_unlock(&bo->pages_lock);
-}
-
-static int vgem_prime_pin(struct drm_gem_object *obj)
+static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
+ struct drm_gem_shmem_object *obj;
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
+ /*
+ * vgem doesn't have any begin/end cpu access ioctls, therefore it must use
+ * coherent memory, or dma-buf sharing just won't work.
*/
- drm_clflush_pages(pages, n_pages);
-
- return 0;
-}
-
-static void vgem_prime_unpin(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vgem_unpin_pages(bo);
-}
-
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
-}
-
-static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
-
- return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
-}
-
-static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach, struct sg_table *sg)
-{
- struct drm_vgem_gem_object *obj;
- int npages;
-
- obj = __vgem_gem_create(dev, attach->dmabuf->size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ obj->map_wc = true;
- npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
-
- obj->table = sg;
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!obj->pages) {
- __vgem_gem_destroy(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
-static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
- void *vaddr;
-
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- if (!vaddr)
- return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
-
- return 0;
-}
-
-static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vunmap(map->vaddr);
- vgem_unpin_pages(bo);
-}
-
-static int vgem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- if (obj->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!obj->filp)
- return -ENODEV;
-
- ret = call_mmap(obj->filp, vma);
- if (ret)
- return ret;
-
- vma_set_file(vma, obj->filp);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
- .free = vgem_gem_free_object,
- .pin = vgem_prime_pin,
- .unpin = vgem_prime_unpin,
- .get_sg_table = vgem_prime_get_sg_table,
- .vmap = vgem_prime_vmap,
- .vunmap = vgem_prime_vunmap,
- .vm_ops = &vgem_gem_vm_ops,
-};
-
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
@@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
- .dumb_create = vgem_gem_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = vgem_prime_import,
- .gem_prime_import_sg_table = vgem_prime_import_sg_table,
- .gem_prime_mmap = vgem_prime_mmap,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
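
With the shmem helper conversion, vgem's hand-rolled GEM implementation collapses into DRM_GEM_SHMEM_DRIVER_OPS plus a small gem_create_object hook that forces write-combined mappings. A skeleton of the same pattern for a hypothetical driver, using only macros and fields that appear in this diff:

#include <linux/slab.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

DEFINE_DRM_GEM_FOPS(my_driver_fops);

static struct drm_gem_object *my_gem_create_object(struct drm_device *dev,
						   size_t size)
{
	struct drm_gem_shmem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	/* No begin/end CPU access ioctls, so map buffers write-combined to
	 * keep dma-buf importers coherent.
	 */
	obj->map_wc = true;
	return &obj->base;
}

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.fops = &my_driver_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.gem_create_object = my_gem_create_object,
	.name = "my-driver",
	.desc = "hypothetical shmem-backed DRM driver",
};
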
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d4e610a44e12..0c4810982530 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
+#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index e45dbf14b307..55d80b77d9b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
return 0;
}
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo))
+ return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+ return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo)) {
+ virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+ return;
+ }
+
+ drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
.cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .map_dma_buf = virtgpu_gem_map_dma_buf,
+ .unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 2e71e91278b4..93a41d018dca 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -91,9 +91,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE ||
size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -147,10 +145,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..6b45b0429fef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
+#include <linux/dma-mapping.h>
+
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return ret;
}
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ struct sg_table *sgt;
+ dma_addr_t addr;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+ // Virtio devices can access the dma-buf via its UUID. Return a stub
+ // sg_table so the dma-buf API still works.
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+ ret = -EIO;
+ goto out;
+ }
+ return sgt;
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ addr = dma_map_resource(dev, vram->vram_node.start,
+ vram->vram_node.size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ goto out;
+
+ sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+ sg_dma_address(sgt->sgl) = addr;
+ sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+ return sgt;
+out:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (sgt->nents) {
+ dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+ sg_dma_len(sgt->sgl), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9e3e1429db94..fd007f1c1776 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -94,7 +94,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -103,8 +102,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -136,7 +135,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -145,8 +143,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
+ ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -208,7 +206,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
- uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->resource->num_pages;
@@ -236,8 +233,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index b0973c27e774..e899a936a42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -328,7 +328,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
int ret = 0;
static size_t sgl_size;
static size_t sgt_size;
- struct scatterlist *sg;
if (vmw_tt->mapped)
return 0;
@@ -351,15 +350,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev),
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(
+ &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+ if (ret)
goto out_sg_alloc_fail;
- }
if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
uint64_t over_alloc =
@@ -526,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
- vmw_ttm_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ttm_tt_fini(&vmw_be->dma_ttm);
- else
- ttm_tt_fini(ttm);
-
+ ttm_tt_fini(ttm);
if (vmw_be->mob)
vmw_mob_destroy(vmw_be->mob);
@@ -578,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
dma_ttm);
unsigned int i;
+ vmw_ttm_unbind(bdev, ttm);
+
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
vmw_tt->mob = NULL;
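
The vmwgfx hunk above switches from __sg_alloc_table_from_pages(), which returned a scatterlist pointer, to sg_alloc_table_from_pages_segment(), which returns a plain error code. A minimal sketch of that call; my_build_sgt() and its parameters are illustrative, the helper and its arguments match the usage in this diff:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_build_sgt(struct sg_table *sgt, struct device *dev,
			struct page **pages, unsigned int num_pages)
{
	/* Build one sg_table over the page array, capping every segment at
	 * the device's maximum DMA segment size.
	 */
	return sg_alloc_table_from_pages_segment(sgt, pages, num_pages, 0,
						 (unsigned long)num_pages << PAGE_SHIFT,
						 dma_get_max_seg_size(dev),
						 GFP_KERNEL);
}
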
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
deleted file mode 100644
index aa8594190b50..000000000000
--- a/drivers/gpu/drm/zte/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_ZTE
- tristate "DRM Support for ZTE SoCs"
- depends on DRM && ARCH_ZX
- select DRM_KMS_CMA_HELPER
- select DRM_KMS_HELPER
- select SND_SOC_HDMI_CODEC if SND_SOC
- select VIDEOMODE_HELPERS
- help
- Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
deleted file mode 100644
index b6d966d849dd..000000000000
--- a/drivers/gpu/drm/zte/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-zxdrm-y := \
- zx_drm_drv.o \
- zx_hdmi.o \
- zx_plane.o \
- zx_tvenc.o \
- zx_vga.o \
- zx_vou.o
-
-obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_common_regs.h b/drivers/gpu/drm/zte/zx_common_regs.h
deleted file mode 100644
index b7b996db129d..000000000000
--- a/drivers/gpu/drm/zte/zx_common_regs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_COMMON_REGS_H__
-#define __ZX_COMMON_REGS_H__
-
-/* CSC registers */
-#define CSC_CTRL0 0x30
-#define CSC_COV_MODE_SHIFT 16
-#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
-#define CSC_BT601_IMAGE_RGB2YCBCR 0
-#define CSC_BT601_IMAGE_YCBCR2RGB 1
-#define CSC_BT601_VIDEO_RGB2YCBCR 2
-#define CSC_BT601_VIDEO_YCBCR2RGB 3
-#define CSC_BT709_IMAGE_RGB2YCBCR 4
-#define CSC_BT709_IMAGE_YCBCR2RGB 5
-#define CSC_BT709_VIDEO_RGB2YCBCR 6
-#define CSC_BT709_VIDEO_YCBCR2RGB 7
-#define CSC_BT2020_IMAGE_RGB2YCBCR 8
-#define CSC_BT2020_IMAGE_YCBCR2RGB 9
-#define CSC_BT2020_VIDEO_RGB2YCBCR 10
-#define CSC_BT2020_VIDEO_YCBCR2RGB 11
-#define CSC_WORK_ENABLE BIT(0)
-
-#endif /* __ZX_COMMON_REGS_H__ */
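[Annotation] These CSC conversion-mode values are consumed by the (also deleted) zx_plane.c further down, which selects BT.709 for destinations taller than 720 lines and BT.601 otherwise. A small illustrative sketch of that use, relying on the zx_writel_mask() helper from zx_drm_drv.h below; the function name here is made up for the example:

static void csc_setup(void __iomem *csc, u32 dst_h)
{
	u32 mode = (dst_h > 720) ? CSC_BT709_IMAGE_RGB2YCBCR
				 : CSC_BT601_IMAGE_RGB2YCBCR;

	/* Select the conversion matrix, then enable the CSC block. */
	zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
		       mode << CSC_COV_MODE_SHIFT);
	zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
}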
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
deleted file mode 100644
index 064056503ebb..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vou.h"
-
-static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
-
-static const struct drm_driver zx_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
- .fops = &zx_drm_fops,
- .name = "zx-vou",
- .desc = "ZTE VOU Controller DRM",
- .date = "20160811",
- .major = 1,
- .minor = 0,
-};
-
-static int zx_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&zx_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- dev_set_drvdata(dev, drm);
-
- drm_mode_config_init(drm);
- drm->mode_config.min_width = 16;
- drm->mode_config.min_height = 16;
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
- drm->mode_config.funcs = &zx_drm_mode_config_funcs;
-
- ret = component_bind_all(dev, drm);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
- goto out_unregister;
- }
-
- ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
- goto out_unbind;
- }
-
- drm_mode_config_reset(drm);
- drm_kms_helper_poll_init(drm);
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto out_poll_fini;
-
- drm_fbdev_generic_setup(drm, 32);
-
- return 0;
-
-out_poll_fini:
- drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
-out_unbind:
- component_unbind_all(dev, drm);
-out_unregister:
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
- return ret;
-}
-
-static void zx_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
- component_unbind_all(dev, drm);
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
-}
-
-static const struct component_master_ops zx_drm_master_ops = {
- .bind = zx_drm_bind,
- .unbind = zx_drm_unbind,
-};
-
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int zx_drm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *parent = dev->of_node;
- struct device_node *child;
- struct component_match *match = NULL;
- int ret;
-
- ret = devm_of_platform_populate(dev);
- if (ret)
- return ret;
-
- for_each_available_child_of_node(parent, child)
- component_match_add(dev, &match, compare_of, child);
-
- return component_master_add_with_match(dev, &zx_drm_master_ops, match);
-}
-
-static int zx_drm_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &zx_drm_master_ops);
- return 0;
-}
-
-static const struct of_device_id zx_drm_of_match[] = {
- { .compatible = "zte,zx296718-vou", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_drm_of_match);
-
-static struct platform_driver zx_drm_platform_driver = {
- .probe = zx_drm_probe,
- .remove = zx_drm_remove,
- .driver = {
- .name = "zx-drm",
- .of_match_table = zx_drm_of_match,
- },
-};
-
-static struct platform_driver *drivers[] = {
- &zx_crtc_driver,
- &zx_hdmi_driver,
- &zx_tvenc_driver,
- &zx_vga_driver,
- &zx_drm_platform_driver,
-};
-
-static int zx_drm_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_init(zx_drm_init);
-
-static void zx_drm_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_exit(zx_drm_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
-MODULE_LICENSE("GPL v2");
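[Annotation] zx_drm_init() above registers five platform drivers in one call. platform_register_drivers() registers the array in order and, if one registration fails, unregisters the ones already registered; a rough open-coded equivalent looks like this (a sketch only, not kernel source):

#include <linux/platform_device.h>

static int register_all(struct platform_driver **drvs, unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = platform_driver_register(drvs[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Unregister, in reverse order, everything registered so far. */
	while (i--)
		platform_driver_unregister(drvs[i]);
	return ret;
}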
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
deleted file mode 100644
index 80cdaf479c74..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_DRM_DRV_H__
-#define __ZX_DRM_DRV_H__
-
-extern struct platform_driver zx_crtc_driver;
-extern struct platform_driver zx_hdmi_driver;
-extern struct platform_driver zx_tvenc_driver;
-extern struct platform_driver zx_vga_driver;
-
-static inline u32 zx_readl(void __iomem *reg)
-{
- return readl_relaxed(reg);
-}
-
-static inline void zx_writel(void __iomem *reg, u32 val)
-{
- writel_relaxed(val, reg);
-}
-
-static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = zx_readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- zx_writel(reg, tmp);
-}
-
-#endif /* __ZX_DRM_DRV_H__ */
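[Annotation] zx_writel_mask() above is a plain read-modify-write: only the bits set in mask are replaced by the corresponding bits of val, everything else is preserved. A worked example with made-up values, plus two real uses from the deleted zx_plane.c:

/*
 * old register value : 0x1234
 * mask               : 0x00ff
 * val                : 0x00ab
 * result             : (0x1234 & ~0x00ff) | (0x00ab & 0x00ff) = 0x12ab
 *
 * Setting a single bit:  zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
 * Clearing it (val = 0): zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
 */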
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
deleted file mode 100644
index cd79ca0a92a9..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ /dev/null
@@ -1,760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/hdmi.h>
-#include <linux/irq.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include <sound/hdmi-codec.h>
-
-#include "zx_hdmi_regs.h"
-#include "zx_vou.h"
-
-#define ZX_HDMI_INFOFRAME_SIZE 31
-#define DDC_SEGMENT_ADDR 0x30
-
-struct zx_hdmi_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_hdmi {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_hdmi_i2c *ddc;
- struct device *dev;
- struct drm_device *drm;
- void __iomem *mmio;
- struct clk *cec_clk;
- struct clk *osc_clk;
- struct clk *xclk;
- bool sink_is_hdmi;
- bool sink_has_audio;
- struct platform_device *audio_pdev;
-};
-
-#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
-
-static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
-{
- return readl_relaxed(hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
-{
- writel_relaxed(val, hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
- u8 mask, u8 val)
-{
- u8 tmp;
-
- tmp = hdmi_readb(hdmi, offset);
- tmp = (tmp & ~mask) | (val & mask);
- hdmi_writeb(hdmi, offset, tmp);
-}
-
-static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
- union hdmi_infoframe *frame, u8 fsel)
-{
- u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
- int num;
- int i;
-
- hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
-
- num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
- if (num < 0) {
- DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
- return num;
- }
-
- for (i = 0; i < num; i++)
- hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
-
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
- TPI_INFO_TRANS_RPT);
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
- TPI_INFO_TRANS_EN);
-
- return num;
-}
-
-static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
- ret);
- return ret;
- }
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
-}
-
-static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
- ret);
- return ret;
- }
-
- /* We always use YUV444 for HDMI output. */
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
-}
-
-static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- if (hdmi->sink_is_hdmi) {
- zx_hdmi_config_video_avi(hdmi, mode);
- zx_hdmi_config_video_vsi(hdmi, mode);
- }
-}
-
-static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
-{
- /* Copy from ZTE BSP code */
- hdmi_writeb(hdmi, 0x222, 0x0);
- hdmi_writeb(hdmi, 0x224, 0x4);
- hdmi_writeb(hdmi, 0x909, 0x0);
- hdmi_writeb(hdmi, 0x7b0, 0x90);
- hdmi_writeb(hdmi, 0x7b1, 0x00);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b8, 0xaa);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b3, 0x0f);
- hdmi_writeb(hdmi, 0x7b4, 0x0f);
- hdmi_writeb(hdmi, 0x7b5, 0x55);
- hdmi_writeb(hdmi, 0x7b7, 0x03);
- hdmi_writeb(hdmi, 0x7b9, 0x12);
- hdmi_writeb(hdmi, 0x7ba, 0x32);
- hdmi_writeb(hdmi, 0x7bc, 0x68);
- hdmi_writeb(hdmi, 0x7be, 0x40);
- hdmi_writeb(hdmi, 0x7bf, 0x84);
- hdmi_writeb(hdmi, 0x7c1, 0x0f);
- hdmi_writeb(hdmi, 0x7c8, 0x02);
- hdmi_writeb(hdmi, 0x7c9, 0x03);
- hdmi_writeb(hdmi, 0x7ca, 0x40);
- hdmi_writeb(hdmi, 0x7dc, 0x31);
- hdmi_writeb(hdmi, 0x7e2, 0x04);
- hdmi_writeb(hdmi, 0x7e0, 0x06);
- hdmi_writeb(hdmi, 0x7cb, 0x68);
- hdmi_writeb(hdmi, 0x7f9, 0x02);
- hdmi_writeb(hdmi, 0x7b6, 0x02);
- hdmi_writeb(hdmi, 0x7f3, 0x0);
-}
-
-static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
-{
- /* Enable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
-
- /* Enable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
-
- /* Enable deep color packet */
- hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
-
- /* Enable HDMI/MHL mode for output */
- hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
- TEST_TXCTRL_HDMI_MODE);
-
- /* Configure reg_qc_sel */
- hdmi_writeb(hdmi, HDMICTL4, 0x3);
-
- /* Enable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
- INTR1_MONITOR_DETECT);
-
- /* Start up phy */
- zx_hdmi_phy_start(hdmi);
-}
-
-static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
-{
- /* Disable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
-
- /* Disable deep color packet */
- hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
-
- /* Disable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
-
- /* Disable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
-}
-
-static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- clk_prepare_enable(hdmi->cec_clk);
- clk_prepare_enable(hdmi->osc_clk);
- clk_prepare_enable(hdmi->xclk);
-
- zx_hdmi_hw_enable(hdmi);
-
- vou_inf_enable(VOU_HDMI, encoder->crtc);
-}
-
-static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- vou_inf_disable(VOU_HDMI, encoder->crtc);
-
- zx_hdmi_hw_disable(hdmi);
-
- clk_disable_unprepare(hdmi->xclk);
- clk_disable_unprepare(hdmi->osc_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-}
-
-static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
- .enable = zx_hdmi_encoder_enable,
- .disable = zx_hdmi_encoder_disable,
- .mode_set = zx_hdmi_encoder_mode_set,
-};
-
-static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
- struct edid *edid;
- int ret;
-
- edid = drm_get_edid(connector, &hdmi->ddc->adap);
- if (!edid)
- return 0;
-
- hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
- hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
- .get_modes = zx_hdmi_connector_get_modes,
- .mode_valid = zx_hdmi_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
-
- return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_hdmi_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &zx_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- &hdmi->ddc->adap);
- drm_connector_helper_add(&hdmi->connector,
- &zx_hdmi_connector_helper_funcs);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
-
-static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
-
- drm_helper_hpd_irq_event(hdmi->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
- u8 lstat;
-
- lstat = hdmi_readb(hdmi, L1_INTR_STAT);
-
- /* Monitor detect/HPD interrupt */
- if (lstat & L1_INTR_STAT_INTR1) {
- u8 stat;
-
- stat = hdmi_readb(hdmi, INTR1_STAT);
- hdmi_writeb(hdmi, INTR1_STAT, stat);
-
- if (stat & INTR1_MONITOR_DETECT)
- return IRQ_WAKE_THREAD;
- }
-
- return IRQ_NONE;
-}
-
-static int zx_hdmi_audio_startup(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &hdmi->encoder;
-
- vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
-
- return 0;
-}
-
-static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- /* Disable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
-}
-
-static inline int zx_hdmi_audio_get_n(unsigned int fs)
-{
- unsigned int n;
-
- if (fs && (fs % 44100) == 0)
- n = 6272 * (fs / 44100);
- else
- n = fs * 128 / 1000;
-
- return n;
-}
-
-static int zx_hdmi_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_infoframe *cea = &params->cea;
- union hdmi_infoframe frame;
- int n;
-
- /* We only support spdif for now */
- if (daifmt->fmt != HDMI_SPDIF) {
- DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
- return -EINVAL;
- }
-
- switch (params->sample_width) {
- case 16:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_16BIT);
- break;
- case 20:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_20BIT);
- break;
- case 24:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_24BIT);
- break;
- default:
- DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
- params->sample_width);
- return -EINVAL;
- }
-
- /* CTS is calculated by hardware, and we only need to take care of N */
- n = zx_hdmi_audio_get_n(params->sample_rate);
- hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
- hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
- hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
-
- /* Enable spdif mode */
- hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
-
- /* Enable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
-
- memcpy(&frame.audio, cea, sizeof(*cea));
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
-}
-
-static int zx_hdmi_audio_mute(struct device *dev, void *data,
- bool enable, int direction)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- if (enable)
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
- TPI_AUD_MUTE);
- else
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
-
- return 0;
-}
-
-static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &hdmi->connector;
-
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
-
- return 0;
-}
-
-static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
- .audio_startup = zx_hdmi_audio_startup,
- .hw_params = zx_hdmi_audio_hw_params,
- .audio_shutdown = zx_hdmi_audio_shutdown,
- .mute_stream = zx_hdmi_audio_mute,
- .get_eld = zx_hdmi_audio_get_eld,
- .no_capture_mute = 1,
-};
-
-static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
- .ops = &zx_hdmi_codec_ops,
- .spdif = 1,
-};
-
-static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &zx_hdmi_codec_pdata,
- sizeof(zx_hdmi_codec_pdata));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- hdmi->audio_pdev = pdev;
-
- return 0;
-}
-
-static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- int retry = 0;
- int ret = 0;
-
-	/* Bits [9:8] of the byte count */
-	hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
-	/* Bits [7:0] of the byte count */
-	hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
-
- /* Clear FIFO */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
-
- /* Kick off the read */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
- DDC_CMD_SEQUENTIAL_READ);
-
- while (len > 0) {
- int cnt, i;
-
- /* FIFO needs some time to get ready */
- usleep_range(500, 1000);
-
- cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
- if (cnt == 0) {
- if (++retry > 5) {
- DRM_DEV_ERROR(hdmi->dev,
- "DDC FIFO read timed out!");
- return -ETIMEDOUT;
- }
- continue;
- }
-
- for (i = 0; i < cnt; i++)
- *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
- len -= cnt;
- }
-
- return ret;
-}
-
-static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
- if ((msg->len != 1) ||
- ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
- return -EINVAL;
-
- if (msg->addr == DDC_SEGMENT_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
- else if (msg->addr == DDC_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
-
- hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
-
- return 0;
-}
-
-static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
- struct zx_hdmi_i2c *ddc = hdmi->ddc;
- int i, ret = 0;
-
- mutex_lock(&ddc->lock);
-
- /* Enable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
-
- for (i = 0; i < num; i++) {
- DRM_DEV_DEBUG(hdmi->dev,
- "xfer: num: %d/%d, len: %d, flags: %#x\n",
- i + 1, num, msgs[i].len, msgs[i].flags);
-
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
- else
- ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- /* Disable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
-{
- * The CRM cannot directly provide a suitable frequency, so we have to
- * request a multiplied rate from the CRM and use the divider in the VOU
- * to get the desired one.
-static const struct i2c_algorithm zx_hdmi_algorithm = {
- .master_xfer = zx_hdmi_i2c_xfer,
- .functionality = zx_hdmi_i2c_func,
-};
-
-static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
-{
- struct i2c_adapter *adap;
- struct zx_hdmi_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- hdmi->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = hdmi->dev;
- adap->algo = &zx_hdmi_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
- ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, hdmi);
-
- return 0;
-}
-
-static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_hdmi *hdmi;
- int irq;
- int ret;
-
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
-
- hdmi->dev = dev;
- hdmi->drm = drm;
-
- dev_set_drvdata(dev, hdmi);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(hdmi->mmio)) {
- ret = PTR_ERR(hdmi->mmio);
- DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
- if (IS_ERR(hdmi->cec_clk)) {
- ret = PTR_ERR(hdmi->cec_clk);
- DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
- if (IS_ERR(hdmi->osc_clk)) {
- ret = PTR_ERR(hdmi->osc_clk);
- DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
- if (IS_ERR(hdmi->xclk)) {
- ret = PTR_ERR(hdmi->xclk);
- DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_ddc_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_audio_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_register(drm, hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
- zx_hdmi_irq_thread, IRQF_SHARED,
- dev_name(dev), hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- hdmi->connector.funcs->destroy(&hdmi->connector);
- hdmi->encoder.funcs->destroy(&hdmi->encoder);
-
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
-}
-
-static const struct component_ops zx_hdmi_component_ops = {
- .bind = zx_hdmi_bind,
- .unbind = zx_hdmi_unbind,
-};
-
-static int zx_hdmi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_hdmi_component_ops);
-}
-
-static int zx_hdmi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_hdmi_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_hdmi_of_match[] = {
- { .compatible = "zte,zx296718-hdmi", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
-
-struct platform_driver zx_hdmi_driver = {
- .probe = zx_hdmi_probe,
- .remove = zx_hdmi_remove,
- .driver = {
- .name = "zx-hdmi",
- .of_match_table = zx_hdmi_of_match,
- },
-};
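[Annotation] zx_hdmi_audio_get_n() in the file above computes the HDMI audio clock regeneration parameter N (CTS is left to the hardware, as the hw_params comment notes). Worked values for common sample rates, which match the figures usually recommended for a 128*fs audio clock:

/*
 * fs = 32000 Hz : 32000 * 128 / 1000      = 4096
 * fs = 44100 Hz : 6272 * (44100 / 44100)  = 6272
 * fs = 48000 Hz : 48000 * 128 / 1000      = 6144
 * fs = 88200 Hz : 6272 * (88200 / 44100)  = 12544
 * fs = 96000 Hz : 96000 * 128 / 1000      = 12288
 *
 * N_SVAL1, N_SVAL2 and N_SVAL3 then receive bits [7:0], [15:8] and [19:16] of N.
 */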
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
deleted file mode 100644
index 397949e64eff..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi_regs.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_HDMI_REGS_H__
-#define __ZX_HDMI_REGS_H__
-
-#define FUNC_SEL 0x000b
-#define FUNC_HDMI_EN BIT(0)
-#define CLKPWD 0x000d
-#define CLKPWD_PDIDCK BIT(2)
-#define P2T_CTRL 0x0066
-#define P2T_DC_PKT_EN BIT(7)
-#define L1_INTR_STAT 0x007e
-#define L1_INTR_STAT_INTR1 BIT(0)
-#define INTR1_STAT 0x008f
-#define INTR1_MASK 0x0095
-#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
-#define ZX_DDC_ADDR 0x00ed
-#define ZX_DDC_SEGM 0x00ee
-#define ZX_DDC_OFFSET 0x00ef
-#define ZX_DDC_DIN_CNT1 0x00f0
-#define ZX_DDC_DIN_CNT2 0x00f1
-#define ZX_DDC_CMD 0x00f3
-#define DDC_CMD_MASK 0xf
-#define DDC_CMD_CLEAR_FIFO 0x9
-#define DDC_CMD_SEQUENTIAL_READ 0x2
-#define ZX_DDC_DATA 0x00f4
-#define ZX_DDC_DOUT_CNT 0x00f5
-#define DDC_DOUT_CNT_MASK 0x1f
-#define TEST_TXCTRL 0x00f7
-#define TEST_TXCTRL_HDMI_MODE BIT(1)
-#define HDMICTL4 0x0235
-#define TPI_HPD_RSEN 0x063b
-#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
-#define TPI_INFO_FSEL 0x06bf
-#define FSEL_AVI 0
-#define FSEL_GBD 1
-#define FSEL_AUDIO 2
-#define FSEL_SPD 3
-#define FSEL_MPEG 4
-#define FSEL_VSIF 5
-#define TPI_INFO_B0 0x06c0
-#define TPI_INFO_EN 0x06df
-#define TPI_INFO_TRANS_EN BIT(7)
-#define TPI_INFO_TRANS_RPT BIT(6)
-#define TPI_DDC_MASTER_EN 0x06f8
-#define HW_DDC_MASTER BIT(7)
-#define N_SVAL1 0xa03
-#define N_SVAL2 0xa04
-#define N_SVAL3 0xa05
-#define AUD_EN 0xa13
-#define AUD_IN_EN BIT(0)
-#define AUD_MODE 0xa14
-#define SPDIF_EN BIT(1)
-#define TPI_AUD_CONFIG 0xa62
-#define SPDIF_SAMPLE_SIZE_SHIFT 6
-#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define TPI_AUD_MUTE BIT(4)
-
-#endif /* __ZX_HDMI_REGS_H__ */
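[Annotation] The ZX_DDC_* registers above drive the EDID path in zx_hdmi_i2c_write()/zx_hdmi_i2c_read(). A worked register sequence for one 128-byte EDID block read from the standard 0x50 DDC address, as an illustration of that code rather than an extra code path:

/*
 * ZX_DDC_ADDR     <= 0x50 << 1 = 0xa0        8-bit I2C address
 * ZX_DDC_OFFSET   <= 0x00                    start of the block
 * ZX_DDC_DIN_CNT2 <= (128 >> 8) & 0xff = 0   high bits of the byte count
 * ZX_DDC_DIN_CNT1 <= 128 & 0xff = 0x80       low bits of the byte count
 * ZX_DDC_CMD      <= DDC_CMD_CLEAR_FIFO, then DDC_CMD_SEQUENTIAL_READ
 *
 * The data is then drained from ZX_DDC_DATA in chunks of up to
 * ZX_DDC_DOUT_CNT bytes, exactly as zx_hdmi_i2c_read() does.
 */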
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
deleted file mode 100644
index 93bcca428e35..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ /dev/null
@@ -1,537 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_plane_regs.h"
-#include "zx_vou.h"
-
-static const uint32_t gl_formats[] = {
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ARGB4444,
-};
-
-static const uint32_t vl_formats[] = {
- DRM_FORMAT_NV12, /* Semi-planar YUV420 */
- DRM_FORMAT_YUV420, /* Planar YUV420 */
- DRM_FORMAT_YUYV, /* Packed YUV422 */
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YUV444, /* YUV444 8bit */
- /*
- * TODO: add formats below that HW supports:
- * - YUV420 P010
- * - YUV420 Hantro
- * - YUV444 10bit
- */
-};
-
-#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-
-static int zx_vl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
- int min_scale = FRAC_16_16(1, 8);
- int max_scale = FRAC_16_16(8, 1);
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- min_scale, max_scale,
- true, true);
-}
-
-static int zx_vl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- return VL_FMT_YUV420;
- case DRM_FORMAT_YUV420:
- return VL_YUV420_PLANAR | VL_FMT_YUV420;
- case DRM_FORMAT_YUYV:
- return VL_YUV422_YUYV | VL_FMT_YUV422;
- case DRM_FORMAT_YVYU:
- return VL_YUV422_YVYU | VL_FMT_YUV422;
- case DRM_FORMAT_UYVY:
- return VL_YUV422_UYVY | VL_FMT_YUV422;
- case DRM_FORMAT_VYUY:
- return VL_YUV422_VYUY | VL_FMT_YUV422;
- case DRM_FORMAT_YUV444:
- return VL_FMT_YUV444_8BIT;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_vl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE);
-}
-
-static inline void zx_vl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1);
-}
-
-static int zx_vl_rsz_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_YUV420:
- return RSZ_VL_FMT_YCBCR420;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return RSZ_VL_FMT_YCBCR422;
- case DRM_FORMAT_YUV444:
- return RSZ_VL_FMT_YCBCR444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline u32 rsz_step_value(u32 src, u32 dst)
-{
- u32 val = 0;
-
- if (src == dst)
- val = 0;
- else if (src < dst)
- val = RSZ_PARA_STEP((src << 16) / dst);
- else if (src > dst)
- val = RSZ_DATA_STEP(src / dst) |
- RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
-
- return val;
-}
-
-static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
- u32 src_w, u32 src_h, u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
- u32 src_chroma_w = src_w;
- u32 src_chroma_h = src_h;
- int fmt;
-
- /* Set up source and destination resolution */
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- /* Configure data format for VL RSZ */
- fmt = zx_vl_rsz_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt);
-
- /* Calculate Chroma height and width */
- if (fmt == RSZ_VL_FMT_YCBCR420) {
- src_chroma_w = src_w >> 1;
- src_chroma_h = src_h >> 1;
- } else if (fmt == RSZ_VL_FMT_YCBCR422) {
- src_chroma_w = src_w >> 1;
- }
-
- /* Set up Luma and Chroma step registers */
- zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w));
- zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h));
- zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w));
- zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h));
-
- zx_vl_rsz_set_update(zplane);
-}
-
-static void zx_vl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_rect *src = &new_state->src;
- struct drm_rect *dst = &new_state->dst;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *hbsc = zplane->hbsc;
- void __iomem *paddr_reg;
- dma_addr_t paddr;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- uint32_t format;
- int fmt;
- int i;
-
- if (!fb)
- return;
-
- format = fb->format->format;
-
- src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- dst_x = dst->x1;
- dst_y = dst->y1;
- dst_w = drm_rect_width(dst);
- dst_h = drm_rect_height(dst);
-
- /* Set up data address registers for Y, Cb and Cr planes */
- paddr_reg = layer + VL_Y;
- for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(fb, i);
- paddr = cma_obj->paddr + fb->offsets[i];
- paddr += src_y * fb->pitches[i];
- paddr += src_x * fb->format->cpp[i];
- zx_writel(paddr_reg, paddr);
- paddr_reg += 4;
- }
-
- /* Set up source height/width register */
- zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + VL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Strides of Cb and Cr planes should be identical */
- zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) |
- CHROMA_STRIDE(fb->pitches[1]));
-
- /* Set up video layer data format */
- fmt = zx_vl_get_fmt(format);
- if (fmt >= 0)
- zx_writel(layer + VL_CTRL1, fmt);
-
-	/* Always use the scaler since it is present (setting this bit selects non-bypass mode) */
- zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE,
- VL_SCALER_BYPASS_MODE);
-
- zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_vl_set_update(zplane);
-}
-
-static void zx_plane_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- void __iomem *hbsc = zplane->hbsc;
-
- zx_vou_layer_disable(plane, old_state);
-
- /* Disable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
-}
-
-static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
- .atomic_check = zx_vl_plane_atomic_check,
- .atomic_update = zx_vl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static int zx_gl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
-}
-
-static int zx_gl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- return GL_FMT_ARGB8888;
- case DRM_FORMAT_RGB888:
- return GL_FMT_RGB888;
- case DRM_FORMAT_RGB565:
- return GL_FMT_RGB565;
- case DRM_FORMAT_ARGB1555:
- return GL_FMT_ARGB1555;
- case DRM_FORMAT_ARGB4444:
- return GL_FMT_ARGB4444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_gl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
-}
-
-static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
-}
-
-static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
- u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
-
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- zx_gl_rsz_set_update(zplane);
-}
-
-static void zx_gl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *csc = zplane->csc;
- void __iomem *hbsc = zplane->hbsc;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- unsigned int bpp;
- uint32_t format;
- dma_addr_t paddr;
- u32 stride;
- int fmt;
-
- if (!fb)
- return;
-
- format = fb->format->format;
- stride = fb->pitches[0];
-
- src_x = new_state->src_x >> 16;
- src_y = new_state->src_y >> 16;
- src_w = new_state->src_w >> 16;
- src_h = new_state->src_h >> 16;
-
- dst_x = new_state->crtc_x;
- dst_y = new_state->crtc_y;
- dst_w = new_state->crtc_w;
- dst_h = new_state->crtc_h;
-
- bpp = fb->format->cpp[0];
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- paddr = cma_obj->paddr + fb->offsets[0];
- paddr += src_y * stride + src_x * bpp / 8;
- zx_writel(layer + GL_ADDR, paddr);
-
- /* Set up source height/width register */
- zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + GL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Set up stride register */
- zx_writel(layer + GL_STRIDE, stride & 0xffff);
-
- /* Set up graphic layer data format */
- fmt = zx_gl_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
- fmt << GL_DATA_FMT_SHIFT);
-
- /* Initialize global alpha with a sane value */
- zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
- 0xff << GL_GLOBAL_ALPHA_SHIFT);
-
- /* Setup CSC for the GL */
- if (dst_h > 720)
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- else
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
-
-	/* Always use the scaler since it is present (setting this bit selects non-bypass mode) */
- zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
- GL_SCALER_BYPASS_MODE);
-
- zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_gl_set_update(zplane);
-}
-
-static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
- .atomic_check = zx_gl_plane_atomic_check,
- .atomic_update = zx_gl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static const struct drm_plane_funcs zx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-void zx_plane_set_update(struct drm_plane *plane)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
-
- /* Do nothing if the plane is not enabled */
- if (!plane->state->crtc)
- return;
-
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- zx_gl_rsz_set_update(zplane);
- zx_gl_set_update(zplane);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- zx_vl_rsz_set_update(zplane);
- zx_vl_set_update(zplane);
- break;
- default:
- WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
- }
-}
-
-static void zx_plane_hbsc_init(struct zx_plane *zplane)
-{
- void __iomem *hbsc = zplane->hbsc;
-
- /*
-	 * Initialize HBSC block with a sane configuration per recommendation
- * from ZTE BSP code.
- */
- zx_writel(hbsc + HBSC_SATURATION, 0x200);
- zx_writel(hbsc + HBSC_HUE, 0x0);
- zx_writel(hbsc + HBSC_BRIGHT, 0x0);
- zx_writel(hbsc + HBSC_CONTRAST, 0x200);
-
- zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
-}
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type)
-{
- const struct drm_plane_helper_funcs *helper;
- struct drm_plane *plane = &zplane->plane;
- struct device *dev = zplane->dev;
- const uint32_t *formats;
- unsigned int format_count;
- int ret;
-
- zx_plane_hbsc_init(zplane);
-
- switch (type) {
- case DRM_PLANE_TYPE_PRIMARY:
- helper = &zx_gl_plane_helper_funcs;
- formats = gl_formats;
- format_count = ARRAY_SIZE(gl_formats);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- helper = &zx_vl_plane_helper_funcs;
- formats = vl_formats;
- format_count = ARRAY_SIZE(vl_formats);
- break;
- default:
- return -ENODEV;
- }
-
- ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
- &zx_plane_funcs, formats, format_count,
- NULL, type, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
- return ret;
- }
-
- drm_plane_helper_add(plane, helper);
-
- return 0;
-}
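[Annotation] The plane code above works in 16.16 fixed point for both the video-layer scaling limits and the resizer step values. A couple of worked numbers:

/*
 * Scaling limits used by zx_vl_plane_atomic_check():
 *   min_scale = FRAC_16_16(1, 8) = (1 << 16) / 8 = 8192   (1/8 downscale)
 *   max_scale = FRAC_16_16(8, 1) = (8 << 16) / 1 = 524288 (8x upscale)
 *
 * rsz_step_value() for a 1920 -> 1280 horizontal downscale (src > dst):
 *     RSZ_DATA_STEP(1920 / 1280)                    -> integer step 1
 *   | RSZ_PARA_STEP(((1920 << 16) / 1280) & 0xffff) -> fraction 0x8000
 *   i.e. an effective step of 1.5 source pixels per output pixel.
 */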
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
deleted file mode 100644
index 5a7cc8b3b985..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_H__
-#define __ZX_PLANE_H__
-
-struct zx_plane {
- struct drm_plane plane;
- struct device *dev;
- void __iomem *layer;
- void __iomem *csc;
- void __iomem *hbsc;
- void __iomem *rsz;
- const struct vou_layer_bits *bits;
-};
-
-#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type);
-void zx_plane_set_update(struct drm_plane *plane);
-
-#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
deleted file mode 100644
index ce830637a92d..000000000000
--- a/drivers/gpu/drm/zte/zx_plane_regs.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_REGS_H__
-#define __ZX_PLANE_REGS_H__
-
-/* GL registers */
-#define GL_CTRL0 0x00
-#define GL_UPDATE BIT(5)
-#define GL_CTRL1 0x04
-#define GL_DATA_FMT_SHIFT 0
-#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
-#define GL_FMT_ARGB8888 0
-#define GL_FMT_RGB888 1
-#define GL_FMT_RGB565 2
-#define GL_FMT_ARGB1555 3
-#define GL_FMT_ARGB4444 4
-#define GL_CTRL2 0x08
-#define GL_GLOBAL_ALPHA_SHIFT 8
-#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
-#define GL_CTRL3 0x0c
-#define GL_SCALER_BYPASS_MODE BIT(0)
-#define GL_STRIDE 0x18
-#define GL_ADDR 0x1c
-#define GL_SRC_SIZE 0x38
-#define GL_SRC_W_SHIFT 16
-#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
-#define GL_SRC_H_SHIFT 0
-#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
-#define GL_POS_START 0x9c
-#define GL_POS_END 0xa0
-#define GL_POS_X_SHIFT 16
-#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
-#define GL_POS_Y_SHIFT 0
-#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
-
-#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
-#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
-#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
-#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
-
-/* VL registers */
-#define VL_CTRL0 0x00
-#define VL_UPDATE BIT(3)
-#define VL_CTRL1 0x04
-#define VL_YUV420_PLANAR BIT(5)
-#define VL_YUV422_SHIFT 3
-#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT)
-#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT)
-#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT)
-#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT)
-#define VL_FMT_YUV420 0
-#define VL_FMT_YUV422 1
-#define VL_FMT_YUV420_P010 2
-#define VL_FMT_YUV420_HANTRO 3
-#define VL_FMT_YUV444_8BIT 4
-#define VL_FMT_YUV444_10BIT 5
-#define VL_CTRL2 0x08
-#define VL_SCALER_BYPASS_MODE BIT(0)
-#define VL_STRIDE 0x0c
-#define LUMA_STRIDE_SHIFT 16
-#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT)
-#define CHROMA_STRIDE_SHIFT 0
-#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT)
-#define VL_SRC_SIZE 0x10
-#define VL_Y 0x14
-#define VL_POS_START 0x30
-#define VL_POS_END 0x34
-
-#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK)
-#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK)
-
-/* RSZ registers */
-#define RSZ_SRC_CFG 0x00
-#define RSZ_DEST_CFG 0x04
-#define RSZ_ENABLE_CFG 0x14
-
-#define RSZ_VL_LUMA_HOR 0x08
-#define RSZ_VL_LUMA_VER 0x0c
-#define RSZ_VL_CHROMA_HOR 0x10
-#define RSZ_VL_CHROMA_VER 0x14
-#define RSZ_VL_CTRL_CFG 0x18
-#define RSZ_VL_FMT_SHIFT 3
-#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_ENABLE_CFG 0x1c
-
-#define RSZ_VER_SHIFT 16
-#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
-#define RSZ_HOR_SHIFT 0
-#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
-
-#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
-#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
-
-#define RSZ_DATA_STEP_SHIFT 16
-#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT)
-#define RSZ_PARA_STEP_SHIFT 0
-#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT)
-
-#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK)
-#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK)
-
-/* HBSC registers */
-#define HBSC_SATURATION 0x00
-#define HBSC_HUE 0x04
-#define HBSC_BRIGHT 0x08
-#define HBSC_CONTRAST 0x0c
-#define HBSC_THRESHOLD_COL1 0x10
-#define HBSC_THRESHOLD_COL2 0x14
-#define HBSC_THRESHOLD_COL3 0x18
-#define HBSC_CTRL0 0x28
-#define HBSC_CTRL_EN BIT(2)
-
-#endif /* __ZX_PLANE_REGS_H__ */
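[Annotation] The GL_SRC_* and GL_POS_* macros above pack a width/height or x/y pair into a single register. A worked example for a 1920x1080 graphics layer placed at the origin, as zx_gl_plane_atomic_update() programs GL_POS_END:

/*
 * GL_POS_X(0 + 1920) = (1920 << 16) & GL_POS_X_MASK = 0x07800000
 * GL_POS_Y(0 + 1080) =  1080        & GL_POS_Y_MASK = 0x00000438
 * GL_POS_END value   = 0x07800000 | 0x00000438      = 0x07800438
 */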
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
deleted file mode 100644
index d8a89ba383bc..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_tvenc_regs.h"
-#include "zx_vou.h"
-
-struct zx_tvenc_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_tvenc {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct device *dev;
- void __iomem *mmio;
- const struct vou_inf *inf;
- struct zx_tvenc_pwrctrl pwrctrl;
-};
-
-#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x)
-
-struct zx_tvenc_mode {
- struct drm_display_mode mode;
- u32 video_info;
- u32 video_res;
- u32 field1_param;
- u32 field2_param;
- u32 burst_line_odd1;
- u32 burst_line_even1;
- u32 burst_line_odd2;
- u32 burst_line_even2;
- u32 line_timing_param;
- u32 weight_value;
- u32 blank_black_level;
- u32 burst_level;
- u32 control_param;
- u32 sub_carrier_phase1;
- u32 phase_line_incr_cvbs;
-};
-
-/*
- * The CRM cannot directly provide a suitable frequency, and we have to
- * ask a multiplied rate from CRM and use the divider in VOU to get the
- * desired one.
- */
-#define TVENC_CLOCK_MULTIPLIER 4
-
-static const struct zx_tvenc_mode tvenc_mode_pal = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 12,
- .hsync_end = 720 + 12 + 2,
- .htotal = 720 + 12 + 2 + 130,
- .vdisplay = 576,
- .vsync_start = 576 + 2,
- .vsync_end = 576 + 2 + 2,
- .vtotal = 576 + 2 + 2 + 20,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040040,
- .video_res = 0x05a9c760,
- .field1_param = 0x0004d416,
- .field2_param = 0x0009b94f,
- .burst_line_odd1 = 0x0004d406,
- .burst_line_even1 = 0x0009b53e,
- .burst_line_odd2 = 0x0004d805,
- .burst_line_even2 = 0x0009b93f,
- .line_timing_param = 0x06a96fdf,
- .weight_value = 0x00c188a0,
- .blank_black_level = 0x0000fcfc,
- .burst_level = 0x00001595,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x1504c566,
- .phase_line_incr_cvbs = 0xc068db8c,
-};
-
-static const struct zx_tvenc_mode tvenc_mode_ntsc = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 16,
- .hsync_end = 720 + 16 + 2,
- .htotal = 720 + 16 + 2 + 120,
- .vdisplay = 480,
- .vsync_start = 480 + 3,
- .vsync_end = 480 + 3 + 2,
- .vtotal = 480 + 3 + 2 + 17,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040080,
- .video_res = 0x05a8375a,
- .field1_param = 0x00041817,
- .field2_param = 0x0008351e,
- .burst_line_odd1 = 0x00041006,
- .burst_line_even1 = 0x0008290d,
- .burst_line_odd2 = 0x00000000,
- .burst_line_even2 = 0x00000000,
- .line_timing_param = 0x06a8ef9e,
- .weight_value = 0x00b68197,
- .blank_black_level = 0x0000f0f0,
- .burst_level = 0x0000009c,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x10f83e10,
- .phase_line_incr_cvbs = 0x80000000,
-};
-
-static const struct zx_tvenc_mode *tvenc_modes[] = {
- &tvenc_mode_pal,
- &tvenc_mode_ntsc,
-};
-
-static const struct zx_tvenc_mode *
-zx_tvenc_find_zmode(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
-
- if (drm_mode_equal(mode, &zmode->mode))
- return zmode;
- }
-
- return NULL;
-}
-
-static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- const struct zx_tvenc_mode *zmode;
- struct vou_div_config configs[] = {
- { VOU_DIV_INF, VOU_DIV_4 },
- { VOU_DIV_TVENC, VOU_DIV_1 },
- { VOU_DIV_LAYER, VOU_DIV_2 },
- };
-
- zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs));
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n");
- return;
- }
-
- zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info);
- zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res);
- zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param);
- zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param);
- zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1);
- zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1);
- zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2);
- zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2);
- zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM,
- zmode->line_timing_param);
- zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value);
- zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL,
- zmode->blank_black_level);
- zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level);
- zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param);
- zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1,
- zmode->sub_carrier_phase1);
- zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS,
- zmode->phase_line_incr_cvbs);
-}
-
-static void zx_tvenc_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- /* Set bit to power up TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_TV_ENC, encoder->crtc);
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 1);
-}
-
-static void zx_tvenc_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 0);
-
- vou_inf_disable(VOU_TV_ENC, encoder->crtc);
-
- /* Clear bit to power down TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
- .enable = zx_tvenc_encoder_enable,
- .disable = zx_tvenc_encoder_disable,
- .mode_set = zx_tvenc_encoder_mode_set,
-};
-
-static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- struct device *dev = tvenc->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &zmode->mode);
- if (!mode) {
- DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n");
- continue;
- }
-
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
-static enum drm_mode_status
-zx_tvenc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- const struct zx_tvenc_mode *zmode;
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name);
- return MODE_NOMODE;
- }
-
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
- .get_modes = zx_tvenc_connector_get_modes,
- .mode_valid = zx_tvenc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
-{
- struct drm_encoder *encoder = &tvenc->encoder;
- struct drm_connector *connector = &tvenc->connector;
-
- /*
-	 * The tvenc is designed to use the aux channel, as that channel has
-	 * a deflicker block.
- */
- encoder->possible_crtcs = BIT(1);
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
-
- connector->interlace_allowed = true;
-
- drm_connector_init(drm, connector, &zx_tvenc_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
- drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
-
-static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc)
-{
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
- struct device *dev = tvenc->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,tvenc-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_tvenc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_tvenc *tvenc;
- int ret;
-
- tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL);
- if (!tvenc)
- return -ENOMEM;
-
- tvenc->dev = dev;
- dev_set_drvdata(dev, tvenc);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tvenc->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(tvenc->mmio)) {
- ret = PTR_ERR(tvenc->mmio);
- DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_pwrctrl_init(tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_register(drm, tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_tvenc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- /* Nothing to do */
-}
-
-static const struct component_ops zx_tvenc_component_ops = {
- .bind = zx_tvenc_bind,
- .unbind = zx_tvenc_unbind,
-};
-
-static int zx_tvenc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_tvenc_component_ops);
-}
-
-static int zx_tvenc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_tvenc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_tvenc_of_match[] = {
- { .compatible = "zte,zx296718-tvenc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_tvenc_of_match);
-
-struct platform_driver zx_tvenc_driver = {
- .probe = zx_tvenc_probe,
- .remove = zx_tvenc_remove,
- .driver = {
- .name = "zx-tvenc",
- .of_match_table = zx_tvenc_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h
deleted file mode 100644
index 40f033109374..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc_regs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#ifndef __ZX_TVENC_REGS_H__
-#define __ZX_TVENC_REGS_H__
-
-#define VENC_VIDEO_INFO 0x04
-#define VENC_VIDEO_RES 0x08
-#define VENC_FIELD1_PARAM 0x10
-#define VENC_FIELD2_PARAM 0x14
-#define VENC_LINE_O_1 0x18
-#define VENC_LINE_E_1 0x1c
-#define VENC_LINE_O_2 0x20
-#define VENC_LINE_E_2 0x24
-#define VENC_LINE_TIMING_PARAM 0x28
-#define VENC_WEIGHT_VALUE 0x2c
-#define VENC_BLANK_BLACK_LEVEL 0x30
-#define VENC_BURST_LEVEL 0x34
-#define VENC_CONTROL_PARAM 0x3c
-#define VENC_SUB_CARRIER_PHASE1 0x40
-#define VENC_PHASE_LINE_INCR_CVBS 0x48
-#define VENC_ENABLE 0xa8
-
-#endif /* __ZX_TVENC_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
deleted file mode 100644
index 0f9bbb7e3b8d..000000000000
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ /dev/null
@@ -1,527 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vga_regs.h"
-#include "zx_vou.h"
-
-struct zx_vga_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_vga_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_vga {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_vga_i2c *ddc;
- struct device *dev;
- void __iomem *mmio;
- struct clk *i2c_wclk;
- struct zx_vga_pwrctrl pwrctrl;
- struct completion complete;
- bool connected;
-};
-
-#define to_zx_vga(x) container_of(x, struct zx_vga, x)
-
-static void zx_vga_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- /* Set bit to power up VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_VGA, encoder->crtc);
-}
-
-static void zx_vga_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- vou_inf_disable(VOU_VGA, encoder->crtc);
-
- /* Clear bit to power down VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
- .enable = zx_vga_encoder_enable,
- .disable = zx_vga_encoder_disable,
-};
-
-static int zx_vga_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_vga *vga = to_zx_vga(connector);
- struct edid *edid;
- int ret;
-
- /*
- * Clear both detection bits to switch the I2C bus from device
- * detection to EDID reading.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, 0);
-
- edid = drm_get_edid(connector, &vga->ddc->adap);
- if (!edid) {
- /*
- * If EDID reading fails, we set the device state into
- * disconnected. Locking is not required here, since the
- * VGA_AUTO_DETECT_SEL register write in irq handler cannot
- * be triggered when both detection bits are cleared as above.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return 0;
- }
-
- /*
- * As EDID reading succeeded, the device must be connected, so we
- * set up the detection bit for the unplug interrupt here.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_vga_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_vga_connector_helper_funcs = {
- .get_modes = zx_vga_connector_get_modes,
- .mode_valid = zx_vga_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_vga *vga = to_zx_vga(connector);
-
- return vga->connected ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_vga_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_vga_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
-{
- struct drm_encoder *encoder = &vga->encoder;
- struct drm_connector *connector = &vga->connector;
- struct device *dev = vga->dev;
- int ret;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
- return ret;
- }
-
- drm_encoder_helper_add(encoder, &zx_vga_encoder_helper_funcs);
-
- vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init_with_ddc(drm, connector,
- &zx_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &vga->ddc->adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
- goto clean_encoder;
- }
-
- drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
- goto clean_connector;
- }
-
- return 0;
-
-clean_connector:
- drm_connector_cleanup(connector);
-clean_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
-}
-
-static int zx_vga_pwrctrl_init(struct zx_vga *vga)
-{
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
- struct device *dev = vga->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,vga-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_vga_i2c_read(struct zx_vga *vga, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- u32 offset = 0;
- int i;
-
- reinit_completion(&vga->complete);
-
- /* Select combo write */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_COMBO, VGA_CMD_COMBO);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_RW, 0);
-
- while (len > 0) {
- u32 cnt;
-
- /* Clear RX FIFO */
- zx_writel_mask(vga->mmio + VGA_RXF_CTRL, VGA_RX_FIFO_CLEAR,
- VGA_RX_FIFO_CLEAR);
-
- /* Data offset to read from */
- zx_writel(vga->mmio + VGA_SUB_ADDR, offset);
-
- /* Kick off the transfer */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS,
- VGA_CMD_TRANS);
-
- if (!wait_for_completion_timeout(&vga->complete,
- msecs_to_jiffies(1000))) {
- DRM_DEV_ERROR(vga->dev, "transfer timeout\n");
- return -ETIMEDOUT;
- }
-
- cnt = zx_readl(vga->mmio + VGA_RXF_STATUS);
- cnt = (cnt & VGA_RXF_COUNT_MASK) >> VGA_RXF_COUNT_SHIFT;
- /* FIFO status may report more data than we need to read */
- cnt = min_t(u32, len, cnt);
-
- for (i = 0; i < cnt; i++)
- *buf++ = zx_readl(vga->mmio + VGA_DATA);
-
- len -= cnt;
- offset += cnt;
- }
-
- return 0;
-}
-
-static int zx_vga_i2c_write(struct zx_vga *vga, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
- if ((msg->len != 1) || ((msg->addr != DDC_ADDR)))
- return -EINVAL;
-
- /* Hardware will take care of the slave address shifting */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, msg->addr);
-
- return 0;
-}
-
-static int zx_vga_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_vga *vga = i2c_get_adapdata(adap);
- struct zx_vga_i2c *ddc = vga->ddc;
- int ret = 0;
- int i;
-
- mutex_lock(&ddc->lock);
-
- for (i = 0; i < num; i++) {
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_vga_i2c_read(vga, &msgs[i]);
- else
- ret = zx_vga_i2c_write(vga, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_vga_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm zx_vga_algorithm = {
- .master_xfer = zx_vga_i2c_xfer,
- .functionality = zx_vga_i2c_func,
-};
-
-static int zx_vga_ddc_register(struct zx_vga *vga)
-{
- struct device *dev = vga->dev;
- struct i2c_adapter *adap;
- struct zx_vga_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- vga->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = dev;
- adap->algo = &zx_vga_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx vga i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to add I2C adapter: %d\n", ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, vga);
-
- return 0;
-}
-
-static irqreturn_t zx_vga_irq_thread(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
-
- drm_helper_hpd_irq_event(vga->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_vga_irq_handler(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
- u32 status;
-
- status = zx_readl(vga->mmio + VGA_I2C_STATUS);
-
- /* Clear interrupt status */
- zx_writel_mask(vga->mmio + VGA_I2C_STATUS, VGA_CLEAR_IRQ,
- VGA_CLEAR_IRQ);
-
- if (status & VGA_DEVICE_CONNECTED) {
- /*
- * Since the VGA_DETECT_SEL bits need to be reset to switch the DDC
- * bus from device detection to EDID reading, we do not set the
- * HAS_DEVICE bit here; instead, the .get_modes hook sets it for
- * unplug detection after the EDID read succeeds.
- */
- vga->connected = true;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_DEVICE_DISCONNECTED) {
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_TRANS_DONE) {
- complete(&vga->complete);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void zx_vga_hw_init(struct zx_vga *vga)
-{
- unsigned long ref = clk_get_rate(vga->i2c_wclk);
- int div;
-
- /*
- * Set up I2C fast speed divider per formula below to get 400kHz.
- * scl = ref / ((div + 1) * 4)
- */
- div = DIV_ROUND_UP(ref / 1000, 400 * 4) - 1;
- zx_writel(vga->mmio + VGA_CLK_DIV_FS, div);
-
- /* Set up device detection */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_PARA, 0x80);
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_NO_DEVICE);
-
- /*
- * We need to poke the monitor via the DDC bus to get the
- * connection irq working.
- */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, DDC_ADDR);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS, VGA_CMD_TRANS);
-}
-
-static int zx_vga_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_vga *vga;
- int irq;
- int ret;
-
- vga = devm_kzalloc(dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
-
- vga->dev = dev;
- dev_set_drvdata(dev, vga);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vga->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(vga->mmio))
- return PTR_ERR(vga->mmio);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vga->i2c_wclk = devm_clk_get(dev, "i2c_wclk");
- if (IS_ERR(vga->i2c_wclk)) {
- ret = PTR_ERR(vga->i2c_wclk);
- DRM_DEV_ERROR(dev, "failed to get i2c_wclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_pwrctrl_init(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_ddc_register(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_register(drm, vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register vga: %d\n", ret);
- return ret;
- }
-
- init_completion(&vga->complete);
-
- ret = devm_request_threaded_irq(dev, irq, zx_vga_irq_handler,
- zx_vga_irq_thread, IRQF_SHARED,
- dev_name(dev), vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vga->i2c_wclk);
- if (ret)
- return ret;
-
- zx_vga_hw_init(vga);
-
- return 0;
-}
-
-static void zx_vga_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vga *vga = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vga->i2c_wclk);
-}
-
-static const struct component_ops zx_vga_component_ops = {
- .bind = zx_vga_bind,
- .unbind = zx_vga_unbind,
-};
-
-static int zx_vga_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_vga_component_ops);
-}
-
-static int zx_vga_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_vga_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_vga_of_match[] = {
- { .compatible = "zte,zx296718-vga", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_vga_of_match);
-
-struct platform_driver zx_vga_driver = {
- .probe = zx_vga_probe,
- .remove = zx_vga_remove,
- .driver = {
- .name = "zx-vga",
- .of_match_table = zx_vga_of_match,
- },
-};
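Note: the I2C clock divider programmed in zx_vga_hw_init() above follows the formula scl = ref / ((div + 1) * 4). Below is a minimal user-space sketch to sanity-check the arithmetic, assuming a hypothetical 48 MHz i2c_wclk (the real rate comes from the clock tree and is not part of this diff):

#include <stdio.h>

/* Same rounding helper the kernel uses. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long ref = 48000000;	/* assumed i2c_wclk rate in Hz */
	unsigned long div, scl;

	/* Divider as computed in zx_vga_hw_init() for a 400 kHz target */
	div = DIV_ROUND_UP(ref / 1000, 400 * 4) - 1;
	/* Resulting SCL rate: scl = ref / ((div + 1) * 4) */
	scl = ref / ((div + 1) * 4);

	printf("div = %lu, scl = %lu Hz\n", div, scl);	/* div = 29, scl = 400000 */
	return 0;
}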
diff --git a/drivers/gpu/drm/zte/zx_vga_regs.h b/drivers/gpu/drm/zte/zx_vga_regs.h
deleted file mode 100644
index 1e8825ae70a5..000000000000
--- a/drivers/gpu/drm/zte/zx_vga_regs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_VGA_REGS_H__
-#define __ZX_VGA_REGS_H__
-
-#define VGA_CMD_CFG 0x04
-#define VGA_CMD_TRANS BIT(6)
-#define VGA_CMD_COMBO BIT(5)
-#define VGA_CMD_RW BIT(4)
-#define VGA_SUB_ADDR 0x0c
-#define VGA_DEVICE_ADDR 0x10
-#define VGA_CLK_DIV_FS 0x14
-#define VGA_RXF_CTRL 0x20
-#define VGA_RX_FIFO_CLEAR BIT(7)
-#define VGA_DATA 0x24
-#define VGA_I2C_STATUS 0x28
-#define VGA_DEVICE_DISCONNECTED BIT(7)
-#define VGA_DEVICE_CONNECTED BIT(6)
-#define VGA_CLEAR_IRQ BIT(4)
-#define VGA_TRANS_DONE BIT(0)
-#define VGA_RXF_STATUS 0x30
-#define VGA_RXF_COUNT_SHIFT 2
-#define VGA_RXF_COUNT_MASK GENMASK(7, 2)
-#define VGA_AUTO_DETECT_PARA 0x34
-#define VGA_AUTO_DETECT_SEL 0x38
-#define VGA_DETECT_SEL_HAS_DEVICE BIT(1)
-#define VGA_DETECT_SEL_NO_DEVICE BIT(0)
-
-#endif /* __ZX_VGA_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
deleted file mode 100644
index 904f62f3bfc1..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ /dev/null
@@ -1,921 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_vou.h"
-#include "zx_vou_regs.h"
-
-#define GL_NUM 2
-#define VL_NUM 3
-
-enum vou_chn_type {
- VOU_CHN_MAIN,
- VOU_CHN_AUX,
-};
-
-struct zx_crtc_regs {
- u32 fir_active;
- u32 fir_htiming;
- u32 fir_vtiming;
- u32 sec_vtiming;
- u32 timing_shift;
- u32 timing_pi_shift;
-};
-
-static const struct zx_crtc_regs main_crtc_regs = {
- .fir_active = FIR_MAIN_ACTIVE,
- .fir_htiming = FIR_MAIN_H_TIMING,
- .fir_vtiming = FIR_MAIN_V_TIMING,
- .sec_vtiming = SEC_MAIN_V_TIMING,
- .timing_shift = TIMING_MAIN_SHIFT,
- .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
-};
-
-static const struct zx_crtc_regs aux_crtc_regs = {
- .fir_active = FIR_AUX_ACTIVE,
- .fir_htiming = FIR_AUX_H_TIMING,
- .fir_vtiming = FIR_AUX_V_TIMING,
- .sec_vtiming = SEC_AUX_V_TIMING,
- .timing_shift = TIMING_AUX_SHIFT,
- .timing_pi_shift = TIMING_AUX_PI_SHIFT,
-};
-
-struct zx_crtc_bits {
- u32 polarity_mask;
- u32 polarity_shift;
- u32 int_frame_mask;
- u32 tc_enable;
- u32 sec_vactive_shift;
- u32 sec_vactive_mask;
- u32 interlace_select;
- u32 pi_enable;
- u32 div_vga_shift;
- u32 div_pic_shift;
- u32 div_tvenc_shift;
- u32 div_hdmi_pnx_shift;
- u32 div_hdmi_shift;
- u32 div_inf_shift;
- u32 div_layer_shift;
-};
-
-static const struct zx_crtc_bits main_crtc_bits = {
- .polarity_mask = MAIN_POL_MASK,
- .polarity_shift = MAIN_POL_SHIFT,
- .int_frame_mask = TIMING_INT_MAIN_FRAME,
- .tc_enable = MAIN_TC_EN,
- .sec_vactive_shift = SEC_VACT_MAIN_SHIFT,
- .sec_vactive_mask = SEC_VACT_MAIN_MASK,
- .interlace_select = MAIN_INTERLACE_SEL,
- .pi_enable = MAIN_PI_EN,
- .div_vga_shift = VGA_MAIN_DIV_SHIFT,
- .div_pic_shift = PIC_MAIN_DIV_SHIFT,
- .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT,
- .div_inf_shift = INF_MAIN_DIV_SHIFT,
- .div_layer_shift = LAYER_MAIN_DIV_SHIFT,
-};
-
-static const struct zx_crtc_bits aux_crtc_bits = {
- .polarity_mask = AUX_POL_MASK,
- .polarity_shift = AUX_POL_SHIFT,
- .int_frame_mask = TIMING_INT_AUX_FRAME,
- .tc_enable = AUX_TC_EN,
- .sec_vactive_shift = SEC_VACT_AUX_SHIFT,
- .sec_vactive_mask = SEC_VACT_AUX_MASK,
- .interlace_select = AUX_INTERLACE_SEL,
- .pi_enable = AUX_PI_EN,
- .div_vga_shift = VGA_AUX_DIV_SHIFT,
- .div_pic_shift = PIC_AUX_DIV_SHIFT,
- .div_tvenc_shift = TVENC_AUX_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_AUX_DIV_SHIFT,
- .div_inf_shift = INF_AUX_DIV_SHIFT,
- .div_layer_shift = LAYER_AUX_DIV_SHIFT,
-};
-
-struct zx_crtc {
- struct drm_crtc crtc;
- struct drm_plane *primary;
- struct zx_vou_hw *vou;
- void __iomem *chnreg;
- void __iomem *chncsc;
- void __iomem *dither;
- const struct zx_crtc_regs *regs;
- const struct zx_crtc_bits *bits;
- enum vou_chn_type chn_type;
- struct clk *pixclk;
-};
-
-#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
-
-struct vou_layer_bits {
- u32 enable;
- u32 chnsel;
- u32 clksel;
-};
-
-static const struct vou_layer_bits zx_gl_bits[GL_NUM] = {
- {
- .enable = OSD_CTRL0_GL0_EN,
- .chnsel = OSD_CTRL0_GL0_SEL,
- .clksel = VOU_CLK_GL0_SEL,
- }, {
- .enable = OSD_CTRL0_GL1_EN,
- .chnsel = OSD_CTRL0_GL1_SEL,
- .clksel = VOU_CLK_GL1_SEL,
- },
-};
-
-static const struct vou_layer_bits zx_vl_bits[VL_NUM] = {
- {
- .enable = OSD_CTRL0_VL0_EN,
- .chnsel = OSD_CTRL0_VL0_SEL,
- .clksel = VOU_CLK_VL0_SEL,
- }, {
- .enable = OSD_CTRL0_VL1_EN,
- .chnsel = OSD_CTRL0_VL1_SEL,
- .clksel = VOU_CLK_VL1_SEL,
- }, {
- .enable = OSD_CTRL0_VL2_EN,
- .chnsel = OSD_CTRL0_VL2_SEL,
- .clksel = VOU_CLK_VL2_SEL,
- },
-};
-
-struct zx_vou_hw {
- struct device *dev;
- void __iomem *osd;
- void __iomem *timing;
- void __iomem *vouctl;
- void __iomem *otfppu;
- void __iomem *dtrc;
- struct clk *axi_clk;
- struct clk *ppu_clk;
- struct clk *main_clk;
- struct clk *aux_clk;
- struct zx_crtc *main_crtc;
- struct zx_crtc *aux_crtc;
-};
-
-enum vou_inf_data_sel {
- VOU_YUV444 = 0,
- VOU_RGB_101010 = 1,
- VOU_RGB_888 = 2,
- VOU_RGB_666 = 3,
-};
-
-struct vou_inf {
- enum vou_inf_id id;
- enum vou_inf_data_sel data_sel;
- u32 clocks_en_bits;
- u32 clocks_sel_bits;
-};
-
-static struct vou_inf vou_infs[] = {
- [VOU_HDMI] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
- .clocks_sel_bits = BIT(13) | BIT(2),
- },
- [VOU_TV_ENC] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(15),
- .clocks_sel_bits = BIT(11) | BIT(0),
- },
- [VOU_VGA] = {
- .data_sel = VOU_RGB_888,
- .clocks_en_bits = BIT(1),
- .clocks_sel_bits = BIT(10),
- },
-};
-
-static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
-
- return zcrtc->vou;
-}
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
-
- zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud);
-}
-
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct vou_inf *inf = &vou_infs[id];
- void __iomem *dither = zcrtc->dither;
- void __iomem *csc = zcrtc->chncsc;
- bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
- u32 data_sel_shift = id << 1;
-
- if (inf->data_sel != VOU_YUV444) {
- /* Enable channel CSC for RGB output */
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_YCBCR2RGB << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE,
- CSC_WORK_ENABLE);
-
- /* Bypass Dither block for RGB output */
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS,
- DITHER_BYSPASS);
- } else {
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, 0);
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS, 0);
- }
-
- /* Select data format */
- zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
- inf->data_sel << data_sel_shift);
-
- /* Select channel */
- zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id,
- zcrtc->chn_type << id);
-
- /* Select interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
- is_main ? 0 : inf->clocks_sel_bits);
-
- /* Enable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
- inf->clocks_en_bits);
-
- /* Enable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id);
-}
-
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- struct vou_inf *inf = &vou_infs[id];
-
- /* Disable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0);
-
- /* Disable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
-}
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- int i;
-
- /* Clear update flag bit */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0);
-
- for (i = 0; i < num; i++) {
- struct vou_div_config *cfg = configs + i;
- u32 reg, shift;
-
- switch (cfg->id) {
- case VOU_DIV_VGA:
- reg = VOU_CLK_SEL;
- shift = bits->div_vga_shift;
- break;
- case VOU_DIV_PIC:
- reg = VOU_CLK_SEL;
- shift = bits->div_pic_shift;
- break;
- case VOU_DIV_TVENC:
- reg = VOU_DIV_PARA;
- shift = bits->div_tvenc_shift;
- break;
- case VOU_DIV_HDMI_PNX:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_pnx_shift;
- break;
- case VOU_DIV_HDMI:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_shift;
- break;
- case VOU_DIV_INF:
- reg = VOU_DIV_PARA;
- shift = bits->div_inf_shift;
- break;
- case VOU_DIV_LAYER:
- reg = VOU_DIV_PARA;
- shift = bits->div_layer_shift;
- break;
- default:
- continue;
- }
-
- /* Each divider occupies 3 bits */
- zx_writel_mask(vou->vouctl + reg, 0x7 << shift,
- cfg->val << shift);
- }
-
- /* Set the update flag bit to make the dividers take effect */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE,
- DIV_PARA_UPDATE);
-}
-
-static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
-{
- zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
-}
-
-static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_regs *regs = zcrtc->regs;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct videomode vm;
- u32 scan_mask;
- u32 pol = 0;
- u32 val;
- int ret;
-
- drm_display_mode_to_videomode(mode, &vm);
-
- /* Set up timing parameters */
- val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1);
- val |= H_ACTIVE(vm.hactive - 1);
- zx_writel(vou->timing + regs->fir_active, val);
-
- val = SYNC_WIDE(vm.hsync_len - 1);
- val |= BACK_PORCH(vm.hback_porch - 1);
- val |= FRONT_PORCH(vm.hfront_porch - 1);
- zx_writel(vou->timing + regs->fir_htiming, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- val |= BACK_PORCH(vm.vback_porch - 1);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->fir_vtiming, val);
-
- if (interlaced) {
- u32 shift = bits->sec_vactive_shift;
- u32 mask = bits->sec_vactive_mask;
-
- val = zx_readl(vou->timing + SEC_V_ACTIVE);
- val &= ~mask;
- val |= ((vm.vactive / 2 - 1) << shift) & mask;
- zx_writel(vou->timing + SEC_V_ACTIVE, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- /*
- * The vback_porch for the second field needs to be one greater
- * than the value programmed for the first field.
- */
- val |= BACK_PORCH(vm.vback_porch);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->sec_vtiming, val);
- }
-
- /* Set up polarities */
- if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
- pol |= 1 << POL_VSYNC_SHIFT;
- if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
- pol |= 1 << POL_HSYNC_SHIFT;
-
- zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
- pol << bits->polarity_shift);
-
- /* Set up the SHIFT register by following what the ZTE BSP does */
- val = H_SHIFT_VAL;
- if (interlaced)
- val |= V_SHIFT_VAL << 16;
- zx_writel(vou->timing + regs->timing_shift, val);
- zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
-
- /* Progressive or interlace scan select */
- scan_mask = bits->interlace_select | bits->pi_enable;
- zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask,
- interlaced ? scan_mask : 0);
-
- /* Enable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
- bits->tc_enable);
-
- /* Configure channel screen size */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
- vm.hactive << CHN_SCREEN_W_SHIFT);
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
- vm.vactive << CHN_SCREEN_H_SHIFT);
-
- /* Configure channel interlace buffer control */
- zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN,
- interlaced ? CHN_INTERLACE_EN : 0);
-
- /* Update channel */
- vou_chn_set_update(zcrtc);
-
- /* Enable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
-
- drm_crtc_vblank_on(crtc);
-
- ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
- if (ret) {
- DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(zcrtc->pixclk);
- if (ret)
- DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
-}
-
-static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct zx_vou_hw *vou = zcrtc->vou;
-
- clk_disable_unprepare(zcrtc->pixclk);
-
- drm_crtc_vblank_off(crtc);
-
- /* Disable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
-
- /* Disable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
-}
-
-static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (!event)
- return;
-
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
-}
-
-static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
- .atomic_flush = zx_crtc_atomic_flush,
- .atomic_enable = zx_crtc_atomic_enable,
- .atomic_disable = zx_crtc_atomic_disable,
-};
-
-static int zx_vou_enable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- u32 int_frame_mask = zcrtc->bits->int_frame_mask;
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
- int_frame_mask);
-
- return 0;
-}
-
-static void zx_vou_disable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL,
- zcrtc->bits->int_frame_mask, 0);
-}
-
-static const struct drm_crtc_funcs zx_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
- .enable_vblank = zx_vou_enable_vblank,
- .disable_vblank = zx_vou_disable_vblank,
-};
-
-static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
- enum vou_chn_type chn_type)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- struct zx_crtc *zcrtc;
- int ret;
-
- zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
- if (!zcrtc)
- return -ENOMEM;
-
- zcrtc->vou = vou;
- zcrtc->chn_type = chn_type;
-
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane)
- return -ENOMEM;
-
- zplane->dev = dev;
-
- if (chn_type == VOU_CHN_MAIN) {
- zplane->layer = vou->osd + MAIN_GL_OFFSET;
- zplane->csc = vou->osd + MAIN_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[0];
- zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
- zcrtc->chncsc = vou->osd + MAIN_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + MAIN_DITHER_OFFSET;
- zcrtc->regs = &main_crtc_regs;
- zcrtc->bits = &main_crtc_bits;
- } else {
- zplane->layer = vou->osd + AUX_GL_OFFSET;
- zplane->csc = vou->osd + AUX_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + AUX_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[1];
- zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
- zcrtc->chncsc = vou->osd + AUX_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + AUX_DITHER_OFFSET;
- zcrtc->regs = &aux_crtc_regs;
- zcrtc->bits = &aux_crtc_bits;
- }
-
- zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
- "main_wclk" : "aux_wclk");
- if (IS_ERR(zcrtc->pixclk)) {
- ret = PTR_ERR(zcrtc->pixclk);
- DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
- return ret;
- }
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
- return ret;
- }
-
- zcrtc->primary = &zplane->plane;
-
- ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
- &zx_crtc_funcs, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
- return ret;
- }
-
- drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
-
- if (chn_type == VOU_CHN_MAIN)
- vou->main_crtc = zcrtc;
- else
- vou->aux_crtc = zcrtc;
-
- return 0;
-}
-
-void zx_vou_layer_enable(struct drm_plane *plane)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- if (zcrtc->chn_type == VOU_CHN_MAIN) {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0);
- } else {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel,
- bits->chnsel);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel,
- bits->clksel);
- }
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
-}
-
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0);
-}
-
-static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- int i;
- int ret;
-
- /*
- * VL0 has some quirks on scaling support which need special handling.
- * Let's leave it out for now.
- */
- for (i = 1; i < VL_NUM; i++) {
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane) {
- DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i);
- return;
- }
-
- zplane->layer = vou->osd + OSD_VL_OFFSET(i);
- zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i);
- zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i);
- zplane->bits = &zx_vl_bits[i];
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i);
- continue;
- }
- }
-}
-
-static inline void zx_osd_int_update(struct zx_crtc *zcrtc)
-{
- struct drm_crtc *crtc = &zcrtc->crtc;
- struct drm_plane *plane;
-
- vou_chn_set_update(zcrtc);
-
- drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
- zx_plane_set_update(plane);
-}
-
-static irqreturn_t vou_irq_handler(int irq, void *dev_id)
-{
- struct zx_vou_hw *vou = dev_id;
- u32 state;
-
- /* Handle TIMING_CTRL frame interrupts */
- state = zx_readl(vou->timing + TIMING_INT_STATE);
- zx_writel(vou->timing + TIMING_INT_STATE, state);
-
- if (state & TIMING_INT_MAIN_FRAME)
- drm_crtc_handle_vblank(&vou->main_crtc->crtc);
-
- if (state & TIMING_INT_AUX_FRAME)
- drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
-
- /* Handle OSD interrupts */
- state = zx_readl(vou->osd + OSD_INT_STA);
- zx_writel(vou->osd + OSD_INT_CLRSTA, state);
-
- if (state & OSD_INT_MAIN_UPT)
- zx_osd_int_update(vou->main_crtc);
-
- if (state & OSD_INT_AUX_UPT)
- zx_osd_int_update(vou->aux_crtc);
-
- if (state & OSD_INT_ERROR)
- DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
-
- return IRQ_HANDLED;
-}
-
-static void vou_dtrc_init(struct zx_vou_hw *vou)
-{
- /* Clear bit for bypass by ID */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
- TILE2RASTESCAN_BYPASS_MODE, 0);
-
- /* Select ARIDR mode */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
- DETILE_ARID_IN_ARIDR);
-
- /* Bypass decompression for both frames */
- zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
- zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
-
- /* Set up ARID register */
- zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
- DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
-}
-
-static void vou_hw_init(struct zx_vou_hw *vou)
-{
- /* Release reset for all VOU modules */
- zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
-
- /* Enable all VOU module clocks */
- zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
-
- /* Clear both OSD and TIMING_CTRL interrupt state */
- zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
- zx_writel(vou->timing + TIMING_INT_STATE, ~0);
-
- /* Enable OSD and TIMING_CTRL interrupts */
- zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
- zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
-
- /* Select GPC as input to gl/vl scaler as a sane default setting */
- zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
-
- /*
- * The channel and layer logic needs to be reset per frame when the
- * frame starts to get VOU working properly.
- */
- zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
-
- vou_dtrc_init(vou);
-}
-
-static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct zx_vou_hw *vou;
- struct resource *res;
- int irq;
- int ret;
-
- vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
- if (!vou)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
- vou->osd = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->osd)) {
- ret = PTR_ERR(vou->osd);
- DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
- vou->timing = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->timing)) {
- ret = PTR_ERR(vou->timing);
- DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
- vou->dtrc = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->dtrc)) {
- ret = PTR_ERR(vou->dtrc);
- DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
- vou->vouctl = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->vouctl)) {
- ret = PTR_ERR(vou->vouctl);
- DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
- vou->otfppu = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->otfppu)) {
- ret = PTR_ERR(vou->otfppu);
- DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vou->axi_clk = devm_clk_get(dev, "aclk");
- if (IS_ERR(vou->axi_clk)) {
- ret = PTR_ERR(vou->axi_clk);
- DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
- return ret;
- }
-
- vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
- if (IS_ERR(vou->ppu_clk)) {
- ret = PTR_ERR(vou->ppu_clk);
- DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vou->axi_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vou->ppu_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
- goto disable_axi_clk;
- }
-
- vou->dev = dev;
- dev_set_drvdata(dev, vou);
-
- vou_hw_init(vou);
-
- ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- zx_overlay_init(drm, vou);
-
- return 0;
-
-disable_ppu_clk:
- clk_disable_unprepare(vou->ppu_clk);
-disable_axi_clk:
- clk_disable_unprepare(vou->axi_clk);
- return ret;
-}
-
-static void zx_crtc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vou_hw *vou = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vou->axi_clk);
- clk_disable_unprepare(vou->ppu_clk);
-}
-
-static const struct component_ops zx_crtc_component_ops = {
- .bind = zx_crtc_bind,
- .unbind = zx_crtc_unbind,
-};
-
-static int zx_crtc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_crtc_component_ops);
-}
-
-static int zx_crtc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_crtc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_crtc_of_match[] = {
- { .compatible = "zte,zx296718-dpc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
-
-struct platform_driver zx_crtc_driver = {
- .probe = zx_crtc_probe,
- .remove = zx_crtc_remove,
- .driver = {
- .name = "zx-crtc",
- .of_match_table = zx_crtc_of_match,
- },
-};
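Aside: nearly every register access in the ZTE files above goes through zx_writel_mask(), which lives in zx_drm_drv.h and is not shown in this diff. The following is a rough sketch of the read-modify-write pattern such a helper is assumed to implement (the helper name, the fake register, and the field values are illustrative, not the driver's actual code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a memory-mapped register; the driver would use an
 * __iomem pointer with readl()/writel() instead.
 */
static uint32_t fake_reg = 0xffff0000;

/* Clear the bits covered by 'mask', then set 'val' within that field. */
static void writel_mask(volatile uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = *reg;

	tmp &= ~mask;
	tmp |= val & mask;
	*reg = tmp;
}

int main(void)
{
	/* Update a 3-bit divider field at bit 4 without touching other bits */
	writel_mask(&fake_reg, 0x7 << 4, 0x5 << 4);
	printf("0x%08x\n", (unsigned int)fake_reg);	/* 0xffff0050 */
	return 0;
}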
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
deleted file mode 100644
index b25f34f865ae..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_H__
-#define __ZX_VOU_H__
-
-#define VOU_CRTC_MASK 0x3
-
-/* VOU output interfaces */
-enum vou_inf_id {
- VOU_HDMI = 0,
- VOU_RGB_LCD = 1,
- VOU_TV_ENC = 2,
- VOU_MIPI_DSI = 3,
- VOU_LVDS = 4,
- VOU_VGA = 5,
-};
-
-enum vou_inf_hdmi_audio {
- VOU_HDMI_AUD_SPDIF = BIT(0),
- VOU_HDMI_AUD_I2S = BIT(1),
- VOU_HDMI_AUD_DSD = BIT(2),
- VOU_HDMI_AUD_HBR = BIT(3),
- VOU_HDMI_AUD_PARALLEL = BIT(4),
-};
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud);
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc);
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc);
-
-enum vou_div_id {
- VOU_DIV_VGA,
- VOU_DIV_PIC,
- VOU_DIV_TVENC,
- VOU_DIV_HDMI_PNX,
- VOU_DIV_HDMI,
- VOU_DIV_INF,
- VOU_DIV_LAYER,
-};
-
-enum vou_div_val {
- VOU_DIV_1 = 0,
- VOU_DIV_2 = 1,
- VOU_DIV_4 = 3,
- VOU_DIV_8 = 7,
-};
-
-struct vou_div_config {
- enum vou_div_id id;
- enum vou_div_val val;
-};
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num);
-
-void zx_vou_layer_enable(struct drm_plane *plane);
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
deleted file mode 100644
index 2ddb199cb912..000000000000
--- a/drivers/gpu/drm/zte/zx_vou_regs.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_REGS_H__
-#define __ZX_VOU_REGS_H__
-
-/* Sub-module offset */
-#define MAIN_GL_OFFSET 0x130
-#define MAIN_GL_CSC_OFFSET 0x580
-#define MAIN_CHN_CSC_OFFSET 0x6c0
-#define MAIN_HBSC_OFFSET 0x820
-#define MAIN_DITHER_OFFSET 0x960
-#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
-
-#define AUX_GL_OFFSET 0x200
-#define AUX_GL_CSC_OFFSET 0x5d0
-#define AUX_CHN_CSC_OFFSET 0x710
-#define AUX_HBSC_OFFSET 0x860
-#define AUX_DITHER_OFFSET 0x970
-#define AUX_RSZ_OFFSET 0x800
-
-#define OSD_VL0_OFFSET 0x040
-#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i))
-
-#define HBSC_VL0_OFFSET 0x760
-#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i))
-
-#define RSZ_VL1_U0 0xa00
-#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i))
-
-/* OSD (GPC_GLOBAL) registers */
-#define OSD_INT_STA 0x04
-#define OSD_INT_CLRSTA 0x08
-#define OSD_INT_MSK 0x0c
-#define OSD_INT_AUX_UPT BIT(14)
-#define OSD_INT_MAIN_UPT BIT(13)
-#define OSD_INT_GL1_LBW BIT(10)
-#define OSD_INT_GL0_LBW BIT(9)
-#define OSD_INT_VL2_LBW BIT(8)
-#define OSD_INT_VL1_LBW BIT(7)
-#define OSD_INT_VL0_LBW BIT(6)
-#define OSD_INT_BUS_ERR BIT(3)
-#define OSD_INT_CFG_ERR BIT(2)
-#define OSD_INT_ERROR (\
- OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
- OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
- OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
-)
-#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
-#define OSD_CTRL0 0x10
-#define OSD_CTRL0_VL0_EN BIT(13)
-#define OSD_CTRL0_VL0_SEL BIT(12)
-#define OSD_CTRL0_VL1_EN BIT(11)
-#define OSD_CTRL0_VL1_SEL BIT(10)
-#define OSD_CTRL0_VL2_EN BIT(9)
-#define OSD_CTRL0_VL2_SEL BIT(8)
-#define OSD_CTRL0_GL0_EN BIT(7)
-#define OSD_CTRL0_GL0_SEL BIT(6)
-#define OSD_CTRL0_GL1_EN BIT(5)
-#define OSD_CTRL0_GL1_SEL BIT(4)
-#define OSD_RST_CLR 0x1c
-#define RST_PER_FRAME BIT(19)
-
-/* Main/Aux channel registers */
-#define OSD_MAIN_CHN 0x470
-#define OSD_AUX_CHN 0x4d0
-#define CHN_CTRL0 0x00
-#define CHN_ENABLE BIT(0)
-#define CHN_CTRL1 0x04
-#define CHN_SCREEN_W_SHIFT 18
-#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
-#define CHN_SCREEN_H_SHIFT 5
-#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
-#define CHN_UPDATE 0x08
-#define CHN_INTERLACE_BUF_CTRL 0x24
-#define CHN_INTERLACE_EN BIT(2)
-
-/* Dither registers */
-#define OSD_DITHER_CTRL0 0x00
-#define DITHER_BYSPASS BIT(31)
-
-/* TIMING_CTRL registers */
-#define TIMING_TC_ENABLE 0x04
-#define AUX_TC_EN BIT(1)
-#define MAIN_TC_EN BIT(0)
-#define FIR_MAIN_ACTIVE 0x08
-#define FIR_AUX_ACTIVE 0x0c
-#define V_ACTIVE_SHIFT 16
-#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
-#define H_ACTIVE_SHIFT 0
-#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
-#define FIR_MAIN_H_TIMING 0x10
-#define FIR_MAIN_V_TIMING 0x14
-#define FIR_AUX_H_TIMING 0x18
-#define FIR_AUX_V_TIMING 0x1c
-#define SYNC_WIDE_SHIFT 22
-#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
-#define BACK_PORCH_SHIFT 11
-#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
-#define FRONT_PORCH_SHIFT 0
-#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
-#define TIMING_CTRL 0x20
-#define AUX_POL_SHIFT 3
-#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
-#define MAIN_POL_SHIFT 0
-#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
-#define POL_DE_SHIFT 2
-#define POL_VSYNC_SHIFT 1
-#define POL_HSYNC_SHIFT 0
-#define TIMING_INT_CTRL 0x24
-#define TIMING_INT_STATE 0x28
-#define TIMING_INT_AUX_FRAME BIT(3)
-#define TIMING_INT_MAIN_FRAME BIT(1)
-#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
-#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
-#define TIMING_INT_ENABLE (\
- TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
- TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
-)
-#define TIMING_MAIN_SHIFT 0x2c
-#define TIMING_AUX_SHIFT 0x30
-#define H_SHIFT_VAL 0x0048
-#define V_SHIFT_VAL 0x0001
-#define SCAN_CTRL 0x34
-#define AUX_PI_EN BIT(19)
-#define MAIN_PI_EN BIT(18)
-#define AUX_INTERLACE_SEL BIT(1)
-#define MAIN_INTERLACE_SEL BIT(0)
-#define SEC_V_ACTIVE 0x38
-#define SEC_VACT_MAIN_SHIFT 0
-#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT)
-#define SEC_VACT_AUX_SHIFT 16
-#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT)
-#define SEC_MAIN_V_TIMING 0x3c
-#define SEC_AUX_V_TIMING 0x40
-#define TIMING_MAIN_PI_SHIFT 0x68
-#define TIMING_AUX_PI_SHIFT 0x6c
-#define H_PI_SHIFT_VAL 0x000f
-
-#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
-#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
-
-#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
-#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
-#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
-
-/* DTRC registers */
-#define DTRC_F0_CTRL 0x2c
-#define DTRC_F1_CTRL 0x5c
-#define DTRC_DECOMPRESS_BYPASS BIT(17)
-#define DTRC_DETILE_CTRL 0x68
-#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
-#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
-#define DETILE_ARID_ALL 0
-#define DETILE_ARID_IN_ARIDR 1
-#define DETILE_ARID_BYP_BUT_ARIDR 2
-#define DETILE_ARID_IN_ARIDR2 3
-#define DTRC_ARID 0x6c
-#define DTRC_ARID3_SHIFT 24
-#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
-#define DTRC_ARID2_SHIFT 16
-#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
-#define DTRC_ARID1_SHIFT 8
-#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
-#define DTRC_ARID0_SHIFT 0
-#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
-#define DTRC_DEC2DDR_ARID 0x70
-
-#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
-#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
-#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
-#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
-
-/* VOU_CTRL registers */
-#define VOU_INF_EN 0x00
-#define VOU_INF_CH_SEL 0x04
-#define VOU_INF_DATA_SEL 0x08
-#define VOU_SOFT_RST 0x14
-#define VOU_CLK_SEL 0x18
-#define VGA_AUX_DIV_SHIFT 29
-#define VGA_MAIN_DIV_SHIFT 26
-#define PIC_MAIN_DIV_SHIFT 23
-#define PIC_AUX_DIV_SHIFT 20
-#define VOU_CLK_VL2_SEL BIT(8)
-#define VOU_CLK_VL1_SEL BIT(7)
-#define VOU_CLK_VL0_SEL BIT(6)
-#define VOU_CLK_GL1_SEL BIT(5)
-#define VOU_CLK_GL0_SEL BIT(4)
-#define VOU_DIV_PARA 0x1c
-#define DIV_PARA_UPDATE BIT(31)
-#define TVENC_AUX_DIV_SHIFT 28
-#define HDMI_AUX_PNX_DIV_SHIFT 25
-#define HDMI_MAIN_PNX_DIV_SHIFT 22
-#define HDMI_AUX_DIV_SHIFT 19
-#define HDMI_MAIN_DIV_SHIFT 16
-#define TVENC_MAIN_DIV_SHIFT 13
-#define INF_AUX_DIV_SHIFT 9
-#define INF_MAIN_DIV_SHIFT 6
-#define LAYER_AUX_DIV_SHIFT 3
-#define LAYER_MAIN_DIV_SHIFT 0
-#define VOU_CLK_REQEN 0x20
-#define VOU_CLK_EN 0x24
-#define VOU_INF_HDMI_CTRL 0x30
-#define VOU_HDMI_AUD_MASK 0x1f
-
-/* OTFPPU_CTRL registers */
-#define OTFPPU_RSZ_DATA_SOURCE 0x04
-
-#endif /* __ZX_VOU_REGS_H__ */
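For readers unfamiliar with the *_SHIFT/*_MASK convention in the register header above, here is a minimal sketch of how an active-area register value is composed, using a hypothetical 1920x1080 mode (the mode is an assumption for illustration only; the U suffixes are added so the sketch compiles cleanly standalone):

#include <stdio.h>

#define V_ACTIVE_SHIFT	16
#define V_ACTIVE_MASK	(0xffffU << V_ACTIVE_SHIFT)
#define H_ACTIVE_SHIFT	0
#define H_ACTIVE_MASK	(0xffffU << H_ACTIVE_SHIFT)

#define V_ACTIVE(x)	(((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
#define H_ACTIVE(x)	(((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)

int main(void)
{
	unsigned int hactive = 1920, vactive = 1080;

	/* The timing registers hold "size - 1", as zx_crtc_atomic_enable() did */
	unsigned int val = V_ACTIVE(vactive - 1) | H_ACTIVE(hactive - 1);

	printf("FIR_MAIN_ACTIVE = 0x%08x\n", val);	/* 0x0437077f */
	return 0;
}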
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index d166ee262ce4..118318513e2d 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1003,19 +1003,16 @@ err_cpmem:
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
unsigned long status;
- int i, bit, irq;
+ int i, bit;
for (i = 0; i < num_regs; i++) {
status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
- for_each_set_bit(bit, &status, 32) {
- irq = irq_linear_revmap(ipu->domain,
- regs[i] * 32 + bit);
- if (irq)
- generic_handle_irq(irq);
- }
+ for_each_set_bit(bit, &status, 32)
+ generic_handle_domain_irq(ipu->domain,
+ regs[i] * 32 + bit);
}
}
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index a1c85d1521f5..82b244cb313e 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
.bits_per_pixel = 16,
};
-#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * pix->height / 4) + \
- (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * pix->height / 2) + \
- (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
- (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y) ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * pix->height / 4) + \
+ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * pix->height / 2) + \
+ (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
+ (pix->bytesperline * y) + (x))
#define NUM_ALPHA_CHANNELS 7
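The ipu-cpmem.c hunk above switches the planar offset macros from pix->width to pix->bytesperline so that padded line strides are honored. A short sketch of the effect on U_OFFSET(), assuming a hypothetical 1280x720 I420 buffer whose lines are padded to 1344 bytes (the numbers are illustrative, not taken from the patch):

#include <stdio.h>

struct pix {
	unsigned int width;
	unsigned int height;
	unsigned int bytesperline;
};

/* Mirrors the bytesperline-based U_OFFSET() above, parenthesized for
 * standalone use; the other plane macros follow the same idea.
 */
#define U_OFFSET(p, x, y) (((p)->bytesperline * (p)->height) + \
			   ((p)->bytesperline * ((y) / 2) / 2) + (x) / 2)

int main(void)
{
	struct pix p = { .width = 1280, .height = 720, .bytesperline = 1344 };

	/* The old width-based macro put U(0,0) at 1280 * 720 = 921600, which
	 * is wrong for a padded buffer; with bytesperline it lands at
	 * 1344 * 720 = 967680, i.e. right after the padded Y plane.
	 */
	printf("U(0,0) offset = %u\n", U_OFFSET(&p, 0, 0));
	return 0;
}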