Diffstat (limited to 'drivers/gpu/drm/v3d')
 drivers/gpu/drm/v3d/v3d_debugfs.c | 126
 drivers/gpu/drm/v3d/v3d_drv.c     |  87
 drivers/gpu/drm/v3d/v3d_drv.h     |  23
 drivers/gpu/drm/v3d/v3d_gem.c     |  27
 drivers/gpu/drm/v3d/v3d_irq.c     |  64
 drivers/gpu/drm/v3d/v3d_perfmon.c |   4
 drivers/gpu/drm/v3d/v3d_regs.h    |  26
 drivers/gpu/drm/v3d/v3d_sched.c   | 140
8 files changed, 340 insertions, 157 deletions
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
 };
 
 static const struct v3d_reg_def v3d_hub_reg_defs[] = {
-	REGDEF(33, 42, V3D_HUB_AXICFG),
-	REGDEF(33, 71, V3D_HUB_UIFCFG),
-	REGDEF(33, 71, V3D_HUB_IDENT0),
-	REGDEF(33, 71, V3D_HUB_IDENT1),
-	REGDEF(33, 71, V3D_HUB_IDENT2),
-	REGDEF(33, 71, V3D_HUB_IDENT3),
-	REGDEF(33, 71, V3D_HUB_INT_STS),
-	REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
-	REGDEF(33, 71, V3D_MMU_CTL),
-	REGDEF(33, 71, V3D_MMU_VIO_ADDR),
-	REGDEF(33, 71, V3D_MMU_VIO_ID),
-	REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
-	REGDEF(71, 71, V3D_GMP_STATUS(71)),
-	REGDEF(71, 71, V3D_GMP_CFG(71)),
-	REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+	REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
 };
 
 static const struct v3d_reg_def v3d_gca_reg_defs[] = {
-	REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
-	REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+	REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+	REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
 };
 
 static const struct v3d_reg_def v3d_core_reg_defs[] = {
-	REGDEF(33, 71, V3D_CTL_IDENT0),
-	REGDEF(33, 71, V3D_CTL_IDENT1),
-	REGDEF(33, 71, V3D_CTL_IDENT2),
-	REGDEF(33, 71, V3D_CTL_MISCCFG),
-	REGDEF(33, 71, V3D_CTL_INT_STS),
-	REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
-	REGDEF(33, 71, V3D_CLE_CT0CS),
-	REGDEF(33, 71, V3D_CLE_CT0CA),
-	REGDEF(33, 71, V3D_CLE_CT0EA),
-	REGDEF(33, 71, V3D_CLE_CT1CS),
-	REGDEF(33, 71, V3D_CLE_CT1CA),
-	REGDEF(33, 71, V3D_CLE_CT1EA),
-
-	REGDEF(33, 71, V3D_PTB_BPCA),
-	REGDEF(33, 71, V3D_PTB_BPCS),
-
-	REGDEF(33, 42, V3D_GMP_STATUS(33)),
-	REGDEF(33, 42, V3D_GMP_CFG(33)),
-	REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
-	REGDEF(33, 71, V3D_ERR_FDBGO),
-	REGDEF(33, 71, V3D_ERR_FDBGB),
-	REGDEF(33, 71, V3D_ERR_FDBGS),
-	REGDEF(33, 71, V3D_ERR_STAT),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+	REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+	REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+	REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+	REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
 };
 
 static const struct v3d_reg_def v3d_csd_reg_defs[] = {
-	REGDEF(41, 71, V3D_CSD_STATUS),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
-	REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
-	REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
-	REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+	REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+	REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+	REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
 };
 
 static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
 		   str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
 	seq_printf(m, "TFU: %s\n",
 		   str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
-	if (v3d->ver <= 42) {
+	if (v3d->ver <= V3D_GEN_42) {
 		seq_printf(m, "TSY: %s\n",
 			   str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
 	}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
 		seq_printf(m, " QPUs: %d\n", nslc * qups);
 		seq_printf(m, " Semaphores: %d\n",
 			   V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
-		if (v3d->ver <= 42) {
+		if (v3d->ver <= V3D_GEN_42) {
 			seq_printf(m, " BCG int: %d\n",
 				   (ident2 & V3D_IDENT2_BCG_INT) != 0);
 		}
-		if (v3d->ver < 40) {
+		if (v3d->ver < V3D_GEN_41) {
 			seq_printf(m, " Override TMU: %d\n",
 				   (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
 		}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
 	int core = 0;
 	int measure_ms = 1000;
 
-	if (v3d->ver >= 40) {
+	if (v3d->ver >= V3D_GEN_41) {
 		int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
 			       V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 930737a9347b..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 		args->value = 1;
 		return 0;
 	case DRM_V3D_PARAM_SUPPORTS_PERFMON:
-		args->value = (v3d->ver >= 40);
+		args->value = (v3d->ver >= V3D_GEN_41);
 		return 0;
 	case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
 		args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
 };
 
 static const struct of_device_id v3d_of_match[] = {
-	{ .compatible = "brcm,2711-v3d" },
-	{ .compatible = "brcm,2712-v3d" },
-	{ .compatible = "brcm,7268-v3d" },
-	{ .compatible = "brcm,7278-v3d" },
+	{ .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+	{ .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+	{ .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+	{ .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
 	{},
 };
 MODULE_DEVICE_TABLE(of, v3d_of_match);
 
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+	if (v3d->ver < V3D_GEN_71)
+		return;
+
+	V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+	if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+				    V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+		DRM_ERROR("Failed to power up SMS\n");
+	}
+
+	v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+	if (v3d->ver < V3D_GEN_71)
+		return;
+
+	V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+	if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+				    V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+		DRM_ERROR("Failed to power off SMS\n");
+	}
+}
+
 static int
 map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
 {
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct drm_device *drm;
 	struct v3d_dev *v3d;
+	enum v3d_gen gen;
 	int ret;
 	u32 mmu_debug;
 	u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, drm);
 
+	gen = (uintptr_t)of_device_get_match_data(dev);
+	v3d->ver = gen;
+
 	ret = map_regs(v3d, &v3d->hub_regs, "hub");
 	if (ret)
 		return ret;
@@ -295,17 +330,40 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	if (v3d->ver >= V3D_GEN_71) {
+		ret = map_regs(v3d, &v3d->sms_regs, "sms");
+		if (ret)
+			return ret;
+	}
+
+	v3d->clk = devm_clk_get_optional(dev, NULL);
+	if (IS_ERR(v3d->clk))
+		return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
+
+	ret = clk_prepare_enable(v3d->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Couldn't enable the V3D clock\n");
+		return ret;
+	}
+
+	v3d_idle_sms(v3d);
+
 	mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
 	mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
 	ret = dma_set_mask_and_coherent(dev, mask);
 	if (ret)
-		return ret;
+		goto clk_disable;
 
 	v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
 
 	ident1 = V3D_READ(V3D_HUB_IDENT1);
 	v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
 		    V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+
+	/* Make sure that the V3D tech version retrieved from the HW is equal
+	 * to the one advertised by the device tree.
+	 */
+	WARN_ON(v3d->ver != gen);
+
 	v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
 	WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -319,28 +377,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
 		ret = PTR_ERR(v3d->reset);
 
 		if (ret == -EPROBE_DEFER)
-			return ret;
+			goto clk_disable;
 
 		v3d->reset = NULL;
 		ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
 		if (ret) {
 			dev_err(dev, "Failed to get reset control or bridge regs\n");
-			return ret;
+			goto clk_disable;
 		}
 	}
 
-	if (v3d->ver < 41) {
+	if (v3d->ver < V3D_GEN_41) {
 		ret = map_regs(v3d, &v3d->gca_regs, "gca");
 		if (ret)
-			return ret;
+			goto clk_disable;
 	}
 
 	v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
 					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
 	if (!v3d->mmu_scratch) {
 		dev_err(dev, "Failed to allocate MMU scratch page\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto clk_disable;
 	}
 
 	ret = v3d_gem_init(drm);
@@ -369,6 +428,8 @@ gem_destroy:
 	v3d_gem_destroy(drm);
 dma_free:
 	dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+clk_disable:
+	clk_disable_unprepare(v3d->clk);
 	return ret;
 }
@@ -386,6 +447,10 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
 	dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
 		    v3d->mmu_scratch_paddr);
+
+	v3d_power_off_sms(v3d);
+
+	clk_disable_unprepare(v3d->clk);
 }
 
 static struct platform_driver v3d_platform_driver = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index dc1cfe2e14be..b51f0b648a08 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,11 +94,18 @@ struct v3d_perfmon {
 	u64 values[] __counted_by(ncounters);
 };
 
+enum v3d_gen {
+	V3D_GEN_33 = 33,
+	V3D_GEN_41 = 41,
+	V3D_GEN_42 = 42,
+	V3D_GEN_71 = 71,
+};
+
 struct v3d_dev {
 	struct drm_device drm;
 
 	/* Short representation (e.g. 33, 41) of the V3D tech version */
-	int ver;
+	enum v3d_gen ver;
 
 	/* Short representation (e.g. 5, 6) of the V3D tech revision */
 	int rev;
@@ -111,6 +118,7 @@ struct v3d_dev {
 	void __iomem *core_regs[3];
 	void __iomem *bridge_regs;
 	void __iomem *gca_regs;
+	void __iomem *sms_regs;
 	struct clk *clk;
 	struct reset_control *reset;
@@ -147,7 +155,6 @@ struct v3d_dev {
 	struct v3d_render_job *render_job;
 	struct v3d_tfu_job *tfu_job;
 	struct v3d_csd_job *csd_job;
-	struct v3d_cpu_job *cpu_job;
 
 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -200,7 +207,7 @@ to_v3d_dev(struct drm_device *dev)
 static inline bool
 v3d_has_csd(struct v3d_dev *v3d)
 {
-	return v3d->ver >= 41;
+	return v3d->ver >= V3D_GEN_41;
 }
 
 #define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -262,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence)
 #define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
 #define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
 
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
 #define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
 #define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -540,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
 /* v3d_gem.c */
 int v3d_gem_init(struct drm_device *dev);
 void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
 void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
 void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..d7d16da78db3 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
 	 * type.  If you want the default behavior, you can still put
 	 * "2" in the indirect texture state's output_type field.
 	 */
-	if (v3d->ver < 40)
+	if (v3d->ver < V3D_GEN_41)
 		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
 
 	/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
 static void
 v3d_idle_gca(struct v3d_dev *v3d)
 {
-	if (v3d->ver >= 41)
+	if (v3d->ver >= V3D_GEN_41)
 		return;
 
 	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
 }
 
 void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+	if (v3d->ver < V3D_GEN_71)
+		return;
+
+	V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+	if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+				     V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+		     !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+				     V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+		DRM_ERROR("Failed to wait for SMS reset\n");
+	}
+}
+
+void
 v3d_reset(struct v3d_dev *v3d)
 {
 	struct drm_device *dev = &v3d->drm;
@@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d)
 	v3d_idle_axi(v3d, 0);
 
 	v3d_idle_gca(v3d);
+	v3d_reset_sms(v3d);
 	v3d_reset_v3d(v3d);
 
 	v3d_mmu_set_page_table(v3d);
@@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d)
 static void
 v3d_flush_l3(struct v3d_dev *v3d)
 {
-	if (v3d->ver < 41) {
+	if (v3d->ver < V3D_GEN_41) {
 		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
 
 		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
 			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
 
-		if (v3d->ver < 33) {
+		if (v3d->ver < V3D_GEN_33) {
 			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
 				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
 		}
@@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
 static void
 v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
 {
-	if (v3d->ver > 32)
+	if (v3d->ver >= V3D_GEN_33)
 		return;
 
 	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..2cca5d3a26a2 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
 	/* We shouldn't be triggering these if we have GMP in
 	 * always-allowed mode.
 	 */
-	if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+	if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
 		dev_err(v3d->drm.dev, "GMP violation\n");
 
 	/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg)
 		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
 		u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
 				(v3d->va_width - 32));
-		static const char *const v3d41_axi_ids[] = {
-			"L2T",
-			"PTB",
-			"PSE",
-			"TLB",
-			"CLE",
-			"TFU",
-			"MMU",
-			"GMP",
+		static const struct {
+			u32 begin;
+			u32 end;
+			const char *client;
+		} v3d41_axi_ids[] = {
+			{0x00, 0x20, "L2T"},
+			{0x20, 0x21, "PTB"},
+			{0x40, 0x41, "PSE"},
+			{0x60, 0x80, "TLB"},
+			{0x80, 0x88, "CLE"},
+			{0xA0, 0xA1, "TFU"},
+			{0xC0, 0xE0, "MMU"},
+			{0xE0, 0xE1, "GMP"},
+		}, v3d71_axi_ids[] = {
+			{0x00, 0x30, "L2T"},
+			{0x30, 0x38, "CLE"},
+			{0x38, 0x39, "PTB"},
+			{0x39, 0x3A, "PSE"},
+			{0x3A, 0x3B, "CSD"},
+			{0x40, 0x60, "TLB"},
+			{0x60, 0x70, "MMU"},
+			{0x7C, 0x7E, "TFU"},
+			{0x7F, 0x80, "GMP"},
 		};
 		const char *client = "?";
 
 		V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
 
-		if (v3d->ver >= 41) {
-			axi_id = axi_id >> 5;
-			if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
-				client = v3d41_axi_ids[axi_id];
+		if (v3d->ver >= V3D_GEN_71) {
+			size_t i;
+
+			axi_id = axi_id & 0x7F;
+			for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) {
+				if (axi_id >= v3d71_axi_ids[i].begin &&
+				    axi_id < v3d71_axi_ids[i].end) {
+					client = v3d71_axi_ids[i].client;
+					break;
+				}
+			}
+		} else if (v3d->ver >= V3D_GEN_41) {
+			size_t i;
+
+			axi_id = axi_id & 0xFF;
+			for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) {
+				if (axi_id >= v3d41_axi_ids[i].begin &&
+				    axi_id < v3d41_axi_ids[i].end) {
+					client = v3d41_axi_ids[i].client;
+					break;
+				}
+			}
 		}
 
-		dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+		dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n",
 			client, axi_id, (long long)vio_addr,
 			((intsts & V3D_HUB_INT_MMU_WRV) ? ", write violation" : ""),
@@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg)
 		status = IRQ_HANDLED;
 	}
 
-	if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+	if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
 		dev_err(v3d->drm.dev, "GMP Violation\n");
 		status = IRQ_HANDLED;
 	}
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
 	const struct v3d_perf_counter_desc *counters = NULL;
 	unsigned int max = 0;
 
-	if (v3d->ver >= 71) {
+	if (v3d->ver >= V3D_GEN_71) {
 		counters = v3d_v71_performance_counters;
 		max = ARRAY_SIZE(v3d_v71_performance_counters);
-	} else if (v3d->ver >= 42) {
+	} else if (v3d->ver >= V3D_GEN_42) {
 		counters = v3d_v42_performance_counters;
 		max = ARRAY_SIZE(v3d_v42_performance_counters);
 	}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
 # define V3D_ERR_VPAERGS BIT(1)
 # define V3D_ERR_VPAEABB BIT(0)
 
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
 #endif /* V3D_REGS_H */
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..35f131a46d07 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
 	struct dma_fence *fence;
 	unsigned long irqflags;
 
-	if (unlikely(job->base.base.s_fence->finished.error))
+	if (unlikely(job->base.base.s_fence->finished.error)) {
+		spin_lock_irqsave(&v3d->job_lock, irqflags);
+		v3d->bin_job = NULL;
+		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
 		return NULL;
+	}
 
 	/* Lock required around bin_job update vs
 	 * v3d_overflow_mem_work().
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
 	struct drm_device *dev = &v3d->drm;
 	struct dma_fence *fence;
 
-	if (unlikely(job->base.base.s_fence->finished.error))
+	if (unlikely(job->base.base.s_fence->finished.error)) {
+		v3d->render_job = NULL;
 		return NULL;
+	}
 
 	v3d->render_job = job;
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
 	struct drm_device *dev = &v3d->drm;
 	struct dma_fence *fence;
 
+	if (unlikely(job->base.base.s_fence->finished.error)) {
+		v3d->tfu_job = NULL;
+		return NULL;
+	}
+
+	v3d->tfu_job = job;
+
 	fence = v3d_fence_create(v3d, V3D_TFU);
 	if (IS_ERR(fence))
 		return NULL;
 
-	v3d->tfu_job = job;
 	if (job->base.irq_fence)
 		dma_fence_put(job->base.irq_fence);
 	job->base.irq_fence = dma_fence_get(fence);
@@ -345,11 +357,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
 	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
 	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
 	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
-	if (v3d->ver >= 71)
+	if (v3d->ver >= V3D_GEN_71)
 		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
 	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
 	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
-	if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+	if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
 		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
 		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
 		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
 	struct dma_fence *fence;
 	int i, csd_cfg0_reg;
 
+	if (unlikely(job->base.base.s_fence->finished.error)) {
+		v3d->csd_job = NULL;
+		return NULL;
+	}
+
 	v3d->csd_job = job;
 
 	v3d_invalidate_caches(v3d);
@@ -395,7 +412,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
 	 *
 	 * XXX: Set the CFG7 register
 	 */
-	if (v3d->ver >= 71)
+	if (v3d->ver >= V3D_GEN_71)
 		V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
 
 	/* CFG0 write kicks off the job.
 	 */
@@ -411,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
 	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
 	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
 	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
-	u32 *wg_counts;
+	struct v3d_dev *v3d = job->base.v3d;
+	u32 num_batches, *wg_counts;
 
 	v3d_get_bo_vaddr(bo);
 	v3d_get_bo_vaddr(indirect);
@@ -424,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
 	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
 	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
 	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
-	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
-		(wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);
+
+	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
+	if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
+		args->cfg[4] = num_batches - 1;
+	else
+		args->cfg[4] = num_batches;
+
+	WARN_ON(args->cfg[4] == ~0);
 
 	for (int i = 0; i < 3; i++) {
 		/* 0xffffffff indicates that the uniform rewrite is not needed */
@@ -656,8 +683,6 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
 	struct v3d_cpu_job *job = to_cpu_job(sched_job);
 	struct v3d_dev *v3d = job->base.v3d;
 
-	v3d->cpu_job = job;
-
 	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
 		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
 		return NULL;
@@ -719,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+	struct drm_gpu_scheduler *sched = sched_job->sched;
+
+	spin_lock(&sched->job_list_lock);
+	list_add(&sched_job->list, &sched->pending_list);
+	spin_unlock(&sched->job_list_lock);
+}
+
 static enum drm_gpu_sched_stat
 v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
 		    u32 *timedout_ctca, u32 *timedout_ctra)
@@ -733,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
 	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
 	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
 
+	/* If the current address or return address have changed, then the GPU
+	 * has probably made progress and we should delay the reset. This
+	 * could fail if the GPU got in an infinite loop in the CL, but that
+	 * is pretty unlikely outside of an i-g-t testcase.
+	 */
 	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
 		*timedout_ctca = ctca;
 		*timedout_ctra = ctra;
+
+		v3d_sched_skip_reset(sched_job);
 		return DRM_GPU_SCHED_STAT_NOMINAL;
 	}
@@ -775,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
 	struct v3d_dev *v3d = job->base.v3d;
 	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
 
-	/* If we've made progress, skip reset and let the timer get
-	 * rearmed.
+	/* If we've made progress, skip reset, add the job to the pending
+	 * list, and let the timer get rearmed.
 	 */
 	if (job->timedout_batches != batches) {
 		job->timedout_batches = batches;
+
+		v3d_sched_skip_reset(sched_job);
 		return DRM_GPU_SCHED_STAT_NOMINAL;
 	}
@@ -822,67 +861,54 @@ static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
 	.free_job = v3d_cpu_job_free
 };
 
+static int
+v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
+		     enum v3d_queue queue, const char *name)
+{
+	struct drm_sched_init_args args = {
+		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
+		.credit_limit = 1,
+		.timeout = msecs_to_jiffies(500),
+		.dev = v3d->drm.dev,
+	};
+
+	args.ops = ops;
+	args.name = name;
+
+	return drm_sched_init(&v3d->queue[queue].sched, &args);
+}
+
 int
 v3d_sched_init(struct v3d_dev *v3d)
 {
-	int hw_jobs_limit = 1;
-	int job_hang_limit = 0;
-	int hang_limit_ms = 500;
 	int ret;
 
-	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
-			     &v3d_bin_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_bin", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
 	if (ret)
 		return ret;
 
-	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
-			     &v3d_render_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_render", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
+				   "v3d_render");
 	if (ret)
 		goto fail;
 
-	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
-			     &v3d_tfu_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     hw_jobs_limit, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_tfu", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
 	if (ret)
 		goto fail;
 
 	if (v3d_has_csd(v3d)) {
-		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
-				     &v3d_csd_sched_ops, NULL,
-				     DRM_SCHED_PRIORITY_COUNT,
-				     hw_jobs_limit, job_hang_limit,
-				     msecs_to_jiffies(hang_limit_ms), NULL,
-				     NULL, "v3d_csd", v3d->drm.dev);
+		ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
+					   "v3d_csd");
 		if (ret)
 			goto fail;
 
-		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
-				     &v3d_cache_clean_sched_ops, NULL,
-				     DRM_SCHED_PRIORITY_COUNT,
-				     hw_jobs_limit, job_hang_limit,
-				     msecs_to_jiffies(hang_limit_ms), NULL,
-				     NULL, "v3d_cache_clean", v3d->drm.dev);
+		ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
+					   V3D_CACHE_CLEAN, "v3d_cache_clean");
 		if (ret)
 			goto fail;
 	}
 
-	ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
-			     &v3d_cpu_sched_ops, NULL,
-			     DRM_SCHED_PRIORITY_COUNT,
-			     1, job_hang_limit,
-			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_cpu", v3d->drm.dev);
+	ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
 	if (ret)
 		goto fail;