Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
 drivers/gpu/drm/msm/adreno/a2xx_catalog.c             |    7
 drivers/gpu/drm/msm/adreno/a2xx_gpu.c                 |   52
 drivers/gpu/drm/msm/adreno/a2xx_gpu.h                 |    2
 drivers/gpu/drm/msm/adreno/a3xx_catalog.c             |   13
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c                 |   52
 drivers/gpu/drm/msm/adreno/a3xx_gpu.h                 |    2
 drivers/gpu/drm/msm/adreno/a4xx_catalog.c             |    7
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c                 |   54
 drivers/gpu/drm/msm/adreno/a4xx_gpu.h                 |    2
 drivers/gpu/drm/msm/adreno/a5xx_catalog.c             |   17
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c                 |   61
 drivers/gpu/drm/msm/adreno/a5xx_gpu.h                 |    1
 drivers/gpu/drm/msm/adreno/a6xx_catalog.c             |  385
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c                 |  335
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h                 |   25
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c                 |  438
 drivers/gpu/drm/msm/adreno/a6xx_gpu.h                 |   31
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c           |    5
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h           |   74
 drivers/gpu/drm/msm/adreno/a6xx_hfi.c                 |   74
 drivers/gpu/drm/msm/adreno/a6xx_hfi.h                 |   17
 drivers/gpu/drm/msm/adreno/a8xx_gpu.c                 | 1201
 drivers/gpu/drm/msm/adreno/adreno_device.c            |    4
 drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h |  420
 drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h |  332
 drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h |  470
 drivers/gpu/drm/msm/adreno/adreno_gpu.c               |   19
 drivers/gpu/drm/msm/adreno/adreno_gpu.h               |   56
 28 files changed, 3077 insertions(+), 1079 deletions(-)
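
The series-wide theme in the hunks below is a vtable refactor: each catalog entry stops carrying a bare .init function pointer and instead points at the generation's whole adreno_gpu_funcs table, which gains .init as a member. A minimal sketch of the shapes this implies (field layout inferred from the hunks, not copied from the headers):

    /* Sketch only -- inferred from the diff, verify against adreno_gpu.h */
    struct adreno_gpu_funcs {
            struct msm_gpu_funcs base;                        /* common GPU ops */
            struct msm_gpu *(*init)(struct drm_device *dev);  /* moved in here */
            int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    };

    struct adreno_info {
            /* ... chip_ids, family, fw, gmem, quirks ... */
            const struct adreno_gpu_funcs *funcs;   /* replaces the old .init */
    };

This lets each per-generation init function become static (see the *_gpu.c hunks) while the catalogs reference only the exported funcs tables.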
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 5ddd015f930d..e9dbf3ddf89e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
static const struct adreno_info a2xx_gpus[] = {
{
@@ -19,7 +20,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, { /* a200 on i.mx51 has only 128kib gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
@@ -30,7 +31,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
@@ -41,7 +42,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a2xx);
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index ec38db45d8a3..1b1ee14b65cf 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -234,7 +234,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
* word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
* Older firmware files, which lack protection support, have 0 instead.
*/
- if (ptr[1] == 0) {
+ if (ptr[1] == 0 && !a2xx_gpu->protection_disabled) {
dev_warn(gpu->dev->dev,
"Legacy firmware detected, disabling protection support\n");
a2xx_gpu->protection_disabled = true;
@@ -486,39 +486,18 @@ static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a2xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a2xx_recover,
- .submit = a2xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a2xx_irq,
- .destroy = a2xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_state_get = a2xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = a2xx_create_vm,
- .get_rptr = a2xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
/* TODO */
};
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
struct a2xx_gpu *a2xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
int ret;
if (!pdev) {
@@ -539,7 +518,7 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -558,3 +537,26 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a2xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = a2xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = a2xx_create_vm,
+ .get_rptr = a2xx_get_rptr,
+ },
+ .init = a2xx_gpu_init,
+};
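
With .init now inside the funcs table, the probe path in adreno_device.c (a 4-line change per the diffstat, not shown in this section) presumably switches from info->init(drm) to dispatching through the table; a hypothetical call site:

    /* Hypothetical -- the actual adreno_device.c hunk is not shown here. */
    gpu = info->funcs->init(drm);
    if (IS_ERR(gpu))
            return PTR_ERR(gpu);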
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
index 53702f19990f..162ef98951f5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -19,6 +19,8 @@ struct a2xx_gpu {
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+extern const struct adreno_gpu_funcs a2xx_gpu_funcs;
+
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 1498e6532f62..6ae8716fc08a 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a3xx_gpu.h"
static const struct adreno_info a3xx_gpus[] = {
{
@@ -18,7 +19,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
@@ -29,7 +30,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
@@ -40,7 +41,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000620),
.family = ADRENO_3XX,
@@ -51,7 +52,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
@@ -66,7 +67,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
@@ -81,7 +82,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a3xx);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index a956cd79195e..f22d33e99e81 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -508,29 +508,6 @@ static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a3xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a3xx_recover,
- .submit = a3xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a3xx_irq,
- .destroy = a3xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a3xx_gpu_busy,
- .gpu_state_get = a3xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a3xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
@@ -538,13 +515,14 @@ static const struct msm_gpu_perfcntr perfcntrs[] = {
SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
@@ -569,7 +547,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -613,3 +591,27 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a3xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .submit = a3xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a3xx_gpu_busy,
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a3xx_get_rptr,
+ },
+ .init = a3xx_gpu_init,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index c555fb13e0d7..3d4ec9dbd918 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -23,4 +23,6 @@ struct a3xx_gpu {
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+extern const struct adreno_gpu_funcs a3xx_gpu_funcs;
+
#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 09f9f228b75e..9192586f7ef0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a4xx_gpu.h"
static const struct adreno_info a4xx_gpus[] = {
{
@@ -19,7 +20,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
@@ -30,7 +31,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
@@ -41,7 +42,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a4xx);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 83f6329accba..db06c06067ae 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -627,37 +627,14 @@ static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a4xx_hw_init,
- .pm_suspend = a4xx_pm_suspend,
- .pm_resume = a4xx_pm_resume,
- .recover = a4xx_recover,
- .submit = a4xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a4xx_irq,
- .destroy = a4xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a4xx_gpu_busy,
- .gpu_state_get = a4xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a4xx_get_rptr,
- },
- .get_timestamp = a4xx_get_timestamp,
-};
-
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
struct a4xx_gpu *a4xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
@@ -680,7 +657,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -726,3 +703,28 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a4xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
+ .recover = a4xx_recover,
+ .submit = a4xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a4xx_gpu_busy,
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a4xx_get_rptr,
+ },
+ .init = a4xx_gpu_init,
+ .get_timestamp = a4xx_get_timestamp,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index a01448cba2ea..71b164439f62 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -20,4 +20,6 @@ struct a4xx_gpu {
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+extern const struct adreno_gpu_funcs a4xx_gpu_funcs;
+
#endif /* __A4XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index b48a636d8237..babd320f3b73 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a5xx_gpu.h"
static const struct adreno_info a5xx_gpus[] = {
{
@@ -21,7 +22,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
@@ -38,7 +39,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
@@ -55,7 +56,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
@@ -72,7 +73,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
@@ -89,7 +90,7 @@ static const struct adreno_info a5xx_gpus[] = {
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
@@ -105,7 +106,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
@@ -127,7 +128,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
@@ -145,7 +146,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a540_zap.mdt",
}
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4a04dc43a8e6..56eaff2ee4e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1691,34 +1691,6 @@ static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a5xx_hw_init,
- .ucode_load = a5xx_ucode_load,
- .pm_suspend = a5xx_pm_suspend,
- .pm_resume = a5xx_pm_resume,
- .recover = a5xx_recover,
- .submit = a5xx_submit,
- .active_ring = a5xx_active_ring,
- .irq = a5xx_irq,
- .destroy = a5xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = a5xx_show,
-#endif
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = a5xx_debugfs_init,
-#endif
- .gpu_busy = a5xx_gpu_busy,
- .gpu_state_get = a5xx_gpu_state_get,
- .gpu_state_put = a5xx_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a5xx_get_rptr,
- },
- .get_timestamp = a5xx_get_timestamp,
-};
-
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
@@ -1751,7 +1723,7 @@ static void check_speed_bin(struct device *dev)
devm_pm_opp_set_supported_hw(dev, &val, 1);
}
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
@@ -1781,7 +1753,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
if (config->info->revn == 510)
nr_rings = 1;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
@@ -1806,3 +1778,32 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return gpu;
}
+
+const struct adreno_gpu_funcs a5xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a5xx_hw_init,
+ .ucode_load = a5xx_ucode_load,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .submit = a5xx_submit,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
+ .gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a5xx_get_rptr,
+ },
+ .init = a5xx_gpu_init,
+ .get_timestamp = a5xx_get_timestamp,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 9c0d701fe4b8..407bb950d350 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -133,6 +133,7 @@ struct a5xx_preempt_record {
*/
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+extern const struct adreno_gpu_funcs a5xx_gpu_funcs;
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 44df6410bce1..29107b362346 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -672,6 +672,14 @@ static const u32 a690_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a690_protect, 48);
+static const struct adreno_reglist a640_gbif[] = {
+ { REG_A6XX_GBIF_QSB_SIDE0, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE1, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE2, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE3, 0x00071620 },
+ { },
+};
+
static const struct adreno_info a6xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x06010000),
@@ -683,11 +691,12 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = (SZ_128K + SZ_4K),
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a610_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00080000,
},
@@ -706,6 +715,22 @@ static const struct adreno_info a6xx_gpus[] = {
{ 127, 4 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010200),
+ .family = ADRENO_6XX_GEN1,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a612_rgmu.bin",
+ },
+ .gmem = (SZ_128K + SZ_4K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .funcs = &a6xx_gmuwrapper_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a612_hwcg,
+ .protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000022,
+ .prim_fifo_threshold = 0x00080000,
+ },
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x06010500),
.family = ADRENO_6XX_GEN1,
.revn = 615,
@@ -716,7 +741,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -747,7 +772,7 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -774,7 +799,7 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
@@ -797,7 +822,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -822,7 +847,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -847,7 +872,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -873,11 +898,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a620_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a620_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
@@ -896,10 +922,11 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
.bcms = (const struct a6xx_bcm[]) {
@@ -933,7 +960,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
@@ -953,7 +980,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
@@ -977,11 +1004,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a650_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
@@ -1003,11 +1031,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
@@ -1022,10 +1051,11 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
@@ -1045,11 +1075,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
@@ -1072,7 +1103,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
@@ -1091,11 +1122,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a690_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
@@ -1426,11 +1458,12 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a702_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x0000c000,
},
@@ -1452,12 +1485,13 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a730_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
},
.preempt_record_size = 2860 * SZ_1K,
@@ -1473,12 +1507,13 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a740_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
@@ -1507,12 +1542,13 @@ static const struct adreno_info a7xx_gpus[] = {
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION |
ADRENO_QUIRK_IFPC,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
@@ -1548,12 +1584,13 @@ static const struct adreno_info a7xx_gpus[] = {
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION |
ADRENO_QUIRK_IFPC,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
.ifpc_reglist = &a750_ifpc_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
@@ -1581,11 +1618,12 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x70f0000,
.gmu_cgc_mode = 0x00020222,
.bcms = (const struct a6xx_bcm[]) {
@@ -1612,6 +1650,306 @@ static const struct adreno_info a7xx_gpus[] = {
};
DECLARE_ADRENO_GPULIST(a7xx);
+static const struct adreno_reglist_pipe x285_nonctxt_regs[] = {
+ { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) },
+ { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_RB_GC_GMEM_PROTECT, 0x15000000, BIT(PIPE_BR) },
+ { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) },
+ { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) },
+ { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_CGC_P2S_CNTL, 0x00000040, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_2, 0x00820800, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) },
+ { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) },
+ /* Disable CS dead batch merge */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(31), BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) },
+ { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10000000, BIT(PIPE_NONE) },
+ /* BIT(26): Disable final clamp for bicubic filtering */
+ { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x00000720, BIT(PIPE_NONE) },
+ { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) },
+ { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { },
+};
+
+static const u32 x285_protect_regs[] = {
+ A6XX_PROTECT_RDONLY(0x00008, 0x039b),
+ A6XX_PROTECT_RDONLY(0x003b4, 0x008b),
+ A6XX_PROTECT_NORDWR(0x00440, 0x001f),
+ A6XX_PROTECT_RDONLY(0x00580, 0x005f),
+ A6XX_PROTECT_NORDWR(0x005e0, 0x011f),
+ A6XX_PROTECT_RDONLY(0x0074a, 0x0005),
+ A6XX_PROTECT_RDONLY(0x00759, 0x0026),
+ A6XX_PROTECT_RDONLY(0x00789, 0x0000),
+ A6XX_PROTECT_RDONLY(0x0078c, 0x0013),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0029),
+ A6XX_PROTECT_NORDWR(0x0082c, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00837, 0x00af),
+ A6XX_PROTECT_RDONLY(0x008e7, 0x00c9),
+ A6XX_PROTECT_NORDWR(0x008ec, 0x00c3),
+ A6XX_PROTECT_NORDWR(0x009b1, 0x0250),
+ A6XX_PROTECT_RDONLY(0x00ce0, 0x0001),
+ A6XX_PROTECT_RDONLY(0x00df0, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00df1, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c5),
+ A6XX_PROTECT_RDONLY(0x03cc6, 0x0039),
+ A6XX_PROTECT_NORDWR(0x03d00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00ff),
+ A6XX_PROTECT_RDONLY(0x08f00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08f01, 0x01be),
+ A6XX_PROTECT_NORDWR(0x09600, 0x01ff),
+ A6XX_PROTECT_RDONLY(0x0981a, 0x02e5),
+ A6XX_PROTECT_NORDWR(0x09e00, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a82e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae00, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae08, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf),
+ A6XX_PROTECT_RDONLY(0x0aed0, 0x002f),
+ A6XX_PROTECT_NORDWR(0x0af00, 0x027f),
+ A6XX_PROTECT_NORDWR(0x0b600, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x003f),
+ A6XX_PROTECT_RDONLY(0x18440, 0x013f),
+ A6XX_PROTECT_NORDWR(0x18580, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1b400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0477),
+ A6XX_PROTECT_RDONLY(0x1f878, 0x0507),
+ A6XX_PROTECT_NORDWR(0x1f930, 0x0329),
+ A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x27800, 0x007f),
+ A6XX_PROTECT_RDONLY(0x27880, 0x0385),
+ A6XX_PROTECT_NORDWR(0x27882, 0x000a),
+ A6XX_PROTECT_NORDWR(0x27c06, 0x0000),
+};
+
+DECLARE_ADRENO_PROTECT(x285_protect, 64);
+
+static const struct adreno_reglist_pipe a840_nonctxt_regs[] = {
+ { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) },
+ { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ /* Disable Dead Draw Merge scheme on RB-HLSQ */
+ { REG_A6XX_RB_RBP_CNTL, BIT(5), BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A7XX_RB_CCU_CNTL, 0x00000068, BIT(PIPE_BR) },
+ /* Partially enable perf clear, Disable DINT to c/z be data forwarding */
+ { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0x00002200, BIT(PIPE_BR) },
+ { REG_A8XX_RB_GC_GMEM_PROTECT, 0x12000000, BIT(PIPE_BR) },
+ { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) },
+ { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) },
+ { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_POWER_UP_RESET_SW_OVERRIDE, 0x70809060, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_POWER_UP_RESET_SW_BV_OVERRIDE, 0x30000000, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_CGC_P2S_CNTL, 0x00000040, BIT(PIPE_NONE) },
+ /* Disable mode_switch optimization in UMAS */
+ { REG_A6XX_SP_CHICKEN_BITS, BIT(24) | BIT(26), BIT(PIPE_NONE) },
+ /* Disable LPAC large-LM mode */
+ { REG_A8XX_SP_SS_CHICKEN_BITS_0, BIT(3), BIT(PIPE_NONE) },
+ /* Disable PS out of order retire */
+ { REG_A7XX_SP_CHICKEN_BITS_2, 0x00c21800, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) },
+ /* Disable SP2TP info attribute */
+ { REG_A8XX_SP_CHICKEN_BITS_4, 0x00000002, BIT(PIPE_NONE) },
+ { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL, BIT(14), BIT(PIPE_NONE) },
+ /* Ignore HLSQ shared constant feedback from SP */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_1, BIT(17), BIT(PIPE_NONE) },
+ /* Disable CS dead batch merge */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(24), BIT(PIPE_NONE) },
+ { REG_A8XX_SP_HLSQ_DBG_ECO_CNTL_3, BIT(7), BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) },
+ { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10100000, BIT(PIPE_NONE) },
+ /* BIT(26): Disable final clamp for bicubic filtering */
+ { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x04000720, BIT(PIPE_NONE) },
+ { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) },
+ { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { },
+};
+
+static const u32 a840_protect_regs[] = {
+ A6XX_PROTECT_RDONLY(0x00008, 0x039b),
+ A6XX_PROTECT_RDONLY(0x003b4, 0x008b),
+ A6XX_PROTECT_NORDWR(0x00440, 0x001f),
+ A6XX_PROTECT_RDONLY(0x00580, 0x005f),
+ A6XX_PROTECT_NORDWR(0x005e0, 0x011f),
+ A6XX_PROTECT_RDONLY(0x0074a, 0x0005),
+ A6XX_PROTECT_RDONLY(0x00759, 0x001b),
+ A6XX_PROTECT_NORDWR(0x00775, 0x000a),
+ A6XX_PROTECT_RDONLY(0x00789, 0x0000),
+ A6XX_PROTECT_RDONLY(0x0078c, 0x0013),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0029),
+ A6XX_PROTECT_NORDWR(0x00837, 0x00af),
+ A6XX_PROTECT_RDONLY(0x008e7, 0x00c9),
+ A6XX_PROTECT_NORDWR(0x008ec, 0x00c3),
+ A6XX_PROTECT_NORDWR(0x009b1, 0x0250),
+ A6XX_PROTECT_NORDWR(0x00c07, 0x0008),
+ A6XX_PROTECT_RDONLY(0x00ce0, 0x0001),
+ A6XX_PROTECT_RDONLY(0x00df0, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00df1, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c5),
+ A6XX_PROTECT_RDONLY(0x03cc6, 0x0039),
+ A6XX_PROTECT_NORDWR(0x03d00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00ff),
+ A6XX_PROTECT_RDONLY(0x08f00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08f01, 0x01be),
+ A6XX_PROTECT_NORDWR(0x09600, 0x01ff),
+ A6XX_PROTECT_RDONLY(0x0981a, 0x02e5),
+ A6XX_PROTECT_NORDWR(0x09e00, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a82e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0004),
+ A6XX_PROTECT_NORDWR(0x0ae08, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf),
+ A6XX_PROTECT_RDONLY(0x0aed0, 0x002f),
+ A6XX_PROTECT_NORDWR(0x0af00, 0x027f),
+ A6XX_PROTECT_NORDWR(0x0b600, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x003f),
+ A6XX_PROTECT_RDONLY(0x18440, 0x013f),
+ A6XX_PROTECT_NORDWR(0x18580, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1b400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0477),
+ A6XX_PROTECT_RDONLY(0x1f878, 0x0507),
+ A6XX_PROTECT_NORDWR(0x1f930, 0x0329),
+ A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x27800, 0x007f),
+ A6XX_PROTECT_RDONLY(0x27880, 0x0385),
+ A6XX_PROTECT_NORDWR(0x27882, 0x0009),
+ A6XX_PROTECT_NORDWR(0x27c06, 0x0000),
+};
+DECLARE_ADRENO_PROTECT(a840_protect, 64);
+
+static const struct adreno_reglist a840_gbif[] = {
+ { REG_A6XX_GBIF_QSB_SIDE0, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE1, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE2, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE3, 0x00071e20 },
+ { REG_A8XX_GBIF_CX_CONFIG, 0x20023000 },
+ { },
+};
+
+static const struct adreno_info a8xx_gpus[] = {
+ {
+ .chip_ids = ADRENO_CHIP_IDS(0x44070001),
+ .family = ADRENO_8XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen80100_sqe.fw",
+ [ADRENO_FW_GMU] = "gen80100_gmu.bin",
+ },
+ .gmem = 21 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .funcs = &a8xx_gpu_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .protect = &x285_protect,
+ .nonctxt_reglist = x285_nonctxt_regs,
+ .gbif_cx = a840_gbif,
+ .max_slices = 4,
+ .gmu_chipid = 0x8010100,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
+ },
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x44050a01),
+ .family = ADRENO_8XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen80200_sqe.fw",
+ [ADRENO_FW_GMU] = "gen80200_gmu.bin",
+ [ADRENO_FW_AQE] = "gen80200_aqe.fw",
+ },
+ .gmem = 18 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .funcs = &a8xx_gpu_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .protect = &a840_protect,
+ .nonctxt_reglist = a840_nonctxt_regs,
+ .gbif_cx = a840_gbif,
+ .max_slices = 3,
+ .gmu_chipid = 0x8020100,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 10687500,
+ },
+ { /* sentinel */ },
+ },
+ },
+ .preempt_record_size = 19708 * SZ_1K,
+ }
+};
+
+DECLARE_ADRENO_GPULIST(a8xx);
+
static inline __always_unused void __build_asserts(void)
{
BUILD_BUG_ON(a630_protect.count > a630_protect.count_max);
@@ -1619,4 +1957,5 @@ static inline __always_unused void __build_asserts(void)
BUILD_BUG_ON(a660_protect.count > a660_protect.count_max);
BUILD_BUG_ON(a690_protect.count > a690_protect.count_max);
BUILD_BUG_ON(a730_protect.count > a730_protect.count_max);
+ BUILD_BUG_ON(a840_protect.count > a840_protect.count_max);
}
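
The new assert follows the established pattern: DECLARE_ADRENO_PROTECT() records both how many protect entries are listed and the hardware's slot limit, which is why a840_protect, with its ~54 entries, must declare a count_max of 64 rather than anything smaller. A sketch of the assumed expansion (modeled on adreno_gpu.h; verify the exact field names there):

    /* Assumed expansion -- .count comes from the regs array, .count_max
     * is the hardware slot limit checked by the BUILD_BUG_ON above. */
    #define DECLARE_ADRENO_PROTECT(name, __count_max)           \
    static const struct adreno_protect name = {                 \
            .regs = name ## _regs,                              \
            .count = ARRAY_SIZE(name ## _regs),                 \
            .count_max = __count_max,                           \
    }

The new a8xx_gpulist is presumably wired into adreno_device.c's gpulists[] array alongside the other generations, accounting for part of the 4-line change listed in the diffstat.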
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index fc62fef2fed8..5903cd891b49 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -224,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
- u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int local = gmu->idle_level;
+ u32 val;
/* SPTP and IFPC both report as IFPC */
if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
local = GMU_IDLE_STATE_IFPC;
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_is_a8xx(adreno_gpu))
+ val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ else
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val == local) {
if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
@@ -269,7 +274,9 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0);
+ else if (adreno_is_a7xx(adreno_gpu))
gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
else
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
@@ -350,12 +357,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
u32 val;
int request, ack;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+ /* Skip OOB requests on RGMU targets, which do not support them */
+ if (adreno_has_rgmu(adreno_gpu))
+ return 0;
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
@@ -376,9 +389,23 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Trigger the requested OOB operation */
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
- /* Wait for the acknowledge interrupt */
- ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & (1 << ack), 100, 10000);
+ do {
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (!ret)
+ break;
+
+ if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+ break;
+
+ /* We may time out because the GMU is temporarily wedged by
+ * pending faults from the GPU while we are taking a devcoredump.
+ * Wait until the MMU is resumed and try again.
+ */
+ wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+ } while (true);
if (ret)
DRM_DEV_ERROR(gmu->dev,
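
The retry loop above relies on standard completion semantics: for it to terminate, fault_coredump_done must start out completed and only be re-armed while a fault-triggered devcoredump is actually in flight. A condensed sketch of that assumed contract (the init and fault-path code fall outside this diff):

    /* Assumed producer side, somewhere in the fault/coredump path: */
    init_completion(&adreno_gpu->fault_coredump_done);
    complete_all(&adreno_gpu->fault_coredump_done);  /* no dump in flight */

    /* on a GPU fault: */
    reinit_completion(&adreno_gpu->fault_coredump_done);
    /* ... capture the devcoredump, resume the MMU ... */
    complete_all(&adreno_gpu->fault_coredump_done);

Under that contract, completion_done() returning true means no dump is pending and the timeout is genuine, so the loop reports the error instead of waiting.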
@@ -395,10 +422,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int bit;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+ /* Skip OOB requests on RGMU targets, which do not support them */
+ if (adreno_has_rgmu(adreno_gpu))
+ return;
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
@@ -485,17 +518,25 @@ static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
* in the power down sequence not being fully executed. That in turn can
* prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
*/
- if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
+ else if (adreno_is_a7xx(adreno_gpu) || adreno_is_a621(adreno_gpu) ||
+ adreno_is_7c3(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
/* Disable the power counter so the GMU isn't busy */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+ else
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
/* Disable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
@@ -522,10 +563,9 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
- a6xx_gemnoc_workaround(gmu);
-
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ a6xx_gemnoc_workaround(gmu);
return ret;
}
@@ -561,16 +601,22 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 bitmask = BIT(16);
int ret;
u32 val;
if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
return;
+ if (adreno_is_a840(adreno_gpu))
+ bitmask = BIT(30);
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
- val, val & (1 << 16), 100, 10000);
+ val, val & bitmask, 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -584,22 +630,24 @@ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
writel(value, ptr + (offset << 2));
}
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
- const char *name);
-
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
- void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
+ void __iomem *pdcptr;
bool pdc_in_aop = false;
+ /* On A8xx and above, the RPMH/PDC is configured entirely by the AOP */
+ if (adreno_is_a8xx(adreno_gpu))
+ return;
+
+ pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc");
if (IS_ERR(pdcptr))
- goto err;
+ return;
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
@@ -612,9 +660,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_address_offset = 0x30080;
if (!pdc_in_aop) {
- seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq");
if (IS_ERR(seqptr))
- goto err;
+ return;
}
/* Disable SDE clock gating */
@@ -704,12 +752,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
-
-err:
- if (!IS_ERR_OR_NULL(pdcptr))
- iounmap(pdcptr);
- if (!IS_ERR_OR_NULL(seqptr))
- iounmap(seqptr);
}
/*
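
Switching the PDC regions to device-managed mappings is what lets the err:/iounmap() tail above disappear: the mapping lives until the device is unbound. A minimal usage sketch of the same API:

    /* devm-managed: unmapped automatically on unbind, so the error
     * path is just an early return. */
    void __iomem *pdcptr =
            devm_platform_ioremap_resource_byname(pdev, "gmu_pdc");
    if (IS_ERR(pdcptr))
            return;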
@@ -732,7 +774,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
/* A7xx knows better by default! */
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
return;
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -780,6 +822,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
return true;
}
+#define NEXT_BLK(blk) \
+ ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -792,7 +837,9 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
- if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@@ -811,7 +858,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
- blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ blk = NEXT_BLK(blk)) {
if (blk->size == 0)
continue;
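
NEXT_BLK() makes the iteration's byte arithmetic explicit: advance by the header size plus `size` payload bytes. The old expression indexed the flexible data[] array in u32 units, which lands on the same address whenever size is 4-byte aligned but hides that intent (and silently rounds down when it is not). A sketch of the block layout this assumes (field order is an assumption; see the real struct block_header in a6xx_gmu.c):

    /* Assumed layout: a fixed header followed by `size` bytes of payload,
     * so NEXT_BLK(blk) == (char *)blk + sizeof(*blk) + blk->size. */
    struct block_header {
            u32 addr;
            u32 size;   /* payload bytes following the header */
            u32 type;
            u32 value;
            u32 data[]; /* flexible payload */
    };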
@@ -847,7 +894,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx;
+ const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx;
u32 fence_range_lower, fence_range_upper;
u32 chipid = 0;
int ret;
@@ -856,12 +905,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ } else if (adreno_is_a8xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
/* Turn on TCM (Tightly Coupled Memory) retention */
if (adreno_is_a7xx(adreno_gpu))
a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
- else
+ else if (!adreno_is_a8xx(adreno_gpu))
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
ret = a6xx_rpmh_start(gmu);
@@ -886,7 +938,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a8xx(adreno_gpu)) {
+ fence_range_upper = 0x32;
+ fence_range_lower = 0x8c0;
+ } else if (adreno_is_a7xx(adreno_gpu)) {
fence_range_upper = 0x32;
fence_range_lower = 0x8a0;
} else {
@@ -920,7 +975,12 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
}
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a8xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid);
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_8,
+ (gmu->log.iova & GENMASK(31, 12)) |
+ ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+ } else if (adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
(gmu->log.iova & GENMASK(31, 12)) |
@@ -932,6 +992,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu->log.iova | (gmu->log.size / SZ_4K - 1));
}
+ /* For A7x and newer, do the CX GBIF configurations before GMU wake up */
+ for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++)
+ gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value);
+
+ if (adreno_is_a8xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000);
+ gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33);
+ }
+
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
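
The gbif_cx tables are ordinary sentinel-terminated adreno_reglist arrays (a zeroed { } entry marks the end), so the write-out above could equally live in a small helper; a hypothetical equivalent of the open-coded loop:

    /* Hypothetical helper, equivalent to the loop in the hunk above. */
    static void a6xx_write_reglist(struct msm_gpu *gpu,
                                   const struct adreno_reglist *list)
    {
            for (int i = 0; list && list[i].offset; i++)
                    gpu_write(gpu, list[i].offset, list[i].value);
    }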
@@ -983,7 +1052,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
u32 val, seqmem_off = 0;
/* The second spin of A7xx GPUs messed with some register offsets.. */
- if (adreno_is_a740_family(adreno_gpu))
+ if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
seqmem_off = 4;
/* Make sure there are no outstanding RPMh votes */
@@ -996,7 +1065,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 1000);
- if (!adreno_is_a740_family(adreno_gpu))
+ if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu))
return;
gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
@@ -1024,7 +1093,10 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+ else
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@@ -1050,7 +1122,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
@@ -1119,6 +1191,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
return ret;
}
+ /* Read the slice info on A8xx GPUs (a no-op on older targets) */
+ a8xx_gpu_get_slice_info(gpu);
+
/* Set the bus quota to a reasonable value for boot */
a6xx_gmu_set_initial_bw(gpu, gmu);
@@ -1128,7 +1203,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
} else if (gmu->legacy) {
@@ -1222,7 +1297,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
if (ret)
goto force_off;
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
@@ -1457,7 +1532,7 @@ static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
/* GMUs on A7xx and newer vote on both x & y */
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
else
data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
@@ -1489,13 +1564,14 @@ static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
- unsigned long *freqs, int freqs_count, const char *id)
+ unsigned long *freqs, int freqs_count,
+ const char *pri_id, const char *sec_id)
{
int i, j;
const u16 *pri, *sec;
size_t pri_count, sec_count;
- pri = cmd_db_read_aux_data(id, &pri_count);
+ pri = cmd_db_read_aux_data(pri_id, &pri_count);
if (IS_ERR(pri))
return PTR_ERR(pri);
/*
@@ -1506,13 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (!pri_count)
return -EINVAL;
- /*
- * Some targets have a separate gfx mxc rail. So try to read that first and then fall back
- * to regular mx rail if it is missing
- */
- sec = cmd_db_read_aux_data("gmxc.lvl", &sec_count);
- if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER))
- sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ sec = cmd_db_read_aux_data(sec_id, &sec_count);
if (IS_ERR(sec))
return PTR_ERR(sec);
@@ -1566,6 +1636,57 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
return 0;
}
+static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count)
+{
+ const u16 *mx;
+ size_t count;
+
+ mx = cmd_db_read_aux_data("mx.lvl", &count);
+ if (IS_ERR(mx))
+ return PTR_ERR(mx);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ count >>= 1;
+ if (!count)
+ return -EINVAL;
+
+ /* Fix the vote for zero frequency */
+ votes[0] = 0xffffffff;
+
+ /* Construct a vote for the rest of the corners */
+ for (int i = 1; i < freqs_count; i++) {
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ u8 j, index = 0;
+
+ /* Find the first mx.lvl index that satisfies the arc level */
+ for (j = 0; j < count; j++) {
+ if (mx[j] >= level) {
+ index = j;
+ break;
+ }
+ }
+
+ if (j == count) {
+ DRM_DEV_ERROR(dev,
+ "Mx Level %u not found in the RPMh list\n",
+ level);
+ DRM_DEV_ERROR(dev, "Available levels:\n");
+ for (j = 0; j < count; j++)
+ DRM_DEV_ERROR(dev, " %u\n", mx[j]);
+
+ return -EINVAL;
+ }
+
+ /* Construct the vote */
+ votes[i] = (0x3fff << 14) | (index << 8) | (0xff);
+ }
+
+ return 0;
+}
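+
+/*
+ * Illustrative encoding of the dependency vote above, using a hypothetical
+ * index: a corner that resolves to mx.lvl index 5 is encoded as
+ * (0x3fff << 14) | (5 << 8) | 0xff = 0x0fffc5ff.
+ */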
+
/*
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
@@ -1580,15 +1701,27 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sec_id;
+ const u16 *gmxc;
int ret;
+ gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL);
+ if (gmxc == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ /* If GMxC is present, prefer it as the secondary rail for GX votes */
+ sec_id = IS_ERR_OR_NULL(gmxc) ? "mx.lvl" : "gmxc.lvl";
+
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
- gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id);
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
- gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl");
+
+ ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs);
/* Build the interconnect votes */
if (info->bcms && gmu->nr_gpu_bws > 1)
@@ -1792,27 +1925,6 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
return 0;
}
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
- const char *name)
-{
- void __iomem *ret;
- struct resource *res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, name);
-
- if (!res) {
- DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- ret = ioremap(res->start, resource_size(res));
- if (!ret) {
- DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- return ret;
-}
-
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
const char *name, irq_handler_t handler)
{
@@ -1863,7 +1975,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- struct platform_device *pdev = to_platform_device(gmu->dev);
mutex_lock(&gmu->lock);
if (!gmu->initialized) {
@@ -1892,12 +2003,11 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
qmp_put(gmu->qmp);
iounmap(gmu->mmio);
- if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
- iounmap(gmu->rscc);
gmu->mmio = NULL;
gmu->rscc = NULL;
- if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+ !adreno_has_rgmu(adreno_gpu)) {
a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
@@ -1919,10 +2029,38 @@ static int cxpd_notifier_cb(struct notifier_block *nb,
return 0;
}
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name, resource_size_t *start)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = ioremap(res->start, resource_size(res));
+ if (!ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (start)
+ *start = res->start;
+
+ return ret;
+}
+
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct platform_device *pdev = of_find_device_by_node(node);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ resource_size_t start;
+ struct resource *res;
int ret;
if (!pdev)
@@ -1939,13 +2077,29 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Mark legacy for manual SPTPRAC control */
gmu->legacy = true;
+ /* RGMU requires clocks */
+ ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+ if (ret < 0)
+ goto err_clk;
+
+ gmu->nr_clocks = ret;
+
/* Map the GMU registers */
- gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_mmio;
}
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_mmio;
+ }
+
+ /* Compute the GMU register offset relative to the GPU base address */
+ gmu->mmio_offset = (u32)(start - res->start);
+
gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
if (IS_ERR(gmu->cxpd)) {
ret = PTR_ERR(gmu->cxpd);
@@ -1978,6 +2132,7 @@ detach_cxpd:
err_mmio:
iounmap(gmu->mmio);
+err_clk:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
@@ -1986,10 +2141,13 @@ err_mmio:
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
+ struct platform_device *pdev = of_find_device_by_node(node);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- struct platform_device *pdev = of_find_device_by_node(node);
struct device_link *link;
+ resource_size_t start;
+ struct resource *res;
int ret;
if (!pdev)
@@ -2025,13 +2183,14 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
goto err_memory;
- gmu->dummy.size = SZ_8K;
+ gmu->dummy.size = SZ_16K;
}
/* Allocate memory for the GMU dummy page */
@@ -2042,7 +2201,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
@@ -2084,19 +2244,30 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
/* Map the GMU registers */
- gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_memory;
}
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_mmio;
+ }
+
+ /* Compute the GMU register offset relative to the GPU base address */
+ gmu->mmio_offset = (u32)(start - res->start);
+
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
- gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
goto err_mmio;
}
+ } else if (adreno_is_a8xx(adreno_gpu)) {
+ gmu->rscc = gmu->mmio + 0x19000;
} else {
gmu->rscc = gmu->mmio + 0x23000;
}
@@ -2170,8 +2341,6 @@ detach_cxpd:
err_mmio:
iounmap(gmu->mmio);
- if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
- iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 06cfc294016f..2af074c8e8cf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -19,8 +19,8 @@ struct a6xx_gmu_bo {
u64 iova;
};
-#define GMU_MAX_GX_FREQS 16
-#define GMU_MAX_CX_FREQS 4
+#define GMU_MAX_GX_FREQS 32
+#define GMU_MAX_CX_FREQS 6
#define GMU_MAX_BCMS 3
struct a6xx_bcm {
@@ -68,6 +68,7 @@ struct a6xx_gmu {
struct drm_gpuvm *vm;
void __iomem *mmio;
+ u32 mmio_offset;
void __iomem *rscc;
int hfi_irq;
@@ -96,6 +97,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ u32 dep_arc_votes[GMU_MAX_GX_FREQS];
struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
@@ -130,20 +132,23 @@ struct a6xx_gmu {
unsigned long status;
};
+#define GMU_BYTE_OFFSET(gmu, offset) (((offset) << 2) - (gmu)->mmio_offset)
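+
+/*
+ * For example, matching the rebased GMU register tables in this patch: with
+ * the GMU region 0x6a000 bytes above the GPU base, mmio_offset is 0x6a000
+ * and the GPU-relative dword offset 0x1a800 maps to byte 0 of the GMU
+ * mapping, since (0x1a800 << 2) - 0x6a000 == 0.
+ */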
+
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
{
- return readl(gmu->mmio + (offset << 2));
+ /* 'offset' is relative to the GPU's start address; adjust it for the GMU mapping */
+ return readl(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset));
}
static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
- writel(value, gmu->mmio + (offset << 2));
+ writel(value, gmu->mmio + GMU_BYTE_OFFSET(gmu, offset));
}
static inline void
gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
{
- memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ memcpy_toio(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset), data, size);
wmb();
}
@@ -160,17 +165,17 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
u64 val;
- val = (u64) readl(gmu->mmio + (lo << 2));
- val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32);
+ val = gmu_read(gmu, lo);
+ val |= ((u64) gmu_read(gmu, hi) << 32);
return val;
}
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
- interval, timeout)
+ readl_poll_timeout((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, \
+ cond, interval, timeout)
#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
- readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
+ readl_poll_timeout_atomic((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, cond, \
interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index b8f8ae940b55..0200a7e71cdf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
}
}
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -224,7 +224,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, submit->seqno - 1);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
- OUT_RING(ring, CP_SET_THREAD_BOTH);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH);
/* Reset state used to synchronize BR and BV */
OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
@@ -235,18 +235,31 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
- OUT_RING(ring, CP_SET_THREAD_BR);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, LRZ_FLUSH_INVALIDATE);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
if (!sysprof) {
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
}
- OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
- OUT_RING(ring, 1);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ OUT_PKT4(ring, REG_A8XX_RBBM_SLICE_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
}
/* Execute the table update */
@@ -275,7 +288,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* to make sure BV doesn't race ahead while BR is still switching
* pagetables.
*/
- if (adreno_is_a7xx(&a6xx_gpu->base)) {
+ if (adreno_is_a7xx(&a6xx_gpu->base) || adreno_is_a8xx(&a6xx_gpu->base)) {
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
@@ -289,20 +302,22 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
+ u32 reg_status = adreno_is_a8xx(adreno_gpu) ?
+ REG_A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS :
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
/*
* Wait for SRAM clear after the pgtable update, so the
* two can happen in parallel:
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
- OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(
- REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(reg_status));
OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
@@ -375,7 +390,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
- OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
OUT_RING(ring, submit->seqno);
/*
@@ -440,6 +455,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
+ u32 rbbm_perfctr_cp0, cp_always_on_counter;
unsigned int i, ibs = 0;
adreno_check_and_reenable_stall(adreno_gpu);
@@ -460,10 +476,16 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
if (gpu->nr_rings > 1)
a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue);
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_start));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_start));
+ if (adreno_is_a8xx(adreno_gpu)) {
+ rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER;
+ } else {
+ rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER;
+ }
+
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start));
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BOTH);
@@ -510,14 +532,17 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, 0x00e); /* IB1LIST end */
}
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_end));
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
- OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
- OUT_RING(ring, submit->seqno);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_CP_SCRATCH_GLOBAL(2), 1);
+ OUT_RING(ring, submit->seqno);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
+ OUT_RING(ring, submit->seqno);
+ }
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BR);
@@ -612,15 +637,26 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
if (adreno_is_a630(adreno_gpu))
clock_cntl_on = 0x8aa8aa02;
- else if (adreno_is_a610(adreno_gpu))
+ else if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu))
clock_cntl_on = 0xaaa8aa82;
else if (adreno_is_a702(adreno_gpu))
clock_cntl_on = 0xaaaaaa82;
else
clock_cntl_on = 0x8aa8aa82;
- cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
- cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+ if (adreno_is_a612(adreno_gpu))
+ cgc_delay = 0x11;
+ else if (adreno_is_a615_family(adreno_gpu))
+ cgc_delay = 0x111;
+ else
+ cgc_delay = 0x10111;
+
+ if (adreno_is_a612(adreno_gpu))
+ cgc_hyst = 0x55;
+ else if (adreno_is_a615_family(adreno_gpu))
+ cgc_hyst = 0x555;
+ else
+ cgc_hyst = 0x5555;
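+
+ /*
+ * Resulting CGC timing values, for reference:
+ *   a612:         delay 0x11,    hyst 0x55
+ *   a615 family:  delay 0x111,   hyst 0x555
+ *   others:       delay 0x10111, hyst 0x5555
+ */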
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
@@ -706,14 +742,20 @@ static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
*cfg = *common_cfg;
- cfg->ubwc_swizzle = 0x6;
- cfg->highest_bank_bit = 15;
+ /* Use the common config as-is for A8xx */
+ if (!adreno_is_a8xx(gpu)) {
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
+ }
if (adreno_is_a610(gpu)) {
cfg->highest_bank_bit = 13;
cfg->ubwc_swizzle = 0x7;
}
+ if (adreno_is_a612(gpu))
+ cfg->highest_bank_bit = 14;
+
if (adreno_is_a618(gpu))
cfg->highest_bank_bit = 14;
@@ -993,7 +1035,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
return false;
/* A7xx and newer are safe! */
- if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
return true;
/*
@@ -1076,6 +1118,23 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
}
}
+ if (!a6xx_gpu->aqe_bo && adreno_gpu->fw[ADRENO_FW_AQE]) {
+ a6xx_gpu->aqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_AQE], &a6xx_gpu->aqe_iova);
+
+ if (IS_ERR(a6xx_gpu->aqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->aqe_bo);
+
+ a6xx_gpu->aqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate AQE ucode: %d\n", ret);
+
+ return ret;
+ }
+
+ msm_gem_object_set_name(a6xx_gpu->aqe_bo, "aqefw");
+ }
+
/*
* Expanded APRIV and targets that support WHERE_AM_I both need a
* privileged buffer to store the RPTR shadow
@@ -1107,7 +1166,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
return 0;
}
-static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
int ret;
@@ -1220,17 +1279,20 @@ static int hw_init(struct msm_gpu *gpu)
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
- /* VBIF/GBIF start*/
- if (adreno_is_a610_family(adreno_gpu) ||
- adreno_is_a640_family(adreno_gpu) ||
- adreno_is_a650_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ /* For GMU wrapper implementations, do the VBIF/GBIF CX configuration here */
+ if (adreno_is_a610_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
- adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
+ }
+
+ if (adreno_is_a610_family(adreno_gpu) ||
+ adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x2120212);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
@@ -1285,10 +1347,10 @@ static int hw_init(struct msm_gpu *gpu)
}
if (adreno_is_a660_family(adreno_gpu))
- gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+ gpu_write(gpu, REG_A7XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
- if (adreno_is_a610(adreno_gpu)) {
+ if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else if (adreno_is_a702(adreno_gpu)) {
@@ -1321,7 +1383,8 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
- if (adreno_is_a730(adreno_gpu) ||
+ if (adreno_is_a612(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
else if (adreno_is_a690(adreno_gpu))
@@ -1540,7 +1603,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int i, active_submits;
+ int active_submits;
adreno_dump_info(gpu);
@@ -1548,10 +1611,6 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* Sometimes crashstate capture is skipped, so SQE should be halted here again */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
- for (i = 0; i < 8; i++)
- DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
-
if (hang_debug)
a6xx_dump(gpu);
@@ -1576,9 +1635,9 @@ static void a6xx_recover(struct msm_gpu *gpu)
*/
gpu->active_submits = 0;
- if (adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu)) {
/* Drain the outstanding traffic on memory buses */
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
/* Reset the GPU to a clean state */
a6xx_gpu_sw_reset(gpu, true);
@@ -1737,10 +1796,10 @@ static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *da
const char *block = "unknown";
u32 scratch[] = {
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(7)),
};
if (info)
@@ -2072,7 +2131,7 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
u32 fuse_val;
int ret;
- if (adreno_is_a750(adreno_gpu)) {
+ if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
/*
* Assume that if qcom scm isn't available, that whatever
* replacement allows writing the fuse register ourselves.
@@ -2098,9 +2157,9 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
return ret;
/*
- * On a750 raytracing may be disabled by the firmware, find out
- * whether that's the case. The scm call above sets the fuse
- * register.
+ * On A7XX_GEN3 and newer, raytracing may be disabled by the
+ * firmware; find out whether that's the case. The scm call
+ * above sets the fuse register.
*/
fuse_val = a6xx_llc_read(a6xx_gpu,
REG_A7XX_CX_MISC_SW_FUSE_VALUE);
@@ -2161,7 +2220,7 @@ void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
{
/* 11nm chips (e.g. ones with A610) have hw issues with the reset line; it is also avoided on A8xx */
- if (adreno_is_a610(to_adreno_gpu(gpu)))
+ if (adreno_is_a610(to_adreno_gpu(gpu)) || adreno_is_a8xx(to_adreno_gpu(gpu)))
return;
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
@@ -2192,7 +2251,12 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
+ if (adreno_is_a8xx(adreno_gpu))
+ a8xx_llc_activate(a6xx_gpu);
+ else if (adreno_is_a7xx(adreno_gpu))
+ a7xx_llc_activate(a6xx_gpu);
+ else
+ a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2229,6 +2293,12 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
if (ret)
goto err_bulk_clk;
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret) {
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+ goto err_bulk_clk;
+ }
+
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_enable(gmu);
@@ -2242,8 +2312,10 @@ err_bulk_clk:
err_set_opp:
mutex_unlock(&a6xx_gpu->gmu.lock);
- if (!ret)
+ if (!ret) {
msm_devfreq_resume(gpu);
+ a6xx_llc_activate(a6xx_gpu);
+ }
return ret;
}
@@ -2284,17 +2356,20 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
trace_msm_gpu_suspend(0);
+ a6xx_llc_deactivate(a6xx_gpu);
+
msm_devfreq_suspend(gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
/* Drain the outstanding traffic on memory buses */
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_disable(gmu);
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->gxpd);
dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
@@ -2345,6 +2420,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
+ if (a6xx_gpu->aqe_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->aqe_bo, gpu->vm);
+ drm_gem_object_put(a6xx_gpu->aqe_bo);
+ }
+
if (a6xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->shadow_bo);
@@ -2527,7 +2607,105 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
return 0;
}
-static const struct adreno_gpu_funcs funcs = {
+static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ extern int enable_preemption;
+ bool is_a7xx;
+ int ret, nr_rings = 1;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ mutex_init(&a6xx_gpu->gmu.lock);
+
+ adreno_gpu->registers = NULL;
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
+
+ adreno_gpu->base.hw_apriv =
+ !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
+
+ /* gpu->info only gets assigned in adreno_gpu_init(). A8xx is included intentionally */
+ is_a7xx = config->info->family >= ADRENO_7XX_GEN1;
+
+ a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
+
+ ret = a6xx_set_supported_hw(&pdev->dev, config->info);
+ if (ret) {
+ a6xx_llc_slices_destroy(a6xx_gpu);
+ kfree(a6xx_gpu);
+ return ERR_PTR(ret);
+ }
+
+ if ((enable_preemption == 1) || (enable_preemption == -1 &&
+ (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
+ nr_rings = 4;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ priv->gpu_clamp_to_idle = true;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu))
+ ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
+ else
+ ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
+ ret = a7xx_cx_mem_init(a6xx_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+ }
+
+ adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ adreno_gpu->funcs->mmu_fault_handler);
+
+ ret = a6xx_calc_ubwc_config(adreno_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a6xx_preempt_init(gpu);
+
+ return gpu;
+}
+
+const struct adreno_gpu_funcs a6xx_gpu_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2554,12 +2732,14 @@ static const struct adreno_gpu_funcs funcs = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
- .sysprof_setup = a6xx_gmu_sysprof_setup,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_gmu_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-static const struct adreno_gpu_funcs funcs_gmuwrapper = {
+const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2585,10 +2765,13 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-static const struct adreno_gpu_funcs funcs_a7xx = {
+const struct adreno_gpu_funcs a7xx_gpu_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2615,111 +2798,36 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
- .sysprof_setup = a6xx_gmu_sysprof_setup,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_gmu_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->gpu_pdev;
- struct adreno_platform_config *config = pdev->dev.platform_data;
- struct device_node *node;
- struct a6xx_gpu *a6xx_gpu;
- struct adreno_gpu *adreno_gpu;
- struct msm_gpu *gpu;
- extern int enable_preemption;
- bool is_a7xx;
- int ret;
-
- a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
- if (!a6xx_gpu)
- return ERR_PTR(-ENOMEM);
-
- adreno_gpu = &a6xx_gpu->base;
- gpu = &adreno_gpu->base;
-
- mutex_init(&a6xx_gpu->gmu.lock);
-
- adreno_gpu->registers = NULL;
-
- /* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
- /* FIXME: How do we gracefully handle this? */
- BUG_ON(!node);
-
- adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
-
- adreno_gpu->base.hw_apriv =
- !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
-
- /* gpu->info only gets assigned in adreno_gpu_init() */
- is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
- config->info->family == ADRENO_7XX_GEN2 ||
- config->info->family == ADRENO_7XX_GEN3;
-
- a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
-
- ret = a6xx_set_supported_hw(&pdev->dev, config->info);
- if (ret) {
- a6xx_llc_slices_destroy(a6xx_gpu);
- kfree(a6xx_gpu);
- return ERR_PTR(ret);
- }
-
- if ((enable_preemption == 1) || (enable_preemption == -1 &&
- (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
- else if (is_a7xx)
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
- else if (adreno_has_gmu_wrapper(adreno_gpu))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
- else
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- /*
- * For now only clamp to idle freq for devices where this is known not
- * to cause power supply issues:
- */
- if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- priv->gpu_clamp_to_idle = true;
-
- if (adreno_has_gmu_wrapper(adreno_gpu))
- ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
- else
- ret = a6xx_gmu_init(a6xx_gpu, node);
- of_node_put(node);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- if (adreno_is_a7xx(adreno_gpu)) {
- ret = a7xx_cx_mem_init(a6xx_gpu);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
- }
-
- adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
-
- msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
- a6xx_fault_handler);
-
- ret = a6xx_calc_ubwc_config(adreno_gpu);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- /* Set up the preemption specific bits and pieces for each ringbuffer */
- a6xx_preempt_init(gpu);
-
- return gpu;
-}
+const struct adreno_gpu_funcs a8xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a8xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a8xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a8xx_irq,
+ .destroy = a6xx_destroy,
+ .gpu_busy = a8xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a8xx_progress,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a8xx_gmu_get_timestamp,
+ .bus_halt = a8xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a8xx_fault_handler,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 0b17d36c36a9..6820216ec5fc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,6 +46,9 @@ struct a6xx_info {
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
const struct adreno_reglist_list *ifpc_reglist;
+ const struct adreno_reglist *gbif_cx;
+ const struct adreno_reglist_pipe *nonctxt_reglist;
+ u32 max_slices;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -57,6 +60,8 @@ struct a6xx_gpu {
struct drm_gem_object *sqe_bo;
uint64_t sqe_iova;
+ struct drm_gem_object *aqe_bo;
+ uint64_t aqe_iova;
struct msm_ringbuffer *cur_ring;
struct msm_ringbuffer *next_ring;
@@ -101,6 +106,11 @@ struct a6xx_gpu {
void *htw_llc_slice;
bool have_mmu500;
bool hung;
+
+ u32 cached_aperture;
+ spinlock_t aperture_lock;
+
+ u32 slice_mask;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -216,6 +226,11 @@ struct a7xx_cp_smmu_info {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+extern const struct adreno_gpu_funcs a6xx_gpu_funcs;
+extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs;
+extern const struct adreno_gpu_funcs a7xx_gpu_funcs;
+extern const struct adreno_gpu_funcs a8xx_gpu_funcs;
+
static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
if(adreno_is_a630(gpu))
@@ -298,5 +313,19 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
-
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a6xx_zap_shader_init(struct msm_gpu *gpu);
+
+void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
+int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data);
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value);
+u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate);
+int a8xx_gpu_feature_probe(struct msm_gpu *gpu);
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu);
+int a8xx_hw_init(struct msm_gpu *gpu);
+irqreturn_t a8xx_irq(struct msm_gpu *gpu);
+void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu);
+bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void a8xx_recover(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 4c7f3c642f6a..d2d6b2fd3cba 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1255,7 +1255,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
return;
/* Set the fence to ALLOW mode so we can access the registers */
- gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
&a6xx_state->gmu_registers[3], false);
@@ -1596,7 +1596,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
/* Get the generic state from the adreno core */
adreno_gpu_state_get(gpu, &a6xx_state->base);
- if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+ !adreno_has_rgmu(adreno_gpu)) {
a6xx_get_gmu_registers(gpu, a6xx_state);
a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 1c18499b60bb..b49d8427b59e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -71,8 +71,8 @@ static const struct a6xx_cluster {
u32 sel_val;
} a6xx_clusters[] = {
CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
- CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
- CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
@@ -303,8 +303,8 @@ static const u32 a660_registers[] = {
static const struct a6xx_registers a6xx_reglist[] = {
REGS(a6xx_registers, 0, 0),
REGS(a660_registers, 0, 0),
- REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
- REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
};
static const u32 a6xx_ahb_registers[] = {
@@ -343,48 +343,48 @@ static const struct a6xx_registers a6xx_gbif_reglist =
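/*
 * Note: the GMU GX/CX/AO/GPUCC ranges in this hunk were all rebased by
 * +0x1a800 dwords so they are relative to the GPU base, matching the
 * GMU_BYTE_OFFSET() adjustment in a6xx_gmu.h.
 */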
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
- 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
- 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
- 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
- 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
- 0x0100, 0x012b, 0x0140, 0x0140,
+ 0x1a800, 0x1a800, 0x1a810, 0x1a813, 0x1a816, 0x1a816, 0x1a818, 0x1a81b,
+ 0x1a81e, 0x1a81e, 0x1a820, 0x1a823, 0x1a826, 0x1a826, 0x1a828, 0x1a82b,
+ 0x1a82e, 0x1a82e, 0x1a830, 0x1a833, 0x1a836, 0x1a836, 0x1a838, 0x1a83b,
+ 0x1a83e, 0x1a83e, 0x1a840, 0x1a843, 0x1a846, 0x1a846, 0x1a880, 0x1a884,
+ 0x1a900, 0x1a92b, 0x1a940, 0x1a940,
};
static const u32 a6xx_gmu_cx_registers[] = {
/* GMU CX */
- 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
- 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
- 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
- 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
- 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
- 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
- 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
- 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
- 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ 0x1f400, 0x1f407, 0x1f410, 0x1f412, 0x1f500, 0x1f500, 0x1f507, 0x1f50a,
+ 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c, 0x1f80f, 0x1f81c,
+ 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f887, 0x1f889,
+ 0x1f8a0, 0x1f8a2, 0x1f8a4, 0x1f8af, 0x1f8c0, 0x1f8c3, 0x1f8d0, 0x1f8d0,
+ 0x1f8e4, 0x1f8e4, 0x1f8e8, 0x1f8ec, 0x1f900, 0x1f903, 0x1f940, 0x1f940,
+ 0x1f942, 0x1f944, 0x1f94c, 0x1f94d, 0x1f94f, 0x1f951, 0x1f954, 0x1f954,
+ 0x1f957, 0x1f958, 0x1f95d, 0x1f95d, 0x1f962, 0x1f962, 0x1f964, 0x1f965,
+ 0x1f980, 0x1f986, 0x1f990, 0x1f99e, 0x1f9c0, 0x1f9c0, 0x1f9c5, 0x1f9cc,
+ 0x1f9e0, 0x1f9e2, 0x1f9f0, 0x1f9f0, 0x1fa00, 0x1fa01,
/* GMU AO */
- 0x9300, 0x9316, 0x9400, 0x9400,
+ 0x23b00, 0x23b16, 0x23c00, 0x23c00,
};
static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
- 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
- 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
- 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
- 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
- 0xb800, 0xb802,
+ 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440b,
+ 0x24415, 0x2441c, 0x2441e, 0x2442d, 0x2443c, 0x2443d, 0x2443f, 0x24440,
+ 0x24442, 0x24449, 0x24458, 0x2445a, 0x24540, 0x2455e, 0x24800, 0x24802,
+ 0x24c00, 0x24c02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25c00, 0x25c02,
+ 0x26000, 0x26002,
/* GPU CC ACD */
- 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+ 0x26400, 0x26416, 0x26420, 0x26427,
};
static const u32 a621_gmu_gpucc_registers[] = {
/* GPU CC */
- 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
- 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
- 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
- 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
- 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
- 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
- 0xbe20, 0xbe2d,
+ 0x24000, 0x2400e, 0x24400, 0x2440e, 0x25800, 0x25804, 0x25c00, 0x25c04,
+ 0x26000, 0x26004, 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430,
+ 0x26432, 0x26432, 0x26441, 0x26455, 0x26466, 0x26468, 0x26478, 0x2647a,
+ 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a3, 0x264b3, 0x264b5,
+ 0x264c5, 0x264c7, 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc,
+ 0x2650b, 0x2650c, 0x2651c, 0x2651e, 0x26540, 0x26570, 0x26600, 0x26616,
+ 0x26620, 0x2662d,
};
static const u32 a6xx_gmu_cx_rscc_registers[] = {
@@ -575,7 +575,7 @@ struct gen7_sptp_cluster_registers {
/* statetype: SP block state type for the cluster */
enum a7xx_statetype_id statetype;
/* pipe_id: Pipe identifier */
- enum a7xx_pipe pipe_id;
+ enum adreno_pipe pipe_id;
/* context_id: Context identifier */
int context_id;
/* location_id: Location identifier */
@@ -801,10 +801,10 @@ static const char *a7xx_statetype_names[] = {
};
static const char *a7xx_pipe_names[] = {
- A7XX_NAME(A7XX_PIPE_NONE),
- A7XX_NAME(A7XX_PIPE_BR),
- A7XX_NAME(A7XX_PIPE_BV),
- A7XX_NAME(A7XX_PIPE_LPAC),
+ A7XX_NAME(PIPE_NONE),
+ A7XX_NAME(PIPE_BR),
+ A7XX_NAME(PIPE_BV),
+ A7XX_NAME(PIPE_LPAC),
};
static const char *a7xx_cluster_names[] = {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 550de6ad68ef..53cfdf4e6c34 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -23,6 +23,7 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_START),
HFI_MSG_ID(HFI_H2F_FEATURE_CTRL),
HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
@@ -105,10 +106,25 @@ static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seq
{
int ret;
u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+
+ do {
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
+
+ if (!ret)
+ break;
- /* Wait for a response */
- ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
+ if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+ break;
+
+ /*
+ * We may time out because the GMU is temporarily wedged by
+ * pending faults from the GPU while we are taking a devcoredump.
+ * Wait until the MMU is resumed and try again.
+ */
+ wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+ } while (true);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -255,11 +271,63 @@ static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
NULL, 0);
}
+static int a8xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ unsigned int num_gx_votes = 3, num_cx_votes = 2;
+ struct a6xx_hfi_table_entry *entry;
+ struct a6xx_hfi_table *tbl;
+ int ret, i;
+ u32 size;
+
+ size = sizeof(*tbl) + (2 * sizeof(tbl->entry[0])) +
+ (gmu->nr_gpu_freqs * num_gx_votes * sizeof(gmu->gx_arc_votes[0])) +
+ (gmu->nr_gmu_freqs * num_cx_votes * sizeof(gmu->cx_arc_votes[0]));
+ tbl = kzalloc(size, GFP_KERNEL);
+ if (!tbl)
+ return -ENOMEM;
+
+ tbl->type = HFI_TABLE_GPU_PERF;
+
+ /* First fill GX votes */
+ entry = &tbl->entry[0];
+ entry->count = gmu->nr_gpu_freqs;
+ entry->stride = num_gx_votes;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ unsigned int base = i * entry->stride;
+
+ entry->data[base+0] = gmu->gx_arc_votes[i];
+ entry->data[base+1] = gmu->dep_arc_votes[i];
+ entry->data[base+2] = gmu->gpu_freqs[i] / 1000;
+ }
+
+ /* Then fill CX votes */
+ entry = (struct a6xx_hfi_table_entry *)
+ &tbl->entry[0].data[gmu->nr_gpu_freqs * num_gx_votes];
+
+ entry->count = gmu->nr_gmu_freqs;
+ entry->stride = num_cx_votes;
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ unsigned int base = i * entry->stride;
+
+ entry->data[base] = gmu->cx_arc_votes[i];
+ entry->data[base+1] = gmu->gmu_freqs[i] / 1000;
+ }
+
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TABLE, tbl, size, NULL, 0);
+
+ kfree(tbl);
+ return ret;
+}
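+
+/*
+ * The resulting HFI_TABLE_GPU_PERF buffer built above is laid out as:
+ *
+ *   header / version / type
+ *   GX entry: count = nr_gpu_freqs, stride = 3, data = { gx_vote, dep_vote, freq_khz } per level
+ *   CX entry: count = nr_gmu_freqs, stride = 2, data = { cx_vote, freq_khz } per level
+ */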
+
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_hfi_msg_perf_table msg = { 0 };
int i;
+ if (adreno_is_a8xx(adreno_gpu))
+ return a8xx_hfi_send_perf_table(gmu);
+
msg.num_gpu_levels = gmu->nr_gpu_freqs;
msg.num_gmu_levels = gmu->nr_gmu_freqs;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 653ef720e2da..6f9f74a0bc85 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -185,6 +185,23 @@ struct a6xx_hfi_msg_core_fw_start {
u32 handle;
};
+#define HFI_H2F_MSG_TABLE 15
+
+struct a6xx_hfi_table_entry {
+ u32 count;
+ u32 stride;
+ u32 data[];
+};
+
+struct a6xx_hfi_table {
+ u32 header;
+ u32 version;
+ u32 type;
+#define HFI_TABLE_BW_VOTE 0
+#define HFI_TABLE_GPU_PERF 1
+ struct a6xx_hfi_table_entry entry[];
+};
+
#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
struct a6xx_hfi_gx_bw_perf_vote_cmd {
diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
new file mode 100644
index 000000000000..30de078e9dfd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+
+ val = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe) | A8XX_CP_APERTURE_CNTL_HOST_SLICEID(slice);
+
+ if (a6xx_gpu->cached_aperture == val)
+ return;
+
+ gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);
+
+ a6xx_gpu->cached_aperture = val;
+}
+
+static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, *flags);
+
+ a8xx_aperture_slice_set(gpu, pipe, 0);
+}
+
+static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+}
+
+static void a8xx_aperture_clear(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, PIPE_NONE, &flags);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static void a8xx_write_pipe(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 offset, u32 data)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, pipe, &flags);
+ gpu_write(gpu, offset, data);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static u32 a8xx_read_pipe_slice(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice, u32 offset)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, flags);
+ a8xx_aperture_slice_set(gpu, pipe, slice);
+ val = gpu_read(gpu, offset);
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+
+ return val;
+}
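+
+/*
+ * Illustrative per-slice read pattern (a hypothetical loop, not part of this
+ * patch): iterate only over the slices present in the mask, e.g.
+ *
+ *	for_each_set_bit(slice, &slice_mask, info->max_slices)
+ *		val = a8xx_read_pipe_slice(gpu, PIPE_BR, slice, reg);
+ */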
+
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ u32 slice_mask;
+
+ if (adreno_gpu->info->family < ADRENO_8XX_GEN1)
+ return;
+
+ if (a6xx_gpu->slice_mask)
+ return;
+
+ slice_mask = GENMASK(info->max_slices - 1, 0);
+
+ /* GEN1 doesn't support partial slice configurations */
+ if (adreno_gpu->info->family == ADRENO_8XX_GEN1) {
+ a6xx_gpu->slice_mask = slice_mask;
+ return;
+ }
+
+ slice_mask &= a6xx_llc_read(a6xx_gpu,
+ REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL);
+
+ a6xx_gpu->slice_mask = slice_mask;
+
+ /* The chip ID encodes the number of available slices, so update it */
+ adreno_gpu->chip_id |= FIELD_PREP(GENMASK(7, 4), hweight32(slice_mask));
+}
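+
+/*
+ * For example (hypothetical fusing): a GEN2+ part with max_slices = 4 but
+ * only slices 0, 1 and 3 enabled reads SLICE_ENABLE_FINAL = 0xb, so
+ * slice_mask = 0xb and hweight32() = 3 is encoded into chip_id bits [7:4].
+ */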
+
+static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu)
+{
+ return ffs(a6xx_gpu->slice_mask) - 1;
+}
+
+static inline bool _a8xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A8XX_RBBM_STATUS) &
+ ~A8XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a8xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a8xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A8XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (!a6xx_in_preempt(a6xx_gpu)) {
+ if (a6xx_gpu->cur_ring == ring)
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ else
+ ring->restore_wptr = true;
+ } else {
+ ring->restore_wptr = true;
+ }
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+}
+
+static void a8xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ if (adreno_is_x285(adreno_gpu) && state)
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_0_PC, 0x00000702);
+
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? 0x110111 : 0);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? 0x55555 : 0);
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_GLOBAL_LOAD_CMD, !!state);
+
+ if (state) {
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_P2S_TRIG_CMD, 1);
+
+ if (gpu_poll_timeout(gpu, REG_A8XX_RBBM_CGC_P2S_STATUS, val,
+ val & A8XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
+ dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
+ return;
+ }
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
+ } else {
+ /*
+ * The GMU enables clock gating in GBIF during boot-up, so
+ * override that here when the hwcg feature is disabled.
+ */
+ gpu_rmw(gpu, REG_A8XX_GBIF_CX_CONFIG, BIT(0), 0);
+ }
+}
+
+static void a8xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect;
+ u32 cntl, final_cfg;
+ unsigned int i;
+
+ cntl = A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_PROT_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_FAULT_ON_VIOL_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_LAST_SPAN_INF_RANGE |
+ A8XX_CP_PROTECT_CNTL_PIPE_HALT_SQE_RANGE__MASK;
+ /*
+	 * Enable access protection for privileged registers, fault on an
+	 * access-protection violation, and select the last span to protect
+	 * from its start address all the way to the end of the register
+	 * address space.
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+
+ a8xx_aperture_clear(gpu);
+
+ for (i = 0; i < protect->count; i++) {
+ /* Intentionally skip writing to some registers */
+ if (protect->regs[i]) {
+ gpu_write(gpu, REG_A8XX_CP_PROTECT_GLOBAL(i), protect->regs[i]);
+ final_cfg = protect->regs[i];
+ }
+ }
+
+ /*
+	 * The last span feature is only supported on pipe-specific registers,
+	 * so update those here.
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+
+ a8xx_aperture_clear(gpu);
+}
+
+static void a8xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ u32 level3_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL3);
+ bool rgba8888_lossless = false, fp16compoptdis = false;
+ bool yuvnotcomptofc = false, min_acc_len_64b = false;
+ bool rgb565_predicator = false, amsbc = false;
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ u32 ubwc_version = cfg->ubwc_enc_version;
+ u32 hbb, hbb_hi, hbb_lo, mode = 1;
+ u8 uavflagprd_inv = 2;
+
+ switch (ubwc_version) {
+ case UBWC_5_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ mode = 4;
+ break;
+ case UBWC_4_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ fp16compoptdis = true;
+ rgba8888_lossless = true;
+ mode = 2;
+ break;
+ case UBWC_3_0:
+ amsbc = true;
+ mode = 1;
+ break;
+ default:
+ dev_err(&gpu->pdev->dev, "Unknown UBWC version: 0x%x\n", ubwc_version);
+ break;
+ }
+
+ /*
+ * We subtract 13 from the highest bank bit (13 is the minimum value
+ * allowed by hw) and write the lowest two bits of the remaining value
+ * as hbb_lo and the one above it as hbb_hi to the hardware.
+ */
+ WARN_ON(cfg->highest_bank_bit < 13);
+ hbb = cfg->highest_bank_bit - 13;
+ hbb_hi = hbb >> 2;
+ hbb_lo = hbb & 3;
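+	/* e.g. highest_bank_bit = 16: hbb = 3, hbb_hi = 0, hbb_lo = 3 */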
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL,
+ yuvnotcomptofc << 6 |
+ hbb_hi << 3 |
+ hbb_lo << 1);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CMP_NC_MODE_CNTL,
+ mode << 15 |
+ yuvnotcomptofc << 6 |
+ rgba8888_lossless << 4 |
+ fp16compoptdis << 3 |
+ rgb565_predicator << 2 |
+ amsbc << 1 |
+ min_acc_len_64b);
+
+ a8xx_aperture_clear(gpu);
+
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level3_swizzling_dis << 13 |
+ level2_swizzling_dis << 12 |
+ hbb_hi << 10 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level3_swizzling_dis << 7 |
+ level2_swizzling_dis << 6 |
+ hbb_hi << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+}
+
+static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ const struct adreno_reglist_pipe *regs = info->nonctxt_reglist;
+ unsigned int pipe_id, i;
+ unsigned long flags;
+
+ for (pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ /* We don't have support for LPAC yet */
+ if (pipe_id == PIPE_LPAC)
+ continue;
+
+ a8xx_aperture_acquire(gpu, pipe_id, &flags);
+
+ for (i = 0; regs[i].offset; i++) {
+ if (!(BIT(pipe_id) & regs[i].pipe))
+ continue;
+
+ if (regs[i].offset == REG_A8XX_RB_GC_GMEM_PROTECT)
+ *gmem_protect = regs[i].value;
+
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+ }
+
+ a8xx_aperture_release(gpu, flags);
+ }
+
+ a8xx_aperture_clear(gpu);
+}
+
+static int a8xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ u32 mask;
+
+ /* Disable concurrent binning before sending CP init */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, BIT(27));
+
+ OUT_PKT7(ring, CP_ME_INIT, 4);
+
+ /* Use multiple HW contexts */
+ mask = BIT(0);
+
+ /* Enable error detection */
+ mask |= BIT(1);
+
+ /* Set default reset state */
+ mask |= BIT(3);
+
+ /* Disable save/restore of performance counters across preemption */
+ mask |= BIT(6);
+
+ OUT_RING(ring, mask);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Operation mode mask */
+ OUT_RING(ring, 0x00000002);
+
+ a6xx_flush(gpu, ring);
+ return a8xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+#define A8XX_INT_MASK \
+ (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_SW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+ A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+ A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \
+ A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
+
+#define A8XX_APRIV_MASK \
+ (A8XX_CP_APRIV_CNTL_PIPE_ICACHE | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBFETCH | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBPRIVLEVEL | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBRPWB)
+
+#define A8XX_BR_APRIV_MASK \
+ (A8XX_APRIV_MASK | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDREAD | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDWRITE)
+
+#define A8XX_CP_GLOBAL_INT_MASK \
+ (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBV)
+
+#define A8XX_CP_INTERRUPT_STATUS_MASK_PIPE \
+ (A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB1WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB2WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB3WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFSDSWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFMRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFVSDWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_OPCODEERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VSDPARITYERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_REGISTERPROTECTIONERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_ILLEGALINSTRUCTION | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_SMMUFAULT | \
+	 A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPCLIENT | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPTYPE | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPREAD | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_RTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTREFCNTOVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTCLRRESMISS)
+
+#define A8XX_CP_HW_FAULT_STATUS_MASK_PIPE \
+ (A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB1FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB2FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB3FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFSDSFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFMRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFVSDFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_SQEREADBURSTOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_EVENTENGINEOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_UCODEERROR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int pipe_id, i;
+ u32 gmem_protect = 0;
+ u64 gmem_range_min;
+ int ret;
+
+ ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (ret)
+ return ret;
+
+ /* Clear the cached value to force aperture configuration next time */
+ a6xx_gpu->cached_aperture = UINT_MAX;
+ a8xx_aperture_clear(gpu);
+
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A6XX_GBIF_HALT);
+
+ gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Setup GMEM Range in UCHE */
+ gmem_range_min = SZ_64M;
+	/* Set the GMEM VA range [gmem_range_min : gmem_range_min + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN, gmem_range_min);
+ gpu_write64(gpu, REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN, gmem_range_min);
+
+ /* Setup UCHE Trap region */
+ gpu_write64(gpu, REG_A8XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_CNTL, 0x1);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_PERFCTR_CNTL, 0x1);
+
+ /* Turn on the IFPC counter (countable 4 on XOCLK1) */
+ gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1,
+ FIELD_PREP(GENMASK(7, 0), 0x4));
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A8XX_CP_PERFCTR_CP_SEL(0), 1);
+
+ a8xx_set_ubwc_config(gpu);
+
+ /* Set weights for bicubic filtering */
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), 0x3fe05ff4);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), 0x3fa0ebee);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), 0x3f5193ed);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), 0x3f0243f0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5), 0x00000000);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6), 0x3fd093e8);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7), 0x3f4133dc);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8), 0x3ea1dfdb);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9), 0x3e0283e0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10), 0x0000ac2b);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11), 0x0000f01d);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12), 0x00114412);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13), 0x0021980a);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14), 0x0051ec05);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15), 0x0000380e);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16), 0x3ff09001);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17), 0x3fc10bfa);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18), 0x3f9193f7);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19), 0x3f7227f7);
+
+ gpu_write(gpu, REG_A8XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
+
+ a8xx_nonctxt_config(gpu, &gmem_protect);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, BIT(30) | 0xcfffff);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL, BIT(30));
+
+ /* Set up the CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+
+ /* Enable the power counter */
+ gmu_rmw(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0, 0xff, BIT(5));
+ gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
+ /* Protect registers from the CP */
+ a8xx_set_cp_protect(gpu);
+
+ /* Enable the GMEM save/restore feature for preemption */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 1);
+
+ for (pipe_id = PIPE_BR; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ u32 apriv_mask = A8XX_APRIV_MASK;
+ unsigned long flags;
+
+ if (pipe_id == PIPE_LPAC)
+ continue;
+
+ if (pipe_id == PIPE_BR)
+ apriv_mask = A8XX_BR_APRIV_MASK;
+
+ a8xx_aperture_acquire(gpu, pipe_id, &flags);
+ gpu_write(gpu, REG_A8XX_CP_APRIV_CNTL_PIPE, apriv_mask);
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE,
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE);
+ gpu_write(gpu, REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE,
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE);
+ a8xx_aperture_release(gpu, flags);
+ }
+
+ a8xx_aperture_clear(gpu);
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, A8XX_CP_GLOBAL_INT_MASK);
+ gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, A8XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write64(gpu, REG_A8XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+ if (a6xx_gpu->aqe_iova)
+ gpu_write64(gpu, REG_A8XX_CP_AQE_INSTR_BASE_0, a6xx_gpu->aqe_iova);
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+
+ /* Configure the RPTR shadow if needed: */
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0]));
+ gpu_write64(gpu, REG_A8XX_CP_RB_RPTR_ADDR_BV, rbmemptr(gpu->rb[0], bv_rptr));
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ gpu->rb[i]->cur_ctx_seqno = 0;
+
+	/* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 1);
+
+ ret = a8xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+	 * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a8xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ } else {
+ return ret;
+ }
+
+ /*
+	 * The GMEM_PROTECT register should be programmed after the GPU has
+	 * transitioned to non-secure mode.
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_GC_GMEM_PROTECT, gmem_protect);
+ WARN_ON(!gmem_protect);
+ a8xx_aperture_clear(gpu);
+
+ /* Enable hardware clockgating */
+ a8xx_set_hwcg(gpu, true);
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ return ret;
+}
+
+int a8xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
+static void a8xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", gpu_read(gpu, REG_A8XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+void a8xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int active_submits;
+
+ adreno_dump_info(gpu);
+
+ if (hang_debug)
+ a8xx_dump(gpu);
+
+ /*
+	 * Set the hung flag to handle recovery-specific sequences during the
+	 * rpm suspend we are about to trigger.
+ */
+ a6xx_gpu->hung = true;
+
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 3);
+
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+	/* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ reinit_completion(&gmu->pd_gate);
+ dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
+ dev_pm_genpd_synced_poweroff(gmu->cxpd);
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
+ DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
+
+ dev_pm_genpd_remove_notifier(gmu->cxpd);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gpu_hw_init(gpu);
+ a6xx_gpu->hung = false;
+}
+
+static const char *a8xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+ static const char * const uche_clients[] = {
+ "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC", "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP",
+ "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC", "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP",
+ "STCHE",
+ };
+ static const char * const uche_clients_lpac[] = {
+ "-", "SP_LPAC", "-", "-", "HLSQ_LPAC", "-", "-", "TP_LPAC",
+ };
+ u32 val;
+
+ /*
+	 * The source of the data depends on the mid ID read from FSYNR1
+	 * and the client ID read from the UCHE block.
+ */
+ val = gpu_read(gpu, REG_A8XX_UCHE_CLIENT_PF);
+
+ val &= GENMASK(6, 0);
+
+ /* mid=3 refers to BR or BV */
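+	/* e.g. with mid == 3, val == 1 decodes to "BR_SP" */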
+ if (mid == 3) {
+ if (val < ARRAY_SIZE(uche_clients))
+ return uche_clients[val];
+ else
+ return "UCHE";
+ }
+
+ /* mid=8 refers to LPAC */
+ if (mid == 8) {
+ if (val < ARRAY_SIZE(uche_clients_lpac))
+ return uche_clients_lpac[val];
+ else
+ return "UCHE_LPAC";
+ }
+
+ return "Unknown";
+}
+
+static const char *a8xx_fault_block(struct msm_gpu *gpu, u32 id)
+{
+ switch (id) {
+ case 0x0:
+ return "CP";
+ case 0x1:
+ return "UCHE: Unknown";
+ case 0x2:
+ return "UCHE_LPAC: Unknown";
+ case 0x3:
+ case 0x8:
+ return a8xx_uche_fault_block(gpu, id);
+ case 0x4:
+ return "CCU";
+ case 0x5:
+ return "Flag cache";
+ case 0x6:
+ return "PREFETCH";
+ case 0x7:
+ return "GMU";
+ case 0x9:
+ return "UCHE_HPAC";
+ }
+
+ return "Unknown";
+}
+
+int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ const char *block = "unknown";
+
+ u32 scratch[] = {
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(0)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(1)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(2)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(3)),
+ };
+
+ if (info)
+ block = a8xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+ return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
+}
+
+static void a8xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A8XX_CP_INTERRUPT_STATUS_GLOBAL);
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 slice = a8xx_get_first_slice(a6xx_gpu);
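+	/* HW fault bits occupy [6:0] and SW fault bits [22:16] of the global INT status */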
+ u32 hw_fault_mask = GENMASK(6, 0);
+ u32 sw_fault_mask = GENMASK(22, 16);
+ u32 pipe = 0;
+
+ dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Global INT status: 0x%x\n", status);
+
+ if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR |
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR))
+ pipe |= BIT(PIPE_BR);
+
+ if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV |
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV))
+ pipe |= BIT(PIPE_BV);
+
+ if (!pipe) {
+ dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Unknown pipe\n");
+ goto out;
+ }
+
+ for (unsigned int pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ if (!(BIT(pipe_id) & pipe))
+ continue;
+
+ if (hw_fault_mask & status) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_HW_FAULT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP HW FAULT pipe: %u status: 0x%x\n", pipe_id, status);
+ }
+
+ if (sw_fault_mask & status) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_INTERRUPT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP SW FAULT pipe: %u status: 0x%x\n", pipe_id, status);
+
+ if (status & BIT(8)) {
+ a8xx_write_pipe(gpu, pipe_id, REG_A8XX_CP_SQE_STAT_ADDR_PIPE, 1);
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_SQE_STAT_DATA_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP Opcode error, opcode=0x%x\n", status);
+ }
+
+ if (status & BIT(10)) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_PROTECT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP REG PROTECT error, status=0x%x\n", status);
+ }
+ }
+ }
+
+out:
+ /* Turn off interrupts to avoid triggering recovery again */
+ a8xx_aperture_clear(gpu);
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, 0);
+ gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, 0);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static u32 gpu_periph_read(struct msm_gpu *gpu, u32 dbg_offset)
+{
+ gpu_write(gpu, REG_A8XX_CP_SQE_UCODE_DBG_ADDR_PIPE, dbg_offset);
+
+ return gpu_read(gpu, REG_A8XX_CP_SQE_UCODE_DBG_DATA_PIPE);
+}
+
+static u64 gpu_periph_read64(struct msm_gpu *gpu, u32 dbg_offset)
+{
+ u64 lo, hi;
+
+ lo = gpu_periph_read(gpu, dbg_offset);
+ hi = gpu_periph_read(gpu, dbg_offset + 1);
+
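+	/* e.g. hi = 0x1, lo = 0x00001000 combine to 0x0000000100001000 */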
+ return (hi << 32) | lo;
+}
+
+#define CP_PERIPH_IB1_BASE_LO 0x7005
+#define CP_PERIPH_IB1_BASE_HI 0x7006
+#define CP_PERIPH_IB1_SIZE 0x7007
+#define CP_PERIPH_IB1_OFFSET 0x7008
+#define CP_PERIPH_IB2_BASE_LO 0x7009
+#define CP_PERIPH_IB2_BASE_HI 0x700a
+#define CP_PERIPH_IB2_SIZE 0x700b
+#define CP_PERIPH_IB2_OFFSET 0x700c
+#define CP_PERIPH_IB3_BASE_LO 0x700d
+#define CP_PERIPH_IB3_BASE_HI 0x700e
+#define CP_PERIPH_IB3_SIZE 0x700f
+#define CP_PERIPH_IB3_OFFSET 0x7010
+
+static void a8xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ unsigned long flags;
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A8XX_RBBM_MISC_STATUS) & A8XX_RBBM_MISC_STATUS_SMMU_STALLED_ON_FAULT)
+ return;
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ if (!adreno_has_gmu_wrapper(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X gfx_status %8.8X\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A8XX_RBBM_STATUS), gpu_read(gpu, REG_A8XX_RBBM_GFX_STATUS));
+
+ a8xx_aperture_acquire(gpu, PIPE_BR, &flags);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "BR: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n",
+ gpu_read(gpu, REG_A8XX_RBBM_GFX_BR_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET));
+
+ a8xx_aperture_release(gpu, flags);
+ a8xx_aperture_acquire(gpu, PIPE_BV, &flags);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "BV: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n",
+ gpu_read(gpu, REG_A8XX_RBBM_GFX_BV_STATUS),
+ gpu_read(gpu, REG_A8XX_CP_RB_RPTR_BV),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET));
+
+ a8xx_aperture_release(gpu, flags);
+ a8xx_aperture_clear(gpu);
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ timer_delete(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static void a8xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
+{
+ u32 status;
+
+ status = gpu_read(gpu, REG_A8XX_RBBM_SW_FUSE_INT_STATUS);
+ gpu_write(gpu, REG_A8XX_RBBM_SW_FUSE_INT_MASK, 0);
+
+ dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status);
+
+ /*
+ * Ignore FASTBLEND violations, because the HW will silently fall back
+ * to legacy blending.
+ */
+ if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
+ A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) {
+ timer_delete(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ }
+}
+
+irqreturn_t a8xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A8XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a8xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR) {
+ u32 rl0, rl1;
+
+ rl0 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_0);
+ rl1 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_1);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | AHB bus error RL_ERROR_0: %x, RL_ERROR_1: %x\n", rl0, rl1);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a8xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Trap interrupt\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
+ a8xx_sw_fuse_violation_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ msm_gpu_retire(gpu);
+ a6xx_preempt_trigger(gpu);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
+ a6xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
+void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= GENMASK(5, 0);
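+		/* The 6-bit SCID is replicated into all five client fields of SCACHE_CNTL1 */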
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
+ FIELD_PREP(GENMASK(29, 24), gpu_scid) |
+ FIELD_PREP(GENMASK(23, 18), gpu_scid) |
+ FIELD_PREP(GENMASK(17, 12), gpu_scid) |
+ FIELD_PREP(GENMASK(11, 6), gpu_scid) |
+ FIELD_PREP(GENMASK(5, 0), gpu_scid));
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
+ FIELD_PREP(GENMASK(27, 22), gpu_scid) |
+ FIELD_PREP(GENMASK(21, 16), gpu_scid) |
+ FIELD_PREP(GENMASK(15, 10), gpu_scid) |
+ BIT(8));
+ }
+
+ llcc_slice_activate(a6xx_gpu->htw_llc_slice);
+}
+
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
+
+void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
+int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ *value = gpu_read64(gpu, REG_A8XX_CP_ALWAYS_ON_COUNTER);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return 0;
+}
+
+u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles;
+
+	/* The power counter runs on the 19.2 MHz always-on XO clock */
+ *out_sample_rate = 19200000;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ return busy_cycles;
+}
+
+bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ return true;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 28f744f3caf7..554d746f115b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -34,6 +34,7 @@ extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;
+extern const struct adreno_gpulist a8xx_gpulist;
static const struct adreno_gpulist *gpulists[] = {
&a2xx_gpulist,
@@ -42,6 +43,7 @@ static const struct adreno_gpulist *gpulists[] = {
&a5xx_gpulist,
&a6xx_gpulist,
&a7xx_gpulist,
+ &a8xx_gpulist,
};
static const struct adreno_info *adreno_info(uint32_t chip_id)
@@ -235,7 +237,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
priv->has_cached_coherent =
!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
- gpu = info->init(drm);
+ gpu = info->funcs->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
index 04b49d385f9d..d513e03fef08 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -82,85 +82,85 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
- {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_1, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_0_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_1_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_2_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_3_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_4_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_5_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_6_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_7_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_CB_RAM, 0x390, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_TAG, 0x90, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_2, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_TMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_SMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_STATE_DATA, 0x40, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_8_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_9_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_10_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_11_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_12_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_TP0_TMO_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0x90, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
};
static const u32 gen7_0_0_pre_crashdumper_gpu_registers[] = {
@@ -303,7 +303,7 @@ static const u32 gen7_0_0_noncontext_rb_rbp_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_rb_rbp_pipe_br_registers), 8));
-/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BR */
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BR */
static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = {
0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
@@ -313,7 +313,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_br_registers), 8));
-/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BV */
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BV */
static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = {
0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
@@ -323,7 +323,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_bv_registers), 8));
-/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = {
0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
0x09b00, 0x09b08,
@@ -331,7 +331,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_br_registers), 8));
-/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = {
0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
0x09b00, 0x09b08,
@@ -339,7 +339,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_bv_registers), 8));
-/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = {
0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811,
0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829,
@@ -355,7 +355,7 @@ static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rac_cluster_ps_pipe_br_registers), 8));
-/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = {
0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812,
0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a,
@@ -370,7 +370,7 @@ static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7,
0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba,
@@ -381,7 +381,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
0x0a9b0, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc,
@@ -390,21 +390,21 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_DP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_DP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers[] = {
0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_DP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_DP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers[] = {
0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8,
0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5,
@@ -414,7 +414,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9e2, 0x0a9e3,
0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00,
@@ -422,7 +422,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae,
0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3,
@@ -432,7 +432,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3,
0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01,
@@ -440,7 +440,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
@@ -453,7 +453,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
@@ -466,7 +466,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
@@ -477,7 +477,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
@@ -488,7 +488,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
@@ -498,7 +498,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
@@ -508,7 +508,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = {
0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307,
0x0b309, 0x0b309, 0x0b310, 0x0b310,
@@ -516,35 +516,35 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers[] = {
0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers[] = {
0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers[] = {
0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = {
0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309,
0x0b310, 0x0b310,
@@ -552,84 +552,84 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV */
static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers), 8));
-/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_vfd_cluster_fe_pipe_br_registers[] = {
0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_br_registers), 8));
-/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_vfd_cluster_fe_pipe_bv_registers[] = {
0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_fe_pipe_br_registers[] = {
0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_fe_pipe_bv_registers[] = {
0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers[] = {
0x09101, 0x0910c, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers[] = {
0x09101, 0x0910c, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers[] = {
0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers[] = {
0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers[] = {
0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae73,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = {
0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f,
@@ -638,7 +638,7 @@ static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = {
0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
0x0ae0f, 0x0ae0f, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3b,
@@ -647,28 +647,28 @@ static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
0x0af88, 0x0af8a,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: SP_TOP */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers[] = {
0x0af80, 0x0af84,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: uSPTP */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
0x0af80, 0x0af84, 0x0af90, 0x0af92,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+/* Block: TPL1 Cluster: noncontext Pipeline: PIPE_NONE */
static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
@@ -676,14 +676,14 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+/* Block: TPL1 Cluster: noncontext Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
0x0b600, 0x0b600,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
+/* Block: TPL1 Cluster: noncontext Pipeline: PIPE_LPAC */
static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = {
0x0b780, 0x0b780,
UINT_MAX, UINT_MAX,
@@ -691,184 +691,184 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_lpac_registers), 8));
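Every register table in these snapshot headers is a flat list of inclusive (start, end) u32 pairs, and the static_assert lines check that sizeof each array is a multiple of 8 bytes, i.e. that no unpaired value sneaks in. A minimal sketch of walking such a table, assuming the trailing UINT_MAX, UINT_MAX entries act as the terminator (the walker and its callback are illustrative, not part of this patch):

/* Hypothetical walker; u32/UINT_MAX as in <linux/types.h>/<linux/limits.h>. */
static void walk_reg_ranges(const u32 *ranges,
			    void (*cb)(u32 offset, void *data), void *data)
{
	int i;

	for (i = 0; ranges[i] != UINT_MAX; i += 2) {
		u32 offset;

		/* Ranges are inclusive on both ends. */
		for (offset = ranges[i]; offset <= ranges[i + 1]; offset++)
			cb(offset, data);
	}
}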
static const struct gen7_sel_reg gen7_0_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x0,
};
static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
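The two gen7_sel_reg instances above point at the same host/crash-dumper select pair and differ only in .val: 0x0 muxes the RB RAC sub-block into the readback window, 0x9 the RBP sub-block. A rough sketch of applying one before capturing the associated ranges, assuming the driver's gpu_write() register accessor (the actual dump path is elsewhere and not shown in this diff):

/* Hypothetical helper: select a sub-block view for both readers. */
static void apply_sel_reg(struct msm_gpu *gpu, const struct gen7_sel_reg *sel)
{
	if (!sel)
		return;

	/* Host and crash dumper see the same mux setting. */
	gpu_write(gpu, sel->host_reg, sel->val);
	gpu_write(gpu, sel->cd_reg, sel->val);
}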
static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_rb_rac_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_rb_rbp_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
};
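Each gen7_sptp_cluster_registers row bundles a cluster id, a statetype register, a pipe, a hardware context index (0-3), a save location (HLSQ_STATE, HLSQ_DP, SP_TOP or uSPTP), the range table to capture, and a register base. A struct sketch consistent with the initializers above; the real definition is not part of this hunk, so the field names are guesses:

/* Field order inferred from the table rows; names are illustrative only. */
struct gen7_sptp_cluster_registers_sketch {
	unsigned int cluster;		/* A7XX_CLUSTER_* */
	unsigned int statetype;		/* A7XX_SP_*_REG / A7XX_TP0_*_REG */
	unsigned int pipe;		/* PIPE_BR / PIPE_BV / PIPE_LPAC / PIPE_NONE */
	unsigned int context_id;	/* hardware context, 0..3 */
	unsigned int location;		/* A7XX_HLSQ_STATE, A7XX_SP_TOP, A7XX_USPTP, ... */
	const u32 *regs;		/* inclusive (start, end) pairs, UINT_MAX terminated */
	unsigned int regbase;		/* e.g. 0xa800, 0xb000 */
};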
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
index 772652eb61f3..7897622ea6f7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -96,87 +96,87 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
- {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_1, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_0_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_1_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_2_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_3_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_4_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_5_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_6_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_7_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_CB_RAM, 0x390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_13_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_14_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_TAG, 0xc0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_2, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_TMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_SMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_STATE_DATA, 0x40, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_8_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_9_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_10_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_11_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_12_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_TP0_TMO_DATA, 0x200, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_13_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_14_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0xc0, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
};
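A shader-block row multiplies out to size-in-dwords times the two instance-count columns; the exact column meaning is not shown in this hunk, so treat the names below as assumptions. For example, an 0x800-dword A7XX_SP_INST_DATA entry with counts 6 and 2 captures 0x800 * 6 * 2 * 4 = 98304 bytes (96 KiB):

/* Hypothetical capture-size helper; the count columns are assumed to be
 * per-SP and per-uSPTP instance counts.
 */
static size_t shader_block_capture_bytes(u32 size_dwords, u32 num_sps,
					 u32 num_usptps)
{
	return (size_t)size_dwords * num_sps * num_usptps * sizeof(u32);
}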
static const u32 gen7_2_0_gpu_registers[] = {
@@ -478,182 +478,182 @@ static const u32 gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
static const struct gen7_sel_reg gen7_2_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x0,
};
static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_rb_rac_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_rb_rbp_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_2_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_2_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 0956dfca1f05..20125d1aa21d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -118,97 +118,97 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
- { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_CB_RAM, 0x0390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_TAG, 0x00C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_TMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_SMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_STATE_DATA, 0x0040, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_CB_RAM, 0x0390, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_TAG, 0x00C0, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_TMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_SMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_STATE_DATA, 0x0040, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
};
/*
@@ -226,7 +226,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pre_crashdumper_gpu_registers), 8));
* Block : ['BROADCAST', 'CP', 'GRAS', 'GXCLKCTL']
* Block : ['PC', 'RBBM', 'RDVM', 'UCHE']
* Block : ['VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 196 (Regs:1778)
*/
static const u32 gen7_9_0_gpu_registers[] = {
@@ -290,7 +290,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gxclkctl_registers), 8));
/*
* Block : ['GMUAO', 'GMUCX', 'GMUCX_RAM']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 134 (Regs:429)
*/
static const u32 gen7_9_0_gmu_registers[] = {
@@ -334,7 +334,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmu_registers), 8));
/*
* Block : ['GMUGX']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 44 (Regs:454)
*/
static const u32 gen7_9_0_gmugx_registers[] = {
@@ -355,7 +355,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmugx_registers), 8));
/*
* Block : ['CX_MISC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 7 (Regs:56)
*/
static const u32 gen7_9_0_cx_misc_registers[] = {
@@ -367,7 +367,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_misc_registers), 8));
/*
* Block : ['DBGC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 19 (Regs:155)
*/
static const u32 gen7_9_0_dbgc_registers[] = {
@@ -382,7 +382,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_dbgc_registers), 8));
/*
* Block : ['CX_DBGC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 7 (Regs:75)
*/
static const u32 gen7_9_0_cx_dbgc_registers[] = {
@@ -396,7 +396,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_dbgc_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 29 (Regs:573)
*/
@@ -417,7 +417,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_br_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_NONE
* pairs : 29 (Regs:573)
*/
@@ -438,7 +438,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_bv_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* pairs : 2 (Regs:7)
*/
@@ -450,7 +450,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_lpac_registers), 8));
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 5 (Regs:37)
*/
@@ -463,7 +463,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rac_registers),
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 15 (Regs:66)
*/
@@ -478,7 +478,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rbp_registers),
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_STATE
* pairs : 4 (Regs:28)
@@ -491,7 +491,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_state_regis
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_SP_TOP
* pairs : 10 (Regs:61)
@@ -506,7 +506,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_sp_top_registers
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 12 (Regs:62)
@@ -521,7 +521,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_usptp_registers)
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_DP_STR
* pairs : 2 (Regs:5)
@@ -534,7 +534,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_regi
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_STATE
* pairs : 1 (Regs:5)
@@ -547,7 +547,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_SP_TOP
* pairs : 1 (Regs:6)
@@ -560,7 +560,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 2 (Regs:9)
@@ -573,7 +573,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_usptp_register
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 5 (Regs:29)
@@ -587,7 +587,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_none_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 1 (Regs:1)
@@ -600,7 +600,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_br_usptp_register
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 1 (Regs:1)
@@ -613,7 +613,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_lpac_usptp_regist
/*
* Block : ['GRAS']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_GRAS
* pairs : 14 (Regs:293)
*/
@@ -628,7 +628,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_br_cluster_gras_registers), 8
/*
* Block : ['GRAS']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_GRAS
* pairs : 14 (Regs:293)
*/
@@ -643,7 +643,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_bv_cluster_gras_registers), 8
/*
* Block : ['PC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 6 (Regs:31)
*/
@@ -656,7 +656,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['PC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 6 (Regs:31)
*/
@@ -669,7 +669,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VFD']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:236)
*/
@@ -681,7 +681,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['VFD']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:236)
*/
@@ -693,7 +693,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:18)
*/
@@ -705,7 +705,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PC_VS
* pairs : 3 (Regs:30)
*/
@@ -717,7 +717,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers), 8
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_VPC_PS
* pairs : 5 (Regs:76)
*/
@@ -730,7 +730,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers),
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:18)
*/
@@ -742,7 +742,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_PC_VS
* pairs : 3 (Regs:30)
*/
@@ -754,7 +754,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers), 8
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_VPC_PS
* pairs : 5 (Regs:76)
*/
@@ -767,7 +767,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers),
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PS
* pairs : 39 (Regs:133)
*/
@@ -788,7 +788,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rac_registers), 8
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PS
* pairs : 34 (Regs:100)
*/
@@ -808,7 +808,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers), 8
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_HLSQ_STATE
* pairs : 29 (Regs:215)
@@ -828,7 +828,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_SP_TOP
* pairs : 22 (Regs:73)
@@ -846,7 +846,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 16 (Regs:269)
@@ -862,7 +862,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_STATE
* pairs : 21 (Regs:334)
@@ -880,7 +880,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP
* pairs : 3 (Regs:19)
@@ -893,7 +893,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_regist
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_SP_TOP
* pairs : 18 (Regs:77)
@@ -910,7 +910,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 17 (Regs:333)
@@ -927,7 +927,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP_STR
* pairs : 1 (Regs:6)
@@ -940,7 +940,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_re
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_HLSQ_STATE
* pairs : 28 (Regs:213)
@@ -959,7 +959,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_SP_TOP
* pairs : 21 (Regs:71)
@@ -977,7 +977,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 16 (Regs:266)
@@ -993,7 +993,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_STATE
* pairs : 14 (Regs:299)
@@ -1009,7 +1009,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_r
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP
* pairs : 2 (Regs:13)
@@ -1022,7 +1022,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_regi
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_SP_TOP
* pairs : 9 (Regs:34)
@@ -1037,7 +1037,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_regis
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 11 (Regs:279)
@@ -1052,7 +1052,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 3 (Regs:10)
@@ -1065,7 +1065,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 6 (Regs:42)
@@ -1079,7 +1079,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 3 (Regs:10)
@@ -1092,7 +1092,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 5 (Regs:7)
@@ -1105,192 +1105,192 @@ static const u32 gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers), 8));
static const struct gen7_sel_reg gen7_9_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0,
};
static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
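
The two select-register descriptors above expose the same RB sub-block mux with different select values: 0 routes the dump to the RAC bank, 0x9 to the RBP bank. A minimal sketch of the consuming side, assuming the driver's gpu_write() MMIO helper (the function itself is hypothetical; the crashdumper script path would program cd_reg instead of host_reg):

static void gen7_select_rb_sub_block(struct msm_gpu *gpu,
				     const struct gen7_sel_reg *sel)
{
	/* Route the RB register bus to the requested sub-block (RAC or
	 * RBP) before any of the cluster's registers are sampled.
	 */
	gpu_write(gpu, sel->host_reg, sel->val);
}
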
static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_rb_pipe_br_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_rb_pipe_br_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_gras_pipe_br_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_gras_pipe_br_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_pc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_pc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_non_context_sp_pipe_br_sp_top_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_non_context_sp_pipe_br_usptp_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR,
gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_non_context_sp_pipe_lpac_usptp_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_none_usptp_registers, 0xb600},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_br_usptp_registers, 0xb600},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers, 0xb780},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
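
For readers decoding the SP/TP table above: each entry names the cluster, the statetype selector, the pipe and hardware context to select, the location within the block (A7XX_HLSQ_STATE, A7XX_SP_TOP, A7XX_USPTP, ...), the register-pair list to walk, and finally the base (0xae00, 0xa800, 0xb000, ...) that the location's offsets are applied against. The same register list is deliberately repeated once per pipe/context combination rather than encoded with wildcards, which keeps the snapshot walker a simple linear loop.
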
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index afaa3cfefd35..1c80909e63ca 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -284,6 +284,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
+ struct adreno_gpu *adreno_gpu = container_of(gpu, struct adreno_gpu, base);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
@@ -336,6 +337,11 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
+ /* Let any concurrent GMU transactions know that the MMU may be
+ * blocked for a while, and that they should wait for us to finish.
+ */
+ reinit_completion(&adreno_gpu->fault_coredump_done);
+
fault_info.ttbr0 = info->ttbr0;
fault_info.iova = iova;
fault_info.flags = flags;
@@ -343,18 +349,13 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
fault_info.block = block;
msm_gpu_fault_crashstate_capture(gpu, &fault_info);
+
+ complete_all(&adreno_gpu->fault_coredump_done);
}
return 0;
}
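
The completion choreography above gates GMU traffic against an in-flight coredump: reinit_completion() closes the gate before the crash state is captured, and complete_all() reopens it once msm_gpu_fault_crashstate_capture() returns. A hypothetical waiter on the GMU transaction path could then be as simple as the sketch below (the function is illustrative, not an actual driver entry point):

static void gmu_wait_for_fault_handler(struct adreno_gpu *adreno_gpu)
{
	/* Blocks only while a fault handler has re-armed the completion,
	 * i.e. while crash state capture may keep the MMU stalled;
	 * otherwise complete_all() has left it signalled and this
	 * returns immediately.
	 */
	wait_for_completion(&adreno_gpu->fault_coredump_done);
}
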
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
- struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
- return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
@@ -1196,6 +1197,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_has_rgmu(adreno_gpu) ||
adreno_gpu->info->family < ADRENO_6XX_GEN1) {
/*
* This can only be done before devm_pm_opp_of_add_table(), or
@@ -1229,6 +1231,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
+ init_completion(&adreno_gpu->fault_coredump_done);
+ complete_all(&adreno_gpu->fault_coredump_done);
+
pm_runtime_set_autosuspend_delay(dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(dev);
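
Note the complete_all() immediately after init_completion(): a completion starts out un-signalled, so without it a GMU transaction issued before the first fault would block forever. Arming the completion at init time and re-arming it with reinit_completion() only for the duration of a capture keeps the common, fault-free path wait-free.
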
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 390fa6720d9b..0f8d3de97636 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -27,6 +27,7 @@ enum {
ADRENO_FW_PFP = 1,
ADRENO_FW_GMU = 1, /* a6xx */
ADRENO_FW_GPMU = 2,
+ ADRENO_FW_AQE = 3,
ADRENO_FW_MAX,
};
@@ -50,6 +51,8 @@ enum adreno_family {
ADRENO_7XX_GEN1, /* a730 family */
ADRENO_7XX_GEN2, /* a740 family */
ADRENO_7XX_GEN3, /* a750 family */
+ ADRENO_8XX_GEN1, /* a830 family */
+ ADRENO_8XX_GEN2, /* a840 family */
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
@@ -71,9 +74,14 @@ enum adreno_family {
(((_c) >> 8) & 0xff), \
((_c) & 0xff)
+struct adreno_gpu;
+
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ struct msm_gpu *(*init)(struct drm_device *dev);
int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
+ void (*bus_halt)(struct adreno_gpu *adreno_gpu, bool gx_off);
+ int (*mmu_fault_handler)(void *arg, unsigned long iova, int flags, void *data);
};
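
With init folded into the per-generation ops table, each backend now exports a single const vtable that the catalog points at. A sketch of what such a definition might look like for the new a8xx backend; every handler name below is an illustrative assumption, not a symbol from this series:

static const struct adreno_gpu_funcs a8xx_gpu_funcs = {
	.base = {
		/* msm_gpu_funcs callbacks: hw_init, submit, irq, ... */
	},
	.init = a8xx_gpu_init,
	.get_timestamp = a8xx_get_timestamp,
	.bus_halt = a8xx_bus_halt,
	.mmu_fault_handler = a8xx_fault_handler,
};
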
struct adreno_reglist {
@@ -81,6 +89,13 @@ struct adreno_reglist {
u32 value;
};
+/* Reglist with pipe information */
+struct adreno_reglist_pipe {
+ u32 offset;
+ u32 value;
+ u32 pipe;
+};
+
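
The pipe-aware reglist lets a single table carry programming that differs between BR, BV and LPAC at the same register offset. This hunk does not show whether .pipe holds an enum value or a bitmask of pipes, so the sketch below assumes plain enum values and invents the register name:

static const struct adreno_reglist_pipe example_hwcg_regs[] = {
	{ REG_EXAMPLE_HWCG_CNTL, 0x00020222, PIPE_BR },
	{ REG_EXAMPLE_HWCG_CNTL, 0x00000000, PIPE_BV },
	{}, /* zero sentinel, matching the existing reglist convention */
};
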
struct adreno_speedbin {
uint16_t fuse;
uint16_t speedbin;
@@ -101,7 +116,7 @@ struct adreno_info {
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
u64 quirks;
- struct msm_gpu *(*init)(struct drm_device *dev);
+ const struct adreno_gpu_funcs *funcs;
const char *zapfw;
u32 inactive_period;
union {
@@ -180,6 +195,8 @@ struct adreno_gpu {
uint16_t speedbin;
const struct adreno_gpu_funcs *funcs;
+ struct completion fault_coredump_done;
+
/* interesting register offsets to dump: */
const unsigned int *registers;
@@ -392,6 +409,16 @@ static inline int adreno_is_a610(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 610);
}
+static inline int adreno_is_a612(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06010200;
+}
+
+static inline bool adreno_has_rgmu(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a612(gpu);
+}
+
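
In the a612 helper above, the packed id decodes per the one-byte-per-field layout used elsewhere in this header: 0x06010200 is core 6, major 1, minor 2, patch 0, i.e. Adreno 612. Matching the full chip id instead of a revn is what lets parts without a legacy revision number slot into these predicates.
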
static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 618);
@@ -466,9 +493,9 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
-
- /* TODO: A612 */
- return adreno_is_a610(gpu) || adreno_is_a702(gpu);
+ return adreno_is_a610(gpu) ||
+ adreno_is_a612(gpu) ||
+ adreno_is_a702(gpu);
}
/* TODO: 615/616 */
@@ -548,6 +575,21 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
+static inline int adreno_is_a8xx(struct adreno_gpu *gpu)
+{
+ return gpu->info->family >= ADRENO_8XX_GEN1;
+}
+
+static inline int adreno_is_x285(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x44070001;
+}
+
+static inline int adreno_is_a840(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x44050a01;
+}
+
/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
#define ADRENO_VM_START 0x100000000ULL
u64 adreno_private_vm_size(struct msm_gpu *gpu);
@@ -673,12 +715,6 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, PKT7(opcode, cnt));
}
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
-
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);