Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu13')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c         |  12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c  |  71
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c   | 158
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h   |   6
4 files changed, 228 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a7167668d189..1c7235935d14 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
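
The hunk above selects a separate "kicker" PMFW image on boards running kicker firmware; the companion change widens ucode_prefix from 15 to 30 bytes, presumably to leave headroom for longer decoded IP names. A minimal sketch of the same selection, using a hypothetical smu_request_pmfw() wrapper (the driver inlines this logic in smu_v13_0_init_microcode() instead):

	/* Hypothetical wrapper: pick "<prefix>_kicker.bin" on kicker boards,
	 * "<prefix>.bin" otherwise, via the same amdgpu_ucode_request() call. */
	static int smu_request_pmfw(struct amdgpu_device *adev, const char *prefix)
	{
		const char *suffix = amdgpu_is_kicker_fw(adev) ? "_kicker" : "";

		return amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
					    "amdgpu/%s%s.bin", prefix, suffix);
	}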
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 533d58e57d05..e0d356f93ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -322,7 +322,63 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
+{
+ const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ MetricsTable_t *metrics;
+ int inst, j, k, idx;
+ u32 inst_mask;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instance */
+ inst = GET_INST(VCN, k);
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(metrics->
+ JpegBusy[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ metrics->VclkFrequency[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ metrics->DclkFrequency[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ metrics->SocclkFrequency[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(metrics->UclkFrequency);
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]);
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+
+ return sizeof(*xcp_metrics);
+}
+
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_8 *gpu_metrics =
@@ -334,8 +390,7 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
struct amdgpu_xcp *xcp;
u32 inst_mask;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+ metrics = (MetricsTable_t *)smu_metrics;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
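
A note on SMUQ10_ROUND(): the PMFW reports these metrics in Q10 fixed point, i.e. with 10 fractional bits. A minimal round-to-nearest sketch under that assumption (equivalent in effect to, though not necessarily identical with, the in-tree macro):

	/* Round an unsigned Q10 fixed-point value to the nearest integer:
	 * add half an LSB (1 << 9), then drop the 10 fractional bits. */
	#define Q10_ROUND_SKETCH(x)	(((x) + (1u << 9)) >> 10)

	/* Example: 1536 is 1.5 in Q10 and rounds to 2; 1535 rounds to 1. */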
@@ -416,13 +471,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
+ gpu_metrics->xgmi_write_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
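
This hunk, and the matching one in smu_v13_0_6_get_gpu_metrics() further down, reindexes the per-link XGMI accumulators: firmware reports links by its own index i, while gpu_metrics is laid out by extended (topology) link number. The guard pattern, extracted for clarity:

	/* Remap fw link index i to extended link number j before writing;
	 * skip entries with no valid mapping instead of indexing out of range. */
	j = amdgpu_xgmi_get_ext_link(adev, i);
	if (j < 0 || j >= NUM_XGMI_LINKS)
		continue;	/* no extended link for this fw index */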
@@ -474,7 +532,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->firmware_timestamp = metrics->Timestamp;
*table = (void *)gpu_metrics;
- kfree(metrics);
return sizeof(*gpu_metrics);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 7d4ff09be7e8..f00ef7f3f355 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -312,6 +312,11 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551600) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
@@ -392,10 +397,14 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
- if (fw_ver >= 0x00557F01) {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
}
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
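
PMFW version numbers compare as packed integers, so each capability gate reduces to a single >= test per program (pgm) stream. A condensed, illustrative restatement of the PLDM_VERSION gate added above (the helper name is invented for the example):

	/* Illustrative only: pgm selects the firmware program stream; packed
	 * version integers let one comparison cover major/minor/patch at once. */
	static bool pldm_version_supported(u32 pgm, u32 fw_ver)
	{
		return (pgm == 0 && fw_ver >= 0x00558000) ||
		       (pgm == 7 && fw_ver >= 0x7551000);
	}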
@@ -752,6 +761,11 @@ static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
}
dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
}
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
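
The pldmVersion field uses 0xFFFFFFFF as an "unpopulated" sentinel, so the cached copy in adev->firmware.pldm_version is written only when the firmware actually filled the field in. Reading it back is then a plain field access; a hypothetical consumer sketch:

	/* Hypothetical consumer: pldm_version holds either a real value cached
	 * above or its zeroed default; the 0xFFFFFFFF sentinel is filtered
	 * out at fill time and never reaches readers. */
	u32 pldm = smu->adev->firmware.pldm_version;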
@@ -2529,6 +2543,126 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
return pcie_gen_to_speed(speed_level + 1);
}
+static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
+ void *table)
+{
+ const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret, inst, i, j, k, idx;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+ bool per_inst;
+
+ if (!table)
+ return sizeof(*xcp_metrics);
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ if (xcp->id == xcp_id)
+ break;
+ }
+ if (i == adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret) {
+ kfree(metrics_v0);
+ return ret;
+ }
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
+ goto out;
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(VclkFrequency, version)[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(DclkFrequency, version)[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
+
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
+ version)[inst]);
+
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitPptAcc
+ [inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitThmAcc
+ [inst]);
+ xcp_metrics->gfx_low_utilization_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0
+ ->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc
+ [idx] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitTotalAcc
+ [inst]);
+ }
+ idx++;
+ }
+ }
+out:
+ kfree(metrics_v0);
+
+ return sizeof(*xcp_metrics);
+}
+
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
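
Like the other metrics getters here, smu_v13_0_6_get_xcp_metrics() returns the required buffer size when called with a NULL table, so callers can size the allocation first. A hypothetical caller sketch (real callers dispatch through the new .get_xcp_metrics pptable_funcs hook):

	/* Hypothetical two-call usage: the first call sizes the buffer, the
	 * second fills it. Error handling trimmed for brevity. */
	ssize_t size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, NULL);
	void *buf = kzalloc(size, GFP_KERNEL);

	if (buf)
		size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, buf);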
@@ -2542,6 +2676,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
@@ -2554,8 +2689,11 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
}
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_get_gpu_metrics(smu, table);
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
+ kfree(metrics_v0);
+ return num_bytes;
+ }
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
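
With this change smu_v13_0_12_get_gpu_metrics() borrows the metrics buffer from its caller instead of allocating and copying its own, so ownership stays with smu_v13_0_6_get_gpu_metrics(). The resulting pattern, restated with comments:

	/* Ownership sketch: the v13.0.12 helper reads smu_metrics in place
	 * (its old kzalloc/memcpy/kfree is gone); the caller still owns
	 * metrics_v0 and frees it on every path. */
	num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
	kfree(metrics_v0);
	return num_bytes;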
@@ -2670,13 +2808,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
+ gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -3673,6 +3814,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
+ .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index d151bcd0cca7..d38d6d76b1e7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -67,6 +67,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(STATIC_METRICS),
SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(PLDM_VERSION),
SMU_CAP(ALL),
};
@@ -79,7 +80,10 @@ int smu_v13_0_12_get_max_metrics_size(void);
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member, uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
+ struct amdgpu_xcp *xcp, void *table,
+ void *smu_metrics);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif