Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20
 drivers/gpu/drm/i915/Makefile | 2
 drivers/gpu/drm/i915/display/intel_color.c | 335
 drivers/gpu/drm/i915/display/intel_color.h | 8
 drivers/gpu/drm/i915/display/intel_color_pipeline.c | 99
 drivers/gpu/drm/i915/display/intel_color_pipeline.h | 14
 drivers/gpu/drm/i915/display/intel_color_regs.h | 29
 drivers/gpu/drm/i915/display/intel_colorop.c | 35
 drivers/gpu/drm/i915/display/intel_colorop.h | 15
 drivers/gpu/drm/i915/display/intel_display.c | 5
 drivers/gpu/drm/i915/display/intel_display_limits.h | 9
 drivers/gpu/drm/i915/display/intel_display_types.h | 9
 drivers/gpu/drm/i915/display/intel_plane.c | 55
 drivers/gpu/drm/i915/display/skl_universal_plane.c | 21
 drivers/gpu/drm/i915/display/skl_universal_plane_regs.h | 115
 drivers/gpu/drm/i915/gt/intel_region_lmem.c | 24
 drivers/gpu/drm/i915/gvt/kvmgt.c | 263
 drivers/gpu/drm/xe/Makefile | 6
 drivers/gpu/drm/xe/xe_gpu_scheduler.h | 5
 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 2
 drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 9
 drivers/gpu/drm/xe/xe_guc_submit.c | 47
 drivers/gpu/drm/xe/xe_pagefault.c | 1
 drivers/gpu/drm/xe/xe_pci.c | 17
 drivers/gpu/drm/xe/xe_pci.h | 3
 drivers/gpu/drm/xe/xe_pm.c | 21
 drivers/gpu/drm/xe/xe_pm.h | 17
 drivers/gpu/drm/xe/xe_sched_job_types.h | 4
 drivers/gpu/drm/xe/xe_sriov_pf_migration.c | 35
 drivers/gpu/drm/xe/xe_sriov_pf_migration.h | 1
 drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h | 4
 drivers/gpu/drm/xe/xe_sriov_vfio.c | 80
 drivers/gpu/drm/xe/xe_vram.c | 58
 33 files changed, 1115 insertions(+), 253 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a1817b4b5173..58c3ffe707d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1678,9 +1678,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
+ int max_size, r;
unsigned int i;
u16 cmd;
- int r;
if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
return 0;
@@ -1726,30 +1726,28 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
/* Limit the BAR size to what is available */
- rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
- rbar_size);
+ max_size = pci_rebar_get_max_size(adev->pdev, 0);
+ if (max_size < 0)
+ return 0;
+ rbar_size = min(max_size, rbar_size);
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ /* Tear down doorbell as resizing will release BARs */
amdgpu_doorbell_fini(adev);
- if (adev->asic_type >= CHIP_BONAIRE)
- pci_release_resource(adev->pdev, 2);
- pci_release_resource(adev->pdev, 0);
-
- r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ r = pci_resize_resource(adev->pdev, 0, rbar_size,
+ (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
+ : 1 << 2);
if (r == -ENOSPC)
dev_info(adev->dev,
"Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
- pci_assign_unassigned_bus_resources(adev->pdev->bus);
-
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7c89e5e0a277..4db24050edb0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -239,6 +239,8 @@ i915-y += \
display/intel_cdclk.o \
display/intel_cmtg.o \
display/intel_color.o \
+ display/intel_colorop.o \
+ display/intel_color_pipeline.o \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index a217a67ceb43..e7950655434b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -32,6 +32,8 @@
#include "intel_display_utils.h"
#include "intel_dsb.h"
#include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"
struct intel_color_funcs {
int (*color_check)(struct intel_atomic_state *state,
@@ -87,6 +89,14 @@ struct intel_color_funcs {
* Read config other than LUTs and CSCs, before them. Optional.
*/
void (*get_config)(struct intel_crtc_state *crtc_state);
+
+	/* Plane CSC */
+ void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+
+ /* Plane Pre/Post CSC */
+ void (*load_plane_luts)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -609,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
if (CTM_COEFF_NEGATIVE(coeff))
c = -c;
+ int_bits = max(int_bits, 1);
+
c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
(s64)(BIT(int_bits + frac_bits - 1) - 1));
@@ -3836,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
}
}
+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_property_blob *blob = plane_state->hw.ctm;
+ struct drm_color_ctm_3x4 *ctm;
+ const u64 *input;
+ u16 coeffs[9] = {};
+ int i, j;
+
+ if (!icl_is_hdr_plane(display, plane) || !blob)
+ return;
+
+ ctm = blob->data;
+ input = ctm->matrix;
+
+ /*
+	 * Convert fixed point S31.32 input to the format supported by the
+ * hardware.
+ */
+ for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[j]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+ /* Skip postoffs */
+ if (!((j + 2) % 4))
+ j += 2;
+ else
+ j++;
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+ coeffs[0] << 16 | coeffs[1]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+ coeffs[2] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+ coeffs[3] << 16 | coeffs[4]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+ coeffs[5] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+ coeffs[6] << 16 | coeffs[7]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+ coeffs[8] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+ /*
+	 * Conversion from S31.32 to S0.12. BIT[12] is the sign bit
+ */
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 0),
+ ctm_to_twos_complement(input[3], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 1),
+ ctm_to_twos_complement(input[7], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 2),
+ ctm_to_twos_complement(input[11], 0, 12));
+}
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+ u32 i, lut_size;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ lut_size = 128;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+
+ if (pre_csc_lut) {
+ for (i = 0; i < lut_size; i++) {
+ u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Program the max register to clamp values > 1.0. */
+ /* TODO: Restrict to 0x7ffffff */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+			} while (i++ < 130);
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 130);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+ u32 i, lut_size, lut_val;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ /* TODO: Add macro */
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ if (post_csc_lut) {
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Segment 2 */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+ } while (i++ < 34);
+ } else {
+			/* TODO: Add for segment 0 */
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 34);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.degamma_lut)
+ xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+ if (plane_state->hw.gamma_lut)
+ xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+ return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+ struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ const struct drm_color_lut32 *lut = blob->data;
+ int i, lut_size = drm_color_lut32_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++)
+ intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ if (enable)
+ val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
+
+ intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
+}
+
static const struct intel_color_funcs chv_color_funcs = {
.color_check = chv_color_check,
.color_commit_arm = i9xx_color_commit_arm,
@@ -3883,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
.lut_equal = icl_lut_equal,
.read_csc = icl_read_csc,
.get_config = skl_get_config,
+ .load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
+ .load_plane_luts = xelpd_plane_load_luts,
};
static const struct intel_color_funcs icl_color_funcs = {
@@ -3963,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
.get_config = ilk_get_config,
};
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
+}
+
+static void
+intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_csc_matrix)
+ display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
+}
+
+static void
+intel_color_load_plane_luts(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_luts)
+ display->funcs.color->load_plane_luts(dsb, plane_state);
+}
+
+bool
+intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
+{
+ if (DISPLAY_VER(display) >= 12)
+ return pipe == PIPE_A || pipe == PIPE_B;
+ else
+ return false;
+}
+
+static void
+intel_color_load_3dlut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
+}
+
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.ctm)
+ intel_color_load_plane_csc_matrix(dsb, plane_state);
+ if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
+ intel_color_load_plane_luts(dsb, plane_state);
+ if (plane_state->hw.lut_3d)
+ intel_color_load_3dlut(dsb, plane_state);
+}
+
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index bf7a12ce9df0..c21b9bdf7bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -13,7 +13,9 @@ struct intel_crtc_state;
struct intel_crtc;
struct intel_display;
struct intel_dsb;
+struct intel_plane_state;
struct drm_property_blob;
+enum pipe;
void intel_color_init_hooks(struct intel_display *display);
int intel_color_init(struct intel_display *display);
@@ -40,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
-
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
new file mode 100644
index 000000000000..942d9b9c93ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_color.h"
+#include "intel_colorop.h"
+#include "intel_color_pipeline.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "skl_universal_plane.h"
+
+#define MAX_COLOR_PIPELINES 1
+#define PLANE_DEGAMMA_SIZE 128
+#define PLANE_GAMMA_SIZE 32
+
+static
+int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
+ enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_colorop *prev_op;
+ struct intel_colorop *colorop;
+ int ret;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_DEGAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+
+ if (ret)
+ return ret;
+
+ list->type = colorop->base.base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
+
+ /* TODO: handle failures and clean up */
+ prev_op = &colorop->base;
+
+ if (DISPLAY_VER(display) >= 35 &&
+ intel_color_crtc_has_3dlut(display, pipe) &&
+ plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
+
+ ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ true);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ prev_op = &colorop->base;
+ }
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+ ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+ prev_op = &colorop->base;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_GAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ return 0;
+}
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ int len = 0;
+ int ret;
+
+ /* Currently expose pipeline only for HDR planes */
+ if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
+ return 0;
+
+ /* Add pipeline consisting of transfer functions */
+ ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
+ if (ret)
+ return ret;
+ len++;
+
+ return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.h b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
new file mode 100644
index 000000000000..a457d306da7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_PIPELINE_H__
+#define __INTEL_COLOR_PIPELINE_H__
+
+struct drm_plane;
+enum pipe;
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
+
+#endif /* __INTEL_COLOR_PIPELINE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 8eb643cfead7..c370b6029369 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -316,4 +316,33 @@
#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
+/* 3D LUT */
+#define _LUT_3D_CTL_A 0x490A4
+#define _LUT_3D_CTL_B 0x491A4
+#define LUT_3D_CTL(pipe) _MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
+#define LUT_3D_ENABLE REG_BIT(31)
+#define LUT_3D_READY REG_BIT(30)
+#define LUT_3D_BINDING_MASK REG_GENMASK(23, 22)
+#define LUT_3D_BIND_PIPE REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
+#define LUT_3D_BIND_PLANE_1 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
+#define LUT_3D_BIND_PLANE_2 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
+#define LUT_3D_BIND_PLANE_3 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
+
+#define _LUT_3D_INDEX_A 0x490A8
+#define _LUT_3D_INDEX_B 0x491A8
+#define LUT_3D_INDEX(pipe) _MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
+#define LUT_3D_AUTO_INCREMENT REG_BIT(13)
+#define LUT_3D_INDEX_VALUE_MASK REG_GENMASK(12, 0)
+#define LUT_3D_INDEX_VALUE(x) REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
+
+#define _LUT_3D_DATA_A 0x490AC
+#define _LUT_3D_DATA_B 0x491AC
+#define LUT_3D_DATA(pipe) _MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
+#define LUT_3D_DATA_RED_MASK REG_GENMASK(29, 20)
+#define LUT_3D_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define LUT_3D_DATA_BLUE_MASK REG_GENMASK(9, 0)
+#define LUT_3D_DATA_RED(x) REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
+#define LUT_3D_DATA_GREEN(x) REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
+#define LUT_3D_DATA_BLUE(x) REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
+
#endif /* __INTEL_COLOR_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.c b/drivers/gpu/drm/i915/display/intel_colorop.c
new file mode 100644
index 000000000000..f2fc0d8780ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_colorop.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
+{
+ return container_of(colorop, struct intel_colorop, base);
+}
+
+struct intel_colorop *intel_colorop_alloc(void)
+{
+ struct intel_colorop *colorop;
+
+ colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
+ if (!colorop)
+ return ERR_PTR(-ENOMEM);
+
+ return colorop;
+}
+
+struct intel_colorop *intel_colorop_create(enum intel_color_block id)
+{
+ struct intel_colorop *colorop;
+
+ colorop = intel_colorop_alloc();
+
+ if (IS_ERR(colorop))
+ return colorop;
+
+ colorop->id = id;
+
+ return colorop;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.h b/drivers/gpu/drm/i915/display/intel_colorop.h
new file mode 100644
index 000000000000..21d58eb9f3d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOROP_H__
+#define __INTEL_COLOROP_H__
+
+#include "intel_display_types.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
+struct intel_colorop *intel_colorop_alloc(void);
+struct intel_colorop *intel_colorop_create(enum intel_color_block id);
+
+#endif /* __INTEL_COLOROP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7b4fd18c60e2..095a319f8bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7304,6 +7304,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;
if (!new_crtc_state->use_flipq &&
!new_crtc_state->use_dsb &&
@@ -7314,10 +7315,12 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Rough estimate:
* ~64 registers per each plane * 8 planes = 512
* Double that for pipe stuff and other overhead.
+ * ~4913 registers for 3DLUT
+ * ~200 color registers * 3 HDR planes
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
new_crtc_state->use_dsb ||
- new_crtc_state->use_flipq ? 1024 : 16);
+ new_crtc_state->use_flipq ? size : 16);
if (!new_crtc_state->dsb_commit) {
new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
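
(As a rough sanity check of the 8192-entry budget chosen above, using only the figures from the comment in the hunk: a 17-point 3D LUT needs 17^3 = 4913 data writes, the ~200 color registers times 3 HDR planes add ~600 more, and the pre-existing ~1024 plane/pipe estimate brings the total to roughly 6500 register writes, which fits comfortably within 8192.)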
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index f0fa27e365ab..cb3c9c665c44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -138,4 +138,13 @@ enum hpd_pin {
HPD_NUM_PINS
};
+enum intel_color_block {
+ INTEL_PLANE_CB_PRE_CSC_LUT,
+ INTEL_PLANE_CB_CSC,
+ INTEL_PLANE_CB_POST_CSC_LUT,
+ INTEL_PLANE_CB_3DLUT,
+
+ INTEL_CB_MAX
+};
+
#endif /* __INTEL_DISPLAY_LIMITS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 38702a9e0f50..06bf8f7c0989 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -646,6 +646,7 @@ struct intel_plane_state {
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
enum drm_scaling_filter scaling_filter;
+ struct drm_property_blob *ctm, *degamma_lut, *gamma_lut, *lut_3d;
} hw;
struct i915_vma *ggtt_vma;
@@ -1391,6 +1392,9 @@ struct intel_crtc_state {
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
} alpm_state;
+
+ /* to track changes in plane color blocks */
+ bool plane_color_changed;
};
enum intel_pipe_crc_source {
@@ -1985,6 +1989,11 @@ struct intel_dp_mst_encoder {
struct intel_connector *connector;
};
+struct intel_colorop {
+ struct drm_colorop base;
+ enum intel_color_block id;
+};
+
static inline struct intel_encoder *
intel_attached_encoder(struct intel_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 5105e3278bc4..ab6a58530b39 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -49,6 +49,7 @@
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
+#include "intel_colorop.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -336,6 +337,58 @@ intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
*damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
}
+static bool
+intel_plane_colorop_replace_blob(struct intel_plane_state *plane_state,
+ struct intel_colorop *intel_colorop,
+ struct drm_property_blob *blob)
+{
+ if (intel_colorop->id == INTEL_PLANE_CB_CSC)
+ return drm_property_replace_blob(&plane_state->hw.ctm, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_PRE_CSC_LUT)
+ return drm_property_replace_blob(&plane_state->hw.degamma_lut, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_POST_CSC_LUT)
+ return drm_property_replace_blob(&plane_state->hw.gamma_lut, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_3DLUT)
+ return drm_property_replace_blob(&plane_state->hw.lut_3d, blob);
+
+ return false;
+}
+
+static void
+intel_plane_color_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_colorop *iter_colorop, *colorop;
+ struct drm_colorop_state *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->uapi.state;
+ struct intel_colorop *intel_colorop;
+ struct drm_property_blob *blob;
+ struct intel_atomic_state *intel_atomic_state = to_intel_atomic_state(state);
+ struct intel_crtc_state *new_crtc_state = intel_atomic_state ?
+ intel_atomic_get_new_crtc_state(intel_atomic_state, crtc) : NULL;
+ bool changed = false;
+ int i = 0;
+
+ iter_colorop = plane_state->uapi.color_pipeline;
+
+ while (iter_colorop) {
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == iter_colorop) {
+ blob = new_colorop_state->bypass ? NULL : new_colorop_state->data;
+ intel_colorop = to_intel_colorop(colorop);
+ changed |= intel_plane_colorop_replace_blob(plane_state,
+ intel_colorop,
+ blob);
+ }
+ }
+ iter_colorop = iter_colorop->next;
+ }
+
+ if (new_crtc_state && changed)
+ new_crtc_state->plane_color_changed = true;
+}
+
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
@@ -364,6 +417,8 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
+
+ intel_plane_color_copy_uapi_to_hw_state(plane_state, from_plane_state, crtc);
}
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 89c8003ccfe7..ee8e24497d2c 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -11,6 +11,8 @@
#include "pxp/intel_pxp.h"
#include "intel_bo.h"
+#include "intel_color.h"
+#include "intel_color_pipeline.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
@@ -1275,6 +1277,18 @@ static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
if (plane_state->force_black)
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+ if (plane_state->hw.degamma_lut)
+ plane_color_ctl |= PLANE_COLOR_PRE_CSC_GAMMA_ENABLE;
+
+ if (plane_state->hw.ctm)
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
+ if (plane_state->hw.gamma_lut) {
+ plane_color_ctl &= ~PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ if (drm_color_lut32_size(plane_state->hw.gamma_lut) != 32)
+ plane_color_ctl |= PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE;
+ }
+
return plane_color_ctl;
}
@@ -1556,6 +1570,8 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
+ intel_color_plane_program_pipeline(dsb, plane_state);
+
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
crtc_x = 0;
@@ -1657,6 +1673,8 @@ icl_plane_update_arm(struct intel_dsb *dsb,
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
+ intel_color_plane_commit_arm(dsb, plane_state);
+
/*
* In order to have FBC for fp16 formats pixel normalizer block must be
* active. Check if pixel normalizer block need to be enabled for FBC.
@@ -3001,6 +3019,9 @@ skl_universal_plane_create(struct intel_display *display,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (DISPLAY_VER(display) >= 12)
+ intel_color_pipeline_plane_init(&plane->base, pipe);
+
drm_plane_create_alpha_property(&plane->base);
drm_plane_create_blend_mode_property(&plane->base,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index 6f815b231340..6fd4da9f63cf 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -254,6 +254,8 @@
#define PLANE_COLOR_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-ICL */
#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */
#define PLANE_COLOR_INPUT_CSC_ENABLE REG_BIT(20) /* ICL+ */
+#define PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE REG_BIT(15) /* TGL+ */
+#define PLANE_COLOR_PRE_CSC_GAMMA_ENABLE REG_BIT(14)
#define PLANE_COLOR_CSC_MODE_MASK REG_GENMASK(19, 17)
#define PLANE_COLOR_CSC_MODE_BYPASS REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1)
@@ -290,6 +292,119 @@
_PLANE_INPUT_CSC_POSTOFF_HI_1_A, _PLANE_INPUT_CSC_POSTOFF_HI_1_B, \
_PLANE_INPUT_CSC_POSTOFF_HI_2_A, _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A 0x70160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B 0x71160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A 0x70260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B 0x71260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A 0x70164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B 0x71164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A 0x70264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B 0x71264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A 0x701d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B 0x711d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A 0x702d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B 0x712d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_A 0x701dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_B 0x711dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_A 0x702dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_B 0x712dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_1_A 0x704d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1_B 0x714d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_A 0x705d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_B 0x715d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_1_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_2_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_1(pipe), \
+ _PLANE_POST_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_1_A 0x704dc
+#define _PLANE_POST_CSC_GAMC_DATA_1_B 0x714dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_A 0x705dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_B 0x715dc
+#define _PLANE_POST_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_1_A, \
+ _PLANE_POST_CSC_GAMC_DATA_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_2_A, \
+ _PLANE_POST_CSC_GAMC_DATA_2_B)
+#define PLANE_POST_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_1(pipe), \
+ _PLANE_POST_CSC_GAMC_DATA_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A 0x701d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B 0x711d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A 0x702d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B 0x712d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe))
+#define PLANE_PAL_PREC_AUTO_INCREMENT REG_BIT(10)
+
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A 0x701d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B 0x711d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A 0x702d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B 0x712d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_A 0x704d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_B 0x714d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_A 0x705d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_B 0x715d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_1_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_2_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_DATA_1_A 0x704d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1_B 0x714d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_A 0x705d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_B 0x715d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_1_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_2_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_DATA_2(pipe))
+
#define _PLANE_CSC_RY_GY_1_A 0x70210
#define _PLANE_CSC_RY_GY_2_A 0x70310
#define _PLANE_CSC_RY_GY_1_B 0x71210
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 890183de2277..a30060fd4429 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -20,16 +20,6 @@
#include "gt/intel_gt_regs.h"
#ifdef CONFIG_64BIT
-static void _release_bars(struct pci_dev *pdev)
-{
- int resno;
-
- for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
- if (pci_resource_len(pdev, resno))
- pci_release_resource(pdev, resno);
- }
-}
-
static void
_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
{
@@ -37,9 +27,7 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
int bar_size = pci_rebar_bytes_to_size(size);
int ret;
- _release_bars(pdev);
-
- ret = pci_resize_resource(pdev, resno, bar_size);
+ ret = pci_resize_resource(pdev, resno, bar_size, 0);
if (ret) {
drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
resno, 1 << bar_size, ERR_PTR(ret));
@@ -63,16 +51,12 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
- u32 bar_sizes;
-
- rebar_size = i915->params.lmem_bar_size *
- (resource_size_t)SZ_1M;
- bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
-
+ rebar_size = i915->params.lmem_bar_size * (resource_size_t)SZ_1M;
if (rebar_size == current_size)
return;
- if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
+ if (!pci_rebar_size_supported(pdev, GEN12_LMEM_BAR,
+ pci_rebar_bytes_to_size(rebar_size)) ||
rebar_size >= roundup_pow_of_two(lmem_size)) {
rebar_size = lmem_size;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index bbeba0d3fca8..3abc9206f1a8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1141,6 +1141,122 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
+static int intel_vgpu_ioctl_get_region_info(struct vfio_device *vfio_dev,
+ struct vfio_region_info *info,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ int nr_areas = 1;
+ int cap_type_id;
+ unsigned int i;
+ int ret;
+
+ switch (info->index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->gvt->device_info.cfg_space_size;
+ info->flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->cfg_space.bar[info->index].size;
+ if (!info->size) {
+ info->flags = 0;
+ break;
+ }
+
+ info->flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info->size = gvt_aperture_sz(vgpu->gvt);
+
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->header.version = 1;
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+
+ gvt_dbg_core("get region info bar:%d\n", info->index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+
+ gvt_dbg_core("get region info index:%d\n", info->index);
+ break;
+ default: {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1
+ };
+
+ if (info->index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
+ return -EINVAL;
+ info->index = array_index_nospec(
+ info->index, VFIO_PCI_NUM_REGIONS + vgpu->num_regions);
+
+ i = info->index - VFIO_PCI_NUM_REGIONS;
+
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->region[i].size;
+ info->flags = vgpu->region[i].flags;
+
+ cap_type.type = vgpu->region[i].type;
+ cap_type.subtype = vgpu->region[i].subtype;
+
+ ret = vfio_info_add_capability(caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ ret = -EINVAL;
+ if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
+ ret = vfio_info_add_capability(
+ caps, &sparse->header,
+ struct_size(sparse, areas, sparse->nr_areas));
+ }
+ if (ret) {
+ kfree(sparse);
+ return ret;
+ }
+ }
+
+ kfree(sparse);
+ return 0;
+}
+
static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
@@ -1169,152 +1285,6 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
- } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
- struct vfio_region_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned int i;
- int ret;
- struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- int nr_areas = 1;
- int cap_type_id;
-
- minsz = offsetofend(struct vfio_region_info, offset);
-
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (info.argsz < minsz)
- return -EINVAL;
-
- switch (info.index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->gvt->device_info.cfg_space_size;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- break;
- case VFIO_PCI_BAR0_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->cfg_space.bar[info.index].size;
- if (!info.size) {
- info.flags = 0;
- break;
- }
-
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- break;
- case VFIO_PCI_BAR1_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
- break;
- case VFIO_PCI_BAR2_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.flags = VFIO_REGION_INFO_FLAG_CAPS |
- VFIO_REGION_INFO_FLAG_MMAP |
- VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- info.size = gvt_aperture_sz(vgpu->gvt);
-
- sparse = kzalloc(struct_size(sparse, areas, nr_areas),
- GFP_KERNEL);
- if (!sparse)
- return -ENOMEM;
-
- sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
- sparse->header.version = 1;
- sparse->nr_areas = nr_areas;
- cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
- sparse->areas[0].offset =
- PAGE_ALIGN(vgpu_aperture_offset(vgpu));
- sparse->areas[0].size = vgpu_aperture_sz(vgpu);
- break;
-
- case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
-
- gvt_dbg_core("get region info bar:%d\n", info.index);
- break;
-
- case VFIO_PCI_ROM_REGION_INDEX:
- case VFIO_PCI_VGA_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
-
- gvt_dbg_core("get region info index:%d\n", info.index);
- break;
- default:
- {
- struct vfio_region_info_cap_type cap_type = {
- .header.id = VFIO_REGION_INFO_CAP_TYPE,
- .header.version = 1 };
-
- if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->num_regions)
- return -EINVAL;
- info.index =
- array_index_nospec(info.index,
- VFIO_PCI_NUM_REGIONS +
- vgpu->num_regions);
-
- i = info.index - VFIO_PCI_NUM_REGIONS;
-
- info.offset =
- VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->region[i].size;
- info.flags = vgpu->region[i].flags;
-
- cap_type.type = vgpu->region[i].type;
- cap_type.subtype = vgpu->region[i].subtype;
-
- ret = vfio_info_add_capability(&caps,
- &cap_type.header,
- sizeof(cap_type));
- if (ret)
- return ret;
- }
- }
-
- if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
- ret = -EINVAL;
- if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP)
- ret = vfio_info_add_capability(&caps,
- &sparse->header,
- struct_size(sparse, areas,
- sparse->nr_areas));
- if (ret) {
- kfree(sparse);
- return ret;
- }
- }
-
- if (caps.size) {
- info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- info.cap_offset = 0;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- kfree(sparse);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
- }
-
- kfree(caps.buf);
- }
-
- kfree(sparse);
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -1477,6 +1447,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .get_region_info_caps = intel_vgpu_ioctl_get_region_info,
.dma_unmap = intel_vgpu_dma_unmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e4b273b025d2..62be4a5227e4 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -184,6 +184,10 @@ xe-$(CONFIG_PCI_IOV) += \
xe_sriov_pf_sysfs.o \
xe_tile_sriov_pf_debugfs.o
+ifdef CONFIG_XE_VFIO_PCI
+ xe-$(CONFIG_PCI_IOV) += xe_sriov_vfio.o
+endif
+
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
xe-y += tests/xe_kunit_helpers.o
@@ -242,6 +246,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_cdclk.o \
i915-display/intel_cmtg.o \
i915-display/intel_color.o \
+ i915-display/intel_colorop.o \
+ i915-display/intel_color_pipeline.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
i915-display/intel_crtc.o \
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 9955397aaaa9..c7a77a3a9681 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -54,13 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
struct drm_sched_job *s_job;
+ bool restore_replay = false;
list_for_each_entry(s_job, &sched->base.pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
- if (to_xe_sched_job(s_job)->skip_emit ||
- (hw_fence && !dma_fence_is_signaled(hw_fence)))
+ restore_replay |= to_xe_sched_job(s_job)->restore_replay;
+ if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 62f6cc45a764..59c5c6b4d994 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -711,7 +711,7 @@ static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
if (num_vfs > 56)
return SZ_64M - SZ_8M;
- return rounddown_pow_of_two(shareable / num_vfs);
+ return rounddown_pow_of_two(div_u64(shareable, num_vfs));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index d5d918ddce4f..3174a8dee779 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -17,6 +17,7 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
@@ -1023,6 +1024,12 @@ static void action_ring_cleanup(void *arg)
ptr_ring_cleanup(r, destroy_pf_packet);
}
+static void pf_gt_migration_check_support(struct xe_gt *gt)
+{
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
+ xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
+}
+
/**
* xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
* @gt: the &xe_gt
@@ -1039,6 +1046,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ pf_gt_migration_check_support(gt);
+
if (!pf_migration_supported(gt))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d4ffdb71ef3d..ed7be50b2f72 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -822,7 +822,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
- if (!job->skip_emit || job->last_replay) {
+ if (!job->restore_replay || job->last_replay) {
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
else
@@ -881,10 +881,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
- if (!job->skip_emit)
+ if (!job->restore_replay)
q->ring_ops->emit_job(job);
submit_exec_queue(q, job);
- job->skip_emit = false;
+ job->restore_replay = false;
}
/*
@@ -2112,6 +2112,18 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
q->guc->resume_time = 0;
}
+static void lrc_parallel_clear(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ struct iosys_map map = xe_lrc_parallel_map(lrc);
+ int i;
+
+ for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+ parallel_write(xe, map, wq[i],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, 0));
+}
+
/*
* This function is quite complex but is the only real way to ensure no state is
* lost during VF resume flows. The function scans the queue state, makes adjustments
@@ -2135,8 +2147,8 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
guc_exec_queue_revert_pending_state_change(guc, q);
if (xe_exec_queue_is_parallel(q)) {
- struct xe_device *xe = guc_to_xe(guc);
- struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
/*
* NOP existing WQ commands that may contain stale GGTT
@@ -2144,14 +2156,14 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
* seems to get confused if the WQ head/tail pointers are
* adjusted.
*/
- for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
- parallel_write(xe, map, wq[i],
- FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
- FIELD_PREP(WQ_LEN_MASK, 0));
+ if (lrc)
+ lrc_parallel_clear(lrc);
}
job = xe_sched_first_pending_job(sched);
if (job) {
+ job->restore_replay = true;
+
/*
* Adjust software tail so jobs submitted overwrite previous
* position in ring buffer with new GGTT addresses.
@@ -2241,17 +2253,18 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct drm_sched_job *s_job;
struct xe_sched_job *job = NULL;
+ bool restore_replay = false;
- list_for_each_entry(s_job, &sched->base.pending_list, list) {
- job = to_xe_sched_job(s_job);
-
- xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
- q->guc->id, xe_sched_job_seqno(job));
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ restore_replay |= job->restore_replay;
+ if (restore_replay) {
+ xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+ q->guc->id, xe_sched_job_seqno(job));
- q->ring_ops->emit_job(job);
- job->skip_emit = true;
+ q->ring_ops->emit_job(job);
+ job->restore_replay = true;
+ }
}
if (job)
diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
index fe3e40145012..afb06598b6e1 100644
--- a/drivers/gpu/drm/xe/xe_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -102,7 +102,6 @@ retry_userptr:
/* Lock VM and BOs dma-resv */
xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
- drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
needs_vram == 1);
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 4636e4ef9baa..9c9ea10d994c 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -1223,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
#endif
};
+/**
+ * xe_pci_to_pf_device() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
+{
+ struct drm_device *drm;
+
+ drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
+ if (IS_ERR(drm))
+ return NULL;
+
+ return to_xe_device(drm);
+}
+
int xe_register_pci_driver(void)
{
return pci_register_driver(&xe_pci_driver);
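
A hedged usage sketch (the caller below is hypothetical, not part of this patch): consumers only need to test for NULL, since xe_pci_to_pf_device() flattens the ERR_PTR returned by pci_iov_get_pf_drvdata() when the device is not a VF or the PF is not bound to this driver.

/* Hypothetical caller, illustrating the NULL-on-error contract only. */
static int example_bind_to_pf(struct pci_dev *vf_pdev)
{
	struct xe_device *pf_xe = xe_pci_to_pf_device(vf_pdev);

	if (!pf_xe)
		return -ENODEV;

	/* ... use pf_xe for PF-side services ... */
	return 0;
}
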
diff --git a/drivers/gpu/drm/xe/xe_pci.h b/drivers/gpu/drm/xe/xe_pci.h
index 611c1209b14c..11bcc5fe2c5b 100644
--- a/drivers/gpu/drm/xe/xe_pci.h
+++ b/drivers/gpu/drm/xe/xe_pci.h
@@ -6,7 +6,10 @@
#ifndef _XE_PCI_H_
#define _XE_PCI_H_
+struct pci_dev;
+
int xe_register_pci_driver(void);
void xe_unregister_pci_driver(void);
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 44924512830f..766922530265 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -726,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
/**
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
* @xe: xe device instance
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime)) should
+ * be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which
+ * can break scope-based handling, or when the lifetime of the runtime PM
+ * reference does not match a specific scope (e.g., runtime PM obtained in one
+ * function and released in a different one).
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
@@ -758,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
* xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
* @xe: xe device instance
*
+ * When possible, scope-based runtime PM (through
+ * ACQUIRE(xe_pm_runtime_ioctl, ...)) should be preferred over direct usage of this
+ * function. Manual get/put handling should only be used when the function
+ * contains goto-based logic which can break scope-based handling, or when the
+ * lifetime of the runtime PM reference does not match a specific scope (e.g.,
+ * runtime PM obtained in one function and released in a different one).
+ *
* Returns: Any number greater than or equal to 0 for success, negative error
* code otherwise.
*/
@@ -827,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
* It will warn if not protected.
* The reference should be put back after this function regardless, since it
 * will always bump the usage counter.
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
+ * should be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which can
+ * break scope-based handling, or when the lifetime of the runtime PM reference
+ * does not match a specific scope (e.g., runtime PM obtained in one function
+ * and released in a different one).
*/
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
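
The scope-based alternative recommended in the comments above looks roughly as follows (hypothetical caller for illustration; the guard classes themselves are defined in xe_pm.h below):

/* Hypothetical example: the wakeref is dropped automatically on every return
 * path when the scope ends, instead of pairing get()/put() by hand. */
static int example_read_counters(struct xe_device *xe)
{
	guard(xe_pm_runtime)(xe);

	if (!example_hw_ready(xe))	/* placeholder check */
		return -EAGAIN;		/* wakeref released here */

	return example_sample_hw(xe);	/* and here */
}
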
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index f7f89a18b6fc..6b27039e7b2d 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -6,6 +6,7 @@
#ifndef _XE_PM_H_
#define _XE_PM_H_
+#include <linux/cleanup.h>
#include <linux/pm_runtime.h>
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
@@ -37,4 +38,20 @@ int xe_pm_block_on_suspend(struct xe_device *xe);
void xe_pm_might_block_on_suspend(void);
int xe_pm_module_init(void);
+static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
+
+DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
+ xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
+ xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
+
+/*
+ * Used when a function needs to release runtime PM in all possible cases
+ * and error paths, but the wakeref was already acquired by a different
+ * function (i.e., get() has already happened so only a put() is needed).
+ */
+DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
+ __xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
+
#endif
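
For the conditional ioctl variant, DEFINE_GUARD_COND() means the acquisition can fail, so a caller is expected to check the result through ACQUIRE()/ACQUIRE_ERR() from <linux/cleanup.h>. A hedged sketch with a hypothetical handler:

/* Hypothetical ioctl handler: xe_pm_runtime_get_ioctl() can fail, and the
 * _RET >= 0 condition above decides whether the wakeref is actually held. */
static int example_ioctl(struct xe_device *xe)
{
	ACQUIRE(xe_pm_runtime_ioctl, wakeref)(xe);
	int err = ACQUIRE_ERR(xe_pm_runtime_ioctl, &wakeref);

	if (err)
		return err;

	/* ... ioctl body runs with the device awake ... */
	return 0;
}

The xe_pm_runtime_release_only guard covers the opposite case: a function that only inherits an already-acquired wakeref can open a guard(xe_pm_runtime_release_only)(xe) scope so the put() still happens on every exit path.
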
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index d26612abb4ca..7c4c54fe920a 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -63,8 +63,8 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
- /** @skip_emit: skip emitting the job */
- bool skip_emit;
+ /** @restore_replay: job being replayed for restore */
+ bool restore_replay;
/** @last_replay: last job being replayed */
bool last_replay;
/** @ptrs: per instance pointers. */
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index de06cc690fc8..6c4b16409cc9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -46,13 +46,37 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_PF(xe));
- return xe->sriov.pf.migration.supported;
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
}
-static bool pf_check_migration_support(struct xe_device *xe)
+/**
+ * xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
+ * @xe: the &xe_device instance.
+ * @fmt: printf-style format string for the log message, combined with the following variadic arguments.
+ */
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration %s: %pV\n",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
+ "missing prerequisite" : "disabled",
+ &vaf);
+ va_end(va_args);
+
+ xe->sriov.pf.migration.disabled = true;
+}
+
+static void pf_migration_check_support(struct xe_device *xe)
{
- /* XXX: for now this is for feature enabling only */
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+ if (!xe_device_has_memirq(xe))
+ xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
}
static void pf_migration_cleanup(void *arg)
@@ -77,7 +101,8 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_PF(xe));
- xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
+ pf_migration_check_support(xe);
+
if (!xe_sriov_pf_migration_supported(xe))
return 0;
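
Additional prerequisites can be folded into pf_migration_check_support() in the same way; a hypothetical extra check (not part of this patch) showing the printf-style reason string:

/* Hypothetical check, for illustration only: the formatted reason ends up
 * after the "migration disabled:" (or, on CONFIG_DRM_XE_DEBUG builds,
 * "migration missing prerequisite:") prefix printed by
 * xe_sriov_pf_migration_disable(). */
static void example_check_vram(struct xe_device *xe, u64 vram_mb, u64 need_mb)
{
	if (vram_mb < need_mb)
		xe_sriov_pf_migration_disable(xe, "needs %llu MiB of VRAM, found %llu MiB",
					      need_mb, vram_mb);
}
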
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
index b806298a0bb6..f8f408df8481 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
@@ -14,6 +14,7 @@ struct xe_sriov_packet;
int xe_sriov_pf_migration_init(struct xe_device *xe);
bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
struct xe_sriov_packet *data);
struct xe_sriov_packet *
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
index 363d673ee1dd..7d9a8a278d91 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
@@ -14,8 +14,8 @@
* struct xe_sriov_pf_migration - Xe device level VF migration data
*/
struct xe_sriov_pf_migration {
- /** @supported: indicates whether VF migration feature is supported */
- bool supported;
+ /** @disabled: indicates whether VF migration feature is disabled */
+ bool disabled;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
new file mode 100644
index 000000000000..e9a7615bb5c5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/intel/xe_sriov_vfio.h>
+#include <linux/cleanup.h>
+
+#include "xe_pci.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
+{
+ return xe_pci_to_pf_device(pdev);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
+
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
+{
+ if (!IS_SRIOV_PF(xe))
+		return false;
+
+ return xe_sriov_pf_migration_supported(xe);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
+
+#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
+_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
+{ \
+ if (!IS_SRIOV_PF(xe)) \
+ return -EPERM; \
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
+ return -EINVAL; \
+ \
+ guard(xe_pm_runtime_noresume)(xe); \
+ \
+ return xe_sriov_pf_##_impl(xe, vfid); \
+} \
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")
+
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);
+
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_read(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
+
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_write(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
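
For reference, one instance of the generator macro above expands to roughly the following (illustrative expansion, not literal preprocessor output):

int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid)
{
	if (!IS_SRIOV_PF(xe))
		return -EPERM;
	if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
		return -EINVAL;

	guard(xe_pm_runtime_noresume)(xe);

	return xe_sriov_pf_control_wait_flr(xe, vfid);
}
EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_wait_flr_done, "xe-vfio-pci");
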
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 0e10da790cc5..d50baefcd124 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -25,39 +25,13 @@
#include "xe_vram.h"
#include "xe_vram_types.h"
-#define BAR_SIZE_SHIFT 20
-
-/*
- * Release all the BARs that could influence/block LMEMBAR resizing, i.e.
- * assigned IORESOURCE_MEM_64 BARs
- */
-static void release_bars(struct pci_dev *pdev)
-{
- struct resource *res;
- int i;
-
- pci_dev_for_each_resource(pdev, res, i) {
- /* Resource already un-assigned, do not reset it */
- if (!res->parent)
- continue;
-
- /* No need to release unrelated BARs */
- if (!(res->flags & IORESOURCE_MEM_64))
- continue;
-
- pci_release_resource(pdev, i);
- }
-}
-
static void resize_bar(struct xe_device *xe, int resno, resource_size_t size)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int bar_size = pci_rebar_bytes_to_size(size);
int ret;
- release_bars(pdev);
-
- ret = pci_resize_resource(pdev, resno, bar_size);
+ ret = pci_resize_resource(pdev, resno, bar_size, 0);
if (ret) {
drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support in your BIOS\n",
resno, 1 << bar_size, ERR_PTR(ret));
@@ -79,41 +53,37 @@ void xe_vram_resize_bar(struct xe_device *xe)
resource_size_t current_size;
resource_size_t rebar_size;
struct resource *root_res;
- u32 bar_size_mask;
+ int max_size, i;
u32 pci_cmd;
- int i;
/* gather some relevant info */
current_size = pci_resource_len(pdev, LMEM_BAR);
- bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR);
-
- if (!bar_size_mask)
- return;
if (force_vram_bar_size < 0)
return;
/* set to a specific size? */
if (force_vram_bar_size) {
- u32 bar_size_bit;
+ rebar_size = pci_rebar_bytes_to_size(force_vram_bar_size *
+ (resource_size_t)SZ_1M);
- rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
-
- bar_size_bit = bar_size_mask & BIT(pci_rebar_bytes_to_size(rebar_size));
-
- if (!bar_size_bit) {
+ if (!pci_rebar_size_supported(pdev, LMEM_BAR, rebar_size)) {
drm_info(&xe->drm,
- "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n",
- (u64)rebar_size >> 20, bar_size_mask, (u64)current_size >> 20);
+ "Requested size: %lluMiB is not supported by rebar sizes: 0x%llx. Leaving default: %lluMiB\n",
+ (u64)pci_rebar_size_to_bytes(rebar_size) >> 20,
+ pci_rebar_get_possible_sizes(pdev, LMEM_BAR),
+ (u64)current_size >> 20);
return;
}
- rebar_size = 1ULL << (__fls(bar_size_bit) + BAR_SIZE_SHIFT);
-
+ rebar_size = pci_rebar_size_to_bytes(rebar_size);
if (rebar_size == current_size)
return;
} else {
- rebar_size = 1ULL << (__fls(bar_size_mask) + BAR_SIZE_SHIFT);
+ max_size = pci_rebar_get_max_size(pdev, LMEM_BAR);
+ if (max_size < 0)
+ return;
+ rebar_size = pci_rebar_size_to_bytes(max_size);
/* only resize if larger than current */
if (rebar_size <= current_size)