Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c   369
1 file changed, 167 insertions(+), 202 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index debed011a958..70c699043d0e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
- if (intel_engine_stopped(engine))
- return 0;
-
/* We keep the previous context alive until we retire the following
* request. This ensures that any the context object is still pinned
* for any residual writes the HW makes into it on the context switch
@@ -826,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
@@ -902,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
struct drm_i915_gem_request *req, *tmp;
LIST_HEAD(cancel_list);
- WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
spin_lock_bh(&engine->execlist_lock);
list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -929,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ RING_MI_MODE(engine->mmio_base),
+ MODE_IDLE, MODE_IDLE,
+ 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
@@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
u32 *lrc_reg_state;
int ret;
- lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
if (ce->pin_count++)
return 0;
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
{
struct intel_context *ce = &ctx->engine[engine->id];
- lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(ce->pin_count == 0);
if (--ce->pin_count)
@@ -1296,6 +1296,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
}
+
+ /* WaMediaPoolStateCmdInWABB:bxt */
+ if (HAS_POOLED_EU(engine->i915)) {
+ /*
+ * EU pool configuration is setup along with golden context
+ * during context initialization. This value depends on
+ * device type (2x6 or 3x6) and needs to be updated based
+ * on which subslice is disabled especially for 2x6
+ * devices, however it is safe to load default
+ * configuration of 3x6 device instead of masking off
+ * corresponding bits because HW ignores bits of a disabled
+ * subslice and drops down to appropriate config. Please
+ * see render_state_setup() in i915_gem_render_state.c for
+ * possible configurations, to avoid duplication they are
+ * not shown here again.
+ */
+ u32 eu_pool_config = 0x00777000;
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+ wa_ctx_emit(batch, index, eu_pool_config);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
+
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1353,8 +1378,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
- PAGE_ALIGN(size));
+ engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+ PAGE_ALIGN(size));
if (IS_ERR(engine->wa_ctx.obj)) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
ret = PTR_ERR(engine->wa_ctx.obj);
@@ -1614,36 +1639,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1780,16 +1787,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
return 0;
}
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
/*
@@ -1805,14 +1802,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
- /* See bxt_a_get_seqno() explaining the reason for the clflush. */
- intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -1838,7 +1827,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ intel_logical_ring_emit(ringbuf, request->seqno);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
@@ -1958,6 +1947,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ intel_engine_fini_breadcrumbs(engine);
+
if (engine->status_page.obj) {
i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
@@ -1979,15 +1970,11 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->emit_request = gen8_emit_request;
engine->emit_flush = gen8_emit_flush;
- engine->irq_get = gen8_logical_ring_get_irq;
- engine->irq_put = gen8_logical_ring_put_irq;
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
- engine->get_seqno = gen8_get_seqno;
- engine->set_seqno = gen8_set_seqno;
- if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
- engine->set_seqno = bxt_a_set_seqno;
- }
}
static inline void
@@ -1995,7 +1982,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
{
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
- init_waitqueue_head(&engine->irq_queue);
}
static int
@@ -2016,12 +2002,94 @@ lrc_setup_hws(struct intel_engine_cs *engine,
return 0;
}
+static int
+logical_ring_init(struct intel_engine_cs *engine)
+{
+ struct i915_gem_context *dctx = engine->i915->kernel_context;
+ int ret;
+
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto error;
+
+ ret = i915_cmd_parser_init_ring(engine);
+ if (ret)
+ goto error;
+
+ ret = execlists_context_deferred_alloc(dctx, engine);
+ if (ret)
+ goto error;
+
+ /* As this is the default context, always pin it */
+ ret = intel_lr_context_pin(dctx, engine);
+ if (ret) {
+ DRM_ERROR("Failed to pin context for %s: %d\n",
+ engine->name, ret);
+ goto error;
+ }
+
+ /* And setup the hardware status page. */
+ ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+ if (ret) {
+ DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ intel_logical_ring_cleanup(engine);
+ return ret;
+}
+
+static int logical_render_ring_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ if (HAS_L3_DPF(dev_priv))
+ engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ /* Override some for render ring. */
+ if (INTEL_GEN(dev_priv) >= 9)
+ engine->init_hw = gen9_init_render_ring;
+ else
+ engine->init_hw = gen8_init_render_ring;
+ engine->init_context = gen8_init_rcs_context;
+ engine->cleanup = intel_fini_pipe_control;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_request = gen8_emit_request_render;
+
+ ret = intel_init_pipe_control(engine, 4096);
+ if (ret)
+ return ret;
+
+ ret = intel_init_workaround_bb(engine);
+ if (ret) {
+ /*
+ * We continue even if we fail to initialize WA batch
+ * because we only expect rare glitches but nothing
+ * critical to prevent us from using GPU
+ */
+ DRM_ERROR("WA batch buffer initialization failed: %d\n",
+ ret);
+ }
+
+ ret = logical_ring_init(engine);
+ if (ret) {
+ lrc_destroy_wa_ctx_obj(engine);
+ }
+
+ return ret;
+}
+
static const struct logical_ring_info {
const char *name;
unsigned exec_id;
unsigned guc_id;
u32 mmio_base;
unsigned irq_shift;
+ int (*init)(struct intel_engine_cs *engine);
} logical_rings[] = {
[RCS] = {
.name = "render ring",
@@ -2029,6 +2097,7 @@ static const struct logical_ring_info {
.guc_id = GUC_RENDER_ENGINE,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
+ .init = logical_render_ring_init,
},
[BCS] = {
.name = "blitter ring",
@@ -2036,6 +2105,7 @@ static const struct logical_ring_info {
.guc_id = GUC_BLITTER_ENGINE,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
+ .init = logical_ring_init,
},
[VCS] = {
.name = "bsd ring",
@@ -2043,6 +2113,7 @@ static const struct logical_ring_info {
.guc_id = GUC_VIDEO_ENGINE,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ .init = logical_ring_init,
},
[VCS2] = {
.name = "bsd2 ring",
@@ -2050,6 +2121,7 @@ static const struct logical_ring_info {
.guc_id = GUC_VIDEO_ENGINE2,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ .init = logical_ring_init,
},
[VECS] = {
.name = "video enhancement ring",
@@ -2057,14 +2129,14 @@ static const struct logical_ring_info {
.guc_id = GUC_VIDEOENHANCE_ENGINE,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
+ .init = logical_ring_init,
},
};
static struct intel_engine_cs *
-logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
{
const struct logical_ring_info *info = &logical_rings[id];
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[id];
enum forcewake_domains fw_domains;
@@ -2107,169 +2179,62 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
logical_ring_default_irqs(engine, info->irq_shift);
intel_engine_init_hangcheck(engine);
- i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
return engine;
}
-static int
-logical_ring_init(struct intel_engine_cs *engine)
-{
- struct i915_gem_context *dctx = engine->i915->kernel_context;
- int ret;
-
- ret = i915_cmd_parser_init_ring(engine);
- if (ret)
- goto error;
-
- ret = execlists_context_deferred_alloc(dctx, engine);
- if (ret)
- goto error;
-
- /* As this is the default context, always pin it */
- ret = intel_lr_context_pin(dctx, engine);
- if (ret) {
- DRM_ERROR("Failed to pin context for %s: %d\n",
- engine->name, ret);
- goto error;
- }
-
- /* And setup the hardware status page. */
- ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
- if (ret) {
- DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
- goto error;
- }
-
- return 0;
-
-error:
- intel_logical_ring_cleanup(engine);
- return ret;
-}
-
-static int logical_render_ring_init(struct drm_device *dev)
-{
- struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
- int ret;
-
- if (HAS_L3_DPF(dev))
- engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
- /* Override some for render ring. */
- if (INTEL_INFO(dev)->gen >= 9)
- engine->init_hw = gen9_init_render_ring;
- else
- engine->init_hw = gen8_init_render_ring;
- engine->init_context = gen8_init_rcs_context;
- engine->cleanup = intel_fini_pipe_control;
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_request = gen8_emit_request_render;
-
- ret = intel_init_pipe_control(engine);
- if (ret)
- return ret;
-
- ret = intel_init_workaround_bb(engine);
- if (ret) {
- /*
- * We continue even if we fail to initialize WA batch
- * because we only expect rare glitches but nothing
- * critical to prevent us from using GPU
- */
- DRM_ERROR("WA batch buffer initialization failed: %d\n",
- ret);
- }
-
- ret = logical_ring_init(engine);
- if (ret) {
- lrc_destroy_wa_ctx_obj(engine);
- }
-
- return ret;
-}
-
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
- struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
-
- return logical_ring_init(engine);
-}
-
-static int logical_bsd2_ring_init(struct drm_device *dev)
-{
- struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
-
- return logical_ring_init(engine);
-}
-
-static int logical_blt_ring_init(struct drm_device *dev)
-{
- struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
-
- return logical_ring_init(engine);
-}
-
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
- struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
-
- return logical_ring_init(engine);
-}
-
/**
* intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
* @dev: DRM device.
*
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function inits the engines for an Execlists submission style (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does it only for those engines that are present in
+ * the hardware.
*
* Return: non-zero if the initialization failed.
*/
int intel_logical_rings_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int mask = 0;
+ unsigned int i;
int ret;
- ret = logical_render_ring_init(dev);
- if (ret)
- return ret;
+ WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
- if (HAS_BSD(dev)) {
- ret = logical_bsd_ring_init(dev);
- if (ret)
- goto cleanup_render_ring;
- }
+ for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+ if (!HAS_ENGINE(dev_priv, i))
+ continue;
- if (HAS_BLT(dev)) {
- ret = logical_blt_ring_init(dev);
- if (ret)
- goto cleanup_bsd_ring;
- }
+ if (!logical_rings[i].init)
+ continue;
- if (HAS_VEBOX(dev)) {
- ret = logical_vebox_ring_init(dev);
+ ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
if (ret)
- goto cleanup_blt_ring;
+ goto cleanup;
+
+ mask |= ENGINE_MASK(i);
}
- if (HAS_BSD2(dev)) {
- ret = logical_bsd2_ring_init(dev);
- if (ret)
- goto cleanup_vebox_ring;
+ /*
+ * Catch failures to update logical_rings table when the new engines
+ * are added to the driver by a warning and disabling the forgotten
+ * engines.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+ struct intel_device_info *info =
+ (struct intel_device_info *)&dev_priv->info;
+ info->ring_mask = mask;
}
return 0;
-cleanup_vebox_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ intel_logical_ring_cleanup(&dev_priv->engine[i]);
return ret;
}
@@ -2546,7 +2511,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
+ ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
return PTR_ERR(ctx_obj);