Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h  | 290
1 file changed, 161 insertions(+), 129 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 95e17504e46a..1a7e377d4cbb 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -71,12 +71,6 @@ enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_COUNT
 };
 
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR   0
-#define DRM_SCHED_POLICY_FIFO 1
-
 /**
  * struct drm_sched_entity - A wrapper around a job queue (typically
  * attached to the DRM file_priv).
@@ -338,8 +332,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * to schedule the job.
  */
 struct drm_sched_job {
-	struct spsc_node		queue_node;
-	struct list_head		list;
+	u64				id;
+
+	/**
+	 * @submit_ts:
+	 *
+	 * When the job was pushed into the entity queue.
+	 */
+	ktime_t				submit_ts;
 
 	/**
 	 * @sched:
@@ -349,24 +349,30 @@ struct drm_sched_job {
 	 * has finished.
 	 */
 	struct drm_gpu_scheduler	*sched;
+
 	struct drm_sched_fence		*s_fence;
+	struct drm_sched_entity		*entity;
+
+	enum drm_sched_priority		s_priority;
 	u32				credits;
+	/** @last_dependency: tracks @dependencies as they signal */
+	unsigned int			last_dependency;
+	atomic_t			karma;
+
+	struct spsc_node		queue_node;
+	struct list_head		list;
 
 	/*
 	 * work is used only after finish_cb has been used and will not be
 	 * accessed anymore.
 	 */
 	union {
-		struct dma_fence_cb	finish_cb;
-		struct work_struct	work;
+		struct dma_fence_cb		finish_cb;
+		struct work_struct		work;
 	};
 
-	uint64_t			id;
-	atomic_t			karma;
-	enum drm_sched_priority		s_priority;
-	struct drm_sched_entity		*entity;
 	struct dma_fence_cb		cb;
+
 	/**
 	 * @dependencies:
 	 *
@@ -375,26 +381,17 @@ struct drm_sched_job {
 	 * drm_sched_job_add_implicit_dependencies().
 	 */
 	struct xarray			dependencies;
-
-	/** @last_dependency: tracks @dependencies as they signal */
-	unsigned long			last_dependency;
-
-	/**
-	 * @submit_ts:
-	 *
-	 * When the job was pushed into the entity queue.
-	 */
-	ktime_t				submit_ts;
 };
 
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
-					    int threshold)
-{
-	return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
-
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ */
 enum drm_gpu_sched_stat {
-	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+	DRM_GPU_SCHED_STAT_NONE,
 	DRM_GPU_SCHED_STAT_NOMINAL,
 	DRM_GPU_SCHED_STAT_ENODEV,
 };
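For readers following the struct reorganisation above: struct drm_sched_job is meant to be embedded in a driver-private job structure and recovered with container_of() in the backend callbacks. A minimal sketch, assuming a hypothetical my_job wrapper that is not part of this patch:

struct my_job {
	struct drm_sched_job	base;		/* scheduler bookkeeping */
	struct dma_fence	*hw_fence;	/* driver-private payload, hypothetical */
};

static inline struct my_job *to_my_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct my_job, base);
}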
@@ -420,10 +417,36 @@ struct drm_sched_backend_ops {
 					 struct drm_sched_entity *s_entity);
 
 	/**
-	 * @run_job: Called to execute the job once all of the dependencies
-	 * have been resolved. This may be called multiple times, if
-	 * timedout_job() has happened and drm_sched_job_recovery()
-	 * decides to try it again.
+	 * @run_job: Called to execute the job once all of the dependencies
+	 * have been resolved.
+	 *
+	 * @sched_job: the job to run
+	 *
+	 * The deprecated drm_sched_resubmit_jobs() (called by &struct
+	 * drm_sched_backend_ops.timedout_job) can invoke this again with the
+	 * same parameters. Using this is discouraged because it violates
+	 * dma_fence rules, notably dma_fence_init() has to be called on
+	 * already initialized fences for a second time. Moreover, this is
+	 * dangerous because attempts to allocate memory might deadlock with
+	 * memory management code waiting for the reset to complete.
+	 *
+	 * TODO: Document what drivers should do / use instead.
+	 *
+	 * This method is called in a workqueue context - either from the
+	 * submit_wq the driver passed through drm_sched_init(), or, if the
+	 * driver passed NULL, a separate, ordered workqueue the scheduler
+	 * allocated.
+	 *
+	 * Note that the scheduler expects to 'inherit' its own reference to
+	 * this fence from the callback. It does not invoke an extra
+	 * dma_fence_get() on it. Consequently, this callback must take a
+	 * reference for the scheduler, and additional ones for the driver's
+	 * respective needs.
+	 *
+	 * Return:
+	 * * On success: dma_fence the driver must signal once the hardware has
+	 *   completed the job ("hardware fence").
+	 * * On failure: NULL or an ERR_PTR.
 	 */
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
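To illustrate the reference rule documented above, here is a hedged sketch of a run_job() callback. my_ring_submit() is a hypothetical helper returning a hardware fence that still carries the initial reference from dma_fence_init(); to_my_job() is the wrapper accessor sketched earlier:

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = to_my_job(sched_job);
	struct dma_fence *fence;

	fence = my_ring_submit(job);		/* hypothetical hw submission */
	if (IS_ERR_OR_NULL(fence))
		return fence;			/* the scheduler handles the failure */

	/*
	 * The scheduler inherits the fence's initial reference, so take an
	 * extra one for the driver-side pointer.
	 */
	job->hw_fence = dma_fence_get(fence);

	return fence;
}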
@@ -431,43 +454,52 @@
 	 * @timedout_job: Called when a job has taken too long to execute,
 	 * to trigger GPU recovery.
 	 *
-	 * This method is called in a workqueue context.
+	 * @sched_job: The job that has timed out
+	 *
+	 * Drivers typically issue a reset to recover from GPU hangs.
+	 * This procedure looks very different depending on whether a firmware
+	 * or a hardware scheduler is being used.
+	 *
+	 * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+	 * scheduler has one entity. Hence, the steps taken typically look as
+	 * follows:
+	 *
+	 * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+	 *    scheduler workqueues and cancel the timeout work, guaranteeing
+	 *    that nothing is queued while the ring is being removed.
+	 * 2. Remove the ring. The firmware will make sure that the
+	 *    corresponding parts of the hardware are reset, and that other
+	 *    rings are not impacted.
+	 * 3. Kill the entity and the associated scheduler.
+	 *
 	 *
-	 * Drivers typically issue a reset to recover from GPU hangs, and this
-	 * procedure usually follows the following workflow:
+	 * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+	 * one or more entities to one ring. This implies that all entities
+	 * associated with the affected scheduler cannot be torn down, because
+	 * this would also affect innocent userspace processes which did not
+	 * submit faulty jobs.
 	 *
-	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
-	 *    scheduler thread and cancel the timeout work, guaranteeing that
-	 *    nothing is queued while we reset the hardware queue
-	 * 2. Try to gracefully stop non-faulty jobs (optional)
-	 * 3. Issue a GPU reset (driver-specific)
-	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
-	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
-	 *    jobs can be queued, and the scheduler thread is unblocked
+	 * Consequently, the procedure to recover with a hardware scheduler
+	 * should look like this:
+	 *
+	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+	 * 2. Kill the entity the faulty job stems from.
+	 * 3. Issue a GPU reset on all faulty rings (driver-specific).
+	 * 4. Re-submit jobs on all impacted schedulers by re-submitting them
+	 *    to the entities which are still alive.
+	 * 5. Restart all schedulers that were stopped in step #1 using
+	 *    drm_sched_start().
 	 *
 	 * Note that some GPUs have distinct hardware queues but need to reset
 	 * the GPU globally, which requires extra synchronization between the
-	 * timeout handler of the different &drm_gpu_scheduler. One way to
-	 * achieve this synchronization is to create an ordered workqueue
-	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
-	 * queue to drm_sched_init(), to guarantee that timeout handlers are
-	 * executed sequentially. The above workflow needs to be slightly
-	 * adjusted in that case:
-	 *
-	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
-	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
-	 *    the reset (optional)
-	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
-	 * 4. Re-submit jobs on all schedulers impacted by the reset using
-	 *    drm_sched_resubmit_jobs()
-	 * 5. Restart all schedulers that were stopped in step #1 using
-	 *    drm_sched_start()
+	 * timeout handlers of different schedulers. One way to achieve this
+	 * synchronization is to create an ordered workqueue (using
+	 * alloc_ordered_workqueue()) at the driver level, and pass this queue
+	 * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+	 * that timeout handlers are executed sequentially.
 	 *
-	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
-	 * and the underlying driver has started or completed recovery.
+	 * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
 	 *
-	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
-	 * available, i.e. has been unplugged.
 	 */
 	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
 
@@ -476,19 +508,6 @@ struct drm_sched_backend_ops {
 	 * and it's time to clean it up.
 	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
-
-	/**
-	 * @update_job_credits: Called when the scheduler is considering this
-	 * job for execution.
-	 *
-	 * This callback returns the number of credits the job would take if
-	 * pushed to the hardware. Drivers may use this to dynamically update
-	 * the job's credit count. For instance, deduct the number of credits
-	 * for already signalled native fences.
-	 *
-	 * This callback is optional.
-	 */
-	u32 (*update_job_credits)(struct drm_sched_job *sched_job);
 };
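A hedged sketch of a timedout_job() handler following the hardware-scheduler steps documented above, shown for the single-ring case; my_device_unplugged(), my_kill_entity() and my_reset_ring() are hypothetical driver helpers:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	if (my_device_unplugged(sched))		/* hypothetical check */
		return DRM_GPU_SCHED_STAT_ENODEV;

	drm_sched_stop(sched, sched_job);	/* step 1 */
	my_kill_entity(sched_job->entity);	/* step 2, hypothetical */
	my_reset_ring(sched);			/* step 3, hypothetical */
	/* step 4 is driver policy: re-submit jobs to surviving entities */
	drm_sched_start(sched, 0);		/* step 5 */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}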
@@ -553,18 +572,66 @@ struct drm_gpu_scheduler {
 	struct device		*dev;
 };
 
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ *	       allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ *	     as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ *		This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
+ *		used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+	const struct drm_sched_backend_ops *ops;
+	struct workqueue_struct *submit_wq;
+	struct workqueue_struct *timeout_wq;
+	u32 num_rqs;
+	u32 credit_limit;
+	unsigned int hang_limit;
+	long timeout;
+	atomic_t *score;
+	const char *name;
+	struct device *dev;
+};
+
+/* Scheduler operations */
+
 int drm_sched_init(struct drm_gpu_scheduler *sched,
-		   const struct drm_sched_backend_ops *ops,
-		   struct workqueue_struct *submit_wq,
-		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
-		   long timeout, struct workqueue_struct *timeout_wq,
-		   atomic_t *score, const char *name, struct device *dev);
+		   const struct drm_sched_init_args *args);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+			      unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+		    unsigned int num_sched_list);
+
+/* Jobs */
+
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       u32 credits, void *owner);
 void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
 int drm_sched_job_add_dependency(struct drm_sched_job *job,
 				 struct dma_fence *fence);
 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
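With the new argument struct, driver initialization might look like the following hedged sketch; field values are illustrative only, and my_sched_ops and my_device are hypothetical:

static int my_sched_setup(struct my_device *mdev)
{
	const struct drm_sched_init_args args = {
		.ops		= &my_sched_ops,	/* hypothetical backend ops */
		.submit_wq	= NULL,			/* scheduler allocates an ordered wq */
		.timeout_wq	= NULL,			/* falls back to system_wq */
		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
		.credit_limit	= 64,			/* illustrative value */
		.hang_limit	= 0,			/* deprecated, keep at 0 */
		.timeout	= msecs_to_jiffies(500),
		.score		= NULL,
		.name		= "my-ring",
		.dev		= mdev->dev,
	};

	return drm_sched_init(&mdev->sched, &args);
}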
@@ -577,35 +644,18 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 					    struct drm_gem_object *obj,
 					    bool write);
-
-
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
-				   struct drm_gpu_scheduler **sched_list,
-				   unsigned int num_sched_list);
-
-void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+				  struct dma_fence *fence);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
-void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
 void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_reset_karma(struct drm_sched_job *bad);
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
-				    struct drm_sched_entity *entity);
-void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
-				struct drm_sched_entity *entity);
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+					    int threshold)
+{
+	return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
 
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq, ktime_t ts);
+/* Entities */
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  enum drm_sched_priority priority,
@@ -615,29 +665,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_fini(struct drm_sched_entity *entity);
 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 int drm_sched_entity_error(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_alloc(
-	struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
-			  struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
-			       struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
-			      unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
-		    unsigned int num_sched_list);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+				   struct drm_gpu_scheduler **sched_list,
+				   unsigned int num_sched_list);
 
 #endif
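Putting the job helpers above together, a hedged end-to-end submission sketch; error paths are trimmed and the my_* names remain hypothetical:

static int my_submit(struct my_device *mdev, struct my_job *job)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &mdev->entity, 1, NULL);
	if (ret)
		return ret;

	/* add dependencies here, e.g. via drm_sched_job_add_dependency() */

	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);

	return 0;
}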