42 files changed, 858 insertions, 559 deletions
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index 910ffb7051f4..420467a5325c 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -133,11 +133,20 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx) dma_fence_put(fence); } +static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + + aie2_hwctx_wait_for_idle(hwctx); + aie2_hwctx_stop(xdna, hwctx, NULL); + aie2_hwctx_status_shift_stop(hwctx); + + return 0; +} + void aie2_hwctx_suspend(struct amdxdna_client *client) { struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; /* * Command timeout is unlikely. But if it happens, it doesn't @@ -145,19 +154,20 @@ void aie2_hwctx_suspend(struct amdxdna_client *client) * and abort all commands. */ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - guard(mutex)(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { - aie2_hwctx_wait_for_idle(hwctx); - aie2_hwctx_stop(xdna, hwctx, NULL); - aie2_hwctx_status_shift_stop(hwctx); - } + amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb); } -void aie2_hwctx_resume(struct amdxdna_client *client) +static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + + aie2_hwctx_status_restore(hwctx); + return aie2_hwctx_restart(xdna, hwctx); +} + +int aie2_hwctx_resume(struct amdxdna_client *client) { struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; /* * The resume path cannot guarantee that mailbox channel can be @@ -165,11 +175,7 @@ void aie2_hwctx_resume(struct amdxdna_client *client) * mailbox channel, error will return. 
*/ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - guard(mutex)(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { - aie2_hwctx_status_restore(hwctx); - aie2_hwctx_restart(xdna, hwctx); - } + return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); } static void diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 82412eec9a4b..9caad083543d 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6 return 0; } +static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg) +{ + u32 *bitmap = arg; + + *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col); + + return 0; +} + int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled) { DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS); struct amdxdna_dev *xdna = ndev->xdna; struct amdxdna_client *client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; dma_addr_t dma_addr; u32 aie_bitmap = 0; u8 *buff_addr; - int ret, idx; + int ret; buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr, DMA_FROM_DEVICE, GFP_KERNEL); @@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, return -ENOMEM; /* Go through each hardware context and mark the AIE columns that are active */ - list_for_each_entry(client, &xdna->client_list, node) { - idx = srcu_read_lock(&client->hwctx_srcu); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) - aie_bitmap |= amdxdna_hwctx_col_map(hwctx); - srcu_read_unlock(&client->hwctx_srcu, idx); - } + list_for_each_entry(client, &xdna->client_list, node) + amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map); *cols_filled = 0; req.dump_buff_addr = dma_addr; diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 6fc3191c3097..16ac0cab4f44 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -10,6 +10,7 @@ #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> +#include <linux/cleanup.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/iommu.h> @@ -465,8 +466,11 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna) return ret; } - list_for_each_entry(client, &xdna->client_list, node) - aie2_hwctx_resume(client); + list_for_each_entry(client, &xdna->client_list, node) { + ret = aie2_hwctx_resume(client); + if (ret) + break; + } return ret; } @@ -779,65 +783,56 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client, return ret; } +static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_drm_query_hwctx __user *buf, *tmp __free(kfree) = NULL; + struct amdxdna_drm_get_info *get_info_args = arg; + + if (get_info_args->buffer_size < sizeof(*tmp)) + return -EINVAL; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp->pid = hwctx->client->pid; + tmp->context_id = hwctx->id; + tmp->start_col = hwctx->start_col; + tmp->num_col = hwctx->num_col; + tmp->command_submissions = hwctx->priv->seq; + tmp->command_completions = hwctx->priv->completed; + + buf = u64_to_user_ptr(get_info_args->buffer); + + if (copy_to_user(buf, tmp, sizeof(*tmp))) + return -EFAULT; + + get_info_args->buffer += sizeof(*tmp); + get_info_args->buffer_size -= sizeof(*tmp); + + return 0; +} + static int 
aie2_get_hwctx_status(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) { - struct amdxdna_drm_query_hwctx __user *buf; struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_drm_query_hwctx *tmp; + struct amdxdna_drm_get_info info_args; struct amdxdna_client *tmp_client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - bool overflow = false; - u32 req_bytes = 0; - u32 hw_i = 0; - int ret = 0; - int idx; + int ret; drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; + info_args.buffer = args->buffer; + info_args.buffer_size = args->buffer_size; - buf = u64_to_user_ptr(args->buffer); list_for_each_entry(tmp_client, &xdna->client_list, node) { - idx = srcu_read_lock(&tmp_client->hwctx_srcu); - amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) { - req_bytes += sizeof(*tmp); - if (args->buffer_size < req_bytes) { - /* Continue iterating to get the required size */ - overflow = true; - continue; - } - - memset(tmp, 0, sizeof(*tmp)); - tmp->pid = tmp_client->pid; - tmp->context_id = hwctx->id; - tmp->start_col = hwctx->start_col; - tmp->num_col = hwctx->num_col; - tmp->command_submissions = hwctx->priv->seq; - tmp->command_completions = hwctx->priv->completed; - - if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) { - ret = -EFAULT; - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); - goto out; - } - hw_i++; - } - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); - } - - if (overflow) { - XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.", - args->buffer_size, req_bytes); - ret = -EINVAL; + ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb); + if (ret) + break; } -out: - kfree(tmp); - args->buffer_size = req_bytes; + args->buffer_size = (u32)(info_args.buffer - args->buffer); return ret; } diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index 488d8ee568eb..91a8e948f82a 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -289,7 +289,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx); int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); void aie2_hwctx_suspend(struct amdxdna_client *client); -void aie2_hwctx_resume(struct amdxdna_client *client); +int aie2_hwctx_resume(struct amdxdna_client *client); int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq); diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index b47a7f8e9017..4bfe4ef20550 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -68,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx, synchronize_srcu(ss); /* At this point, user is not able to submit new commands */ - mutex_lock(&xdna->dev_lock); xdna->dev_info->ops->hwctx_fini(hwctx); - mutex_unlock(&xdna->dev_lock); kfree(hwctx->name); kfree(hwctx); } +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)) +{ + struct amdxdna_hwctx *hwctx; + unsigned long hwctx_id; + int ret = 0, idx; + + idx = srcu_read_lock(&client->hwctx_srcu); + amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { + ret = walk(hwctx, arg); + if (ret) + break; + } + srcu_read_unlock(&client->hwctx_srcu, idx); + + return ret; +} + void 
*amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size) { struct amdxdna_cmd *cmd = abo->mem.kva; @@ -126,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client) struct amdxdna_hwctx *hwctx; unsigned long hwctx_id; - mutex_lock(&client->hwctx_lock); amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { XDNA_DBG(client->xdna, "PID %d close HW context %d", client->pid, hwctx->id); xa_erase(&client->hwctx_xa, hwctx->id); - mutex_unlock(&client->hwctx_lock); amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu); - mutex_lock(&client->hwctx_lock); } - mutex_unlock(&client->hwctx_lock); } int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) @@ -225,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d if (!drm_dev_enter(dev, &idx)) return -ENODEV; + mutex_lock(&xdna->dev_lock); hwctx = xa_erase(&client->hwctx_xa, args->handle); if (!hwctx) { ret = -EINVAL; @@ -241,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle); out: + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return ret; } diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index c652229547a3..7cd7a55936f0 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -139,14 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo) void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size); int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); -static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx) -{ - return GENMASK(hwctx->start_col + hwctx->num_col - 1, - hwctx->start_col); -} - void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job); void amdxdna_hwctx_remove_all(struct amdxdna_client *client); +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)); int amdxdna_cmd_submit(struct amdxdna_client *client, u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt, diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index fbca94183f96..8ef5e4f27f5e 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -81,7 +81,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) ret = -ENODEV; goto unbind_sva; } - mutex_init(&client->hwctx_lock); init_srcu_struct(&client->hwctx_srcu); xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC); mutex_init(&client->mm_lock); @@ -116,7 +115,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp) xa_destroy(&client->hwctx_xa); cleanup_srcu_struct(&client->hwctx_srcu); - mutex_destroy(&client->hwctx_lock); mutex_destroy(&client->mm_lock); if (client->dev_heap) drm_gem_object_put(to_gobj(client->dev_heap)); @@ -142,8 +140,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id) mutex_lock(&xdna->dev_lock); list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); amdxdna_hwctx_remove_all(client); + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return 0; @@ -330,11 +328,8 @@ static void amdxdna_remove(struct pci_dev *pdev) struct amdxdna_client, node); while (client) { list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); - amdxdna_hwctx_remove_all(client); - mutex_lock(&xdna->dev_lock); client = list_first_entry_or_null(&xdna->client_list, struct amdxdna_client, node); } 
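The amdxdna hunks above replace the open-coded amdxdna_for_each_hwctx() loops, which each had to take hwctx_lock or bracket the iteration with SRCU, with a single amdxdna_hwctx_walk() helper that does the SRCU read-side locking internally and invokes a callback per context. A minimal sketch of that pattern, assuming a hypothetical context-counting callback (only amdxdna_hwctx_walk() and the driver structs come from the patch):

static int count_hwctx_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	u32 *count = arg;

	(*count)++;
	return 0;	/* a non-zero return stops the walk early */
}

static u32 count_client_hwctx(struct amdxdna_client *client)
{
	u32 count = 0;

	/* SRCU read-side locking happens inside the helper */
	amdxdna_hwctx_walk(client, &count, count_hwctx_cb);
	return count;
}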
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 40bbb3c06320..b6b3b424d1d5 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -116,8 +116,6 @@ struct amdxdna_device_id { struct amdxdna_client { struct list_head node; pid_t pid; - struct mutex hwctx_lock; /* protect hwctx */ - /* do NOT wait this srcu when hwctx_lock is held */ struct srcu_struct hwctx_srcu; struct xarray hwctx_xa; u32 next_hwctxid; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index c0ad8f59e483..609cdb9d371e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux) platform->bridge.type = platform->pdata.panel_bridge ? DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + platform->bridge.support_hdcp = true; drm_bridge_add(&platform->bridge); diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index d7e1c2f8f53c..e9f16dbc9535 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev) if (conn->bridge.ddc) conn->bridge.ops |= DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; - if (conn->hpd_gpio) + /* Detecting the monitor requires reading DPCD */ + if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort) conn->bridge.ops |= DRM_BRIDGE_OP_DETECT; if (conn->hpd_irq >= 0) conn->bridge.ops |= DRM_BRIDGE_OP_HPD; diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 8c915427d053..091c5335355a 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -20,6 +20,7 @@ #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/display/drm_hdcp_helper.h> #include <drm/display/drm_hdmi_audio_helper.h> #include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_helper.h> @@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge *bridge, *panel_bridge = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; + bool support_hdcp = false; int connector_type; int ret; @@ -763,6 +765,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (drm_bridge_is_panel(bridge)) panel_bridge = bridge; + + if (bridge->support_hdcp) + support_hdcp = true; } if (connector_type == DRM_MODE_CONNECTOR_Unknown) @@ -849,6 +854,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (panel_bridge) drm_panel_bridge_set_orientation(connector, panel_bridge); + if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) && + IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER)) + drm_connector_attach_content_protection_property(connector, true); + return connector; } EXPORT_SYMBOL_GPL(drm_bridge_connector_init); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 5bb4c77db2c3..647b49ff2da5 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -271,107 +271,50 @@ npages_in_range(unsigned long start, unsigned long end) } /** - * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier - * @notifier: Pointer to the GPU SVM notifier 
structure. - * @start: Start address of the range - * @end: End address of the range + * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM + * @gpusvm: Pointer to the GPU SVM structure. + * @start: Start address of the notifier + * @end: End address of the notifier * - * Return: A pointer to the drm_gpusvm_range if found or NULL + * Return: A pointer to the drm_gpusvm_notifier if found or NULL */ -struct drm_gpusvm_range * -drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, - unsigned long end) +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end) { struct interval_tree_node *itree; - itree = interval_tree_iter_first(¬ifier->root, start, end - 1); + itree = interval_tree_iter_first(&gpusvm->root, start, end - 1); if (itree) - return container_of(itree, struct drm_gpusvm_range, itree); + return container_of(itree, struct drm_gpusvm_notifier, itree); else return NULL; } -EXPORT_SYMBOL_GPL(drm_gpusvm_range_find); +EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find); /** - * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier - * @range__: Iterator variable for the ranges - * @next__: Iterator variable for the ranges temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the range - * @end__: End address of the range - * - * This macro is used to iterate over GPU SVM ranges in a notifier while - * removing ranges from it. - */ -#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ - for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ - (next__) = __drm_gpusvm_range_next(range__); \ - (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ - (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) - -/** - * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list - * @notifier: a pointer to the current drm_gpusvm_notifier + * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier + * @notifier: Pointer to the GPU SVM notifier structure. + * @start: Start address of the range + * @end: End address of the range * - * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if - * the current notifier is the last one or if the input notifier is - * NULL. 
+ * Return: A pointer to the drm_gpusvm_range if found or NULL */ -static struct drm_gpusvm_notifier * -__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) -{ - if (notifier && !list_is_last(¬ifier->entry, - ¬ifier->gpusvm->notifier_list)) - return list_next_entry(notifier, entry); - - return NULL; -} - -static struct drm_gpusvm_notifier * -notifier_iter_first(struct rb_root_cached *root, unsigned long start, - unsigned long last) +struct drm_gpusvm_range * +drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, + unsigned long end) { struct interval_tree_node *itree; - itree = interval_tree_iter_first(root, start, last); + itree = interval_tree_iter_first(¬ifier->root, start, end - 1); if (itree) - return container_of(itree, struct drm_gpusvm_notifier, itree); + return container_of(itree, struct drm_gpusvm_range, itree); else return NULL; } - -/** - * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm. - */ -#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = __drm_gpusvm_notifier_next(notifier__)) - -/** - * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @next__: Iterator variable for the notifiers temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm while - * removing notifiers from it. - */ -#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \ - (next__) = __drm_gpusvm_notifier_next(notifier__); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) +EXPORT_SYMBOL_GPL(drm_gpusvm_range_find); /** * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier. @@ -473,22 +416,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, EXPORT_SYMBOL_GPL(drm_gpusvm_init); /** - * drm_gpusvm_notifier_find() - Find GPU SVM notifier - * @gpusvm: Pointer to the GPU SVM structure - * @fault_addr: Fault address - * - * This function finds the GPU SVM notifier associated with the fault address. - * - * Return: Pointer to the GPU SVM notifier on success, NULL otherwise. 
- */ -static struct drm_gpusvm_notifier * -drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, - unsigned long fault_addr) -{ - return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1); -} - -/** * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct * @@ -943,7 +870,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm, if (!mmget_not_zero(mm)) return ERR_PTR(-EFAULT); - notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr); + notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1); if (!notifier) { notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr); if (IS_ERR(notifier)) { @@ -1107,7 +1034,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm, drm_gpusvm_driver_lock_held(gpusvm); notifier = drm_gpusvm_notifier_find(gpusvm, - drm_gpusvm_range_start(range)); + drm_gpusvm_range_start(range), + drm_gpusvm_range_start(range) + 1); if (WARN_ON_ONCE(!notifier)) return; diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index bbc7fecb6f4a..d6bea8a4fffd 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -421,6 +421,71 @@ */ /** + * DOC: Madvise Logic - Splitting and Traversal + * + * This logic handles GPU VA range updates by generating remap and map operations + * without performing unmaps or merging existing mappings. + * + * 1) The requested range lies entirely within a single drm_gpuva. The logic splits + * the existing mapping at the start and end boundaries and inserts a new map. + * + * :: + * a start end b + * pre: |-----------------------| + * drm_gpuva1 + * + * a start end b + * new: |-----|=========|-------| + * remap map remap + * + * one REMAP and one MAP : Same behaviour as SPLIT and MERGE + * + * 2) The requested range spans multiple drm_gpuva regions. The logic traverses + * across boundaries, remapping the start and end segments, and inserting two + * map operations to cover the full range. + * + * :: a start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c end d + * new: |-------|==========|--------------|========|---------| + * remap1 map1 drm_gpuva2 map2 remap2 + * + * two REMAPS and two MAPS + * + * 3) Either start or end lies within a drm_gpuva. A single remap and map operation + * are generated to update the affected portion. + * + * + * :: a/start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a/start b c end d + * new: |------------------|--------------|========|---------| + * drm_gpuva1 drm_gpuva2 map1 remap1 + * + * :: a start b c/end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c/end d + * new: |-------|==========|--------------|------------------| + * remap1 map1 drm_gpuva2 drm_gpuva3 + * + * one REMAP and one MAP + * + * 4) Both start and end align with existing drm_gpuva boundaries. No operations + * are needed as the range is already covered. + * + * 5) No existing drm_gpuvas. No operations. + * + * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging, + * focusing solely on remap and map operations for efficient traversal and update. 
+ */ + +/** * DOC: Locking * * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of @@ -486,13 +551,18 @@ * u64 addr, u64 range, * struct drm_gem_object *obj, u64 offset) * { + * struct drm_gpuvm_map_req map_req = { + * .map.va.addr = addr, + * .map.va.range = range, + * .map.gem.obj = obj, + * .map.gem.offset = offset, + * }; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op * struct drm_gpuvm_bo *vm_bo; * * driver_lock_va_space(); - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, - * obj, offset); + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req); * if (IS_ERR(ops)) * return PTR_ERR(ops); * @@ -2054,16 +2124,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap); static int op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) + const struct drm_gpuvm_map_req *req) { struct drm_gpuva_op op = {}; + if (!req) + return 0; + op.op = DRM_GPUVA_OP_MAP; - op.map.va.addr = addr; - op.map.va.range = range; - op.map.gem.obj = obj; - op.map.gem.offset = offset; + op.map.va.addr = req->map.va.addr; + op.map.va.range = req->map.va.range; + op.map.gem.obj = req->map.gem.obj; + op.map.gem.offset = req->map.gem.offset; return fn->sm_step_map(&op, priv); } @@ -2088,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, - struct drm_gpuva *va, bool merge) + struct drm_gpuva *va, bool merge, bool madvise) { struct drm_gpuva_op op = {}; + if (madvise) + return 0; + op.op = DRM_GPUVA_OP_UNMAP; op.unmap.va = va; op.unmap.keep = merge; @@ -2102,10 +2177,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, const struct drm_gpuvm_ops *ops, void *priv, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req, + bool madvise) { + struct drm_gem_object *req_obj = req->map.gem.obj; + const struct drm_gpuvm_map_req *op_map = madvise ? 
NULL : req; struct drm_gpuva *va, *next; + u64 req_offset = req->map.gem.offset; + u64 req_range = req->map.va.range; + u64 req_addr = req->map.va.addr; u64 req_end = req_addr + req_range; int ret; @@ -2120,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, u64 end = addr + range; bool merge = !!va->gem.obj; + if (madvise && obj) + continue; + if (addr == req_addr) { merge &= obj == req_obj && offset == req_offset; if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; continue; @@ -2153,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr < req_addr) { @@ -2173,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } @@ -2180,6 +2269,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = req_addr, + .map.va.range = end - req_addr, + }; + + ret = op_map_cb(ops, priv, &map_req); + if (ret) + return ret; + } + continue; } @@ -2195,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr > req_addr) { @@ -2203,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, (addr - req_addr); if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + continue; } @@ -2231,14 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = addr, + .map.va.range = req_end - addr, + }; + + return op_map_cb(ops, priv, &map_req); + } break; } } } - - return op_map_cb(ops, priv, - req_addr, req_range, - req_obj, req_offset); + return op_map_cb(ops, priv, op_map); } static int @@ -2290,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, if (ret) return ret; } else { - ret = op_unmap_cb(ops, priv, va, false); + ret = op_unmap_cb(ops, priv, va, false, false); if (ret) return ret; } @@ -2303,10 +2415,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps * @gpuvm: the &drm_gpuvm representing the GPU VA space * @priv: pointer to a driver private data structure - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: ptr to struct drm_gpuvm_map_req * * This function iterates the given range of the GPU VA space. 
It utilizes the * &drm_gpuvm_ops to call back into the driver providing the split and merge @@ -2333,8 +2442,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, */ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { const struct drm_gpuvm_ops *ops = gpuvm->ops; @@ -2343,9 +2451,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, ops->sm_step_unmap))) return -EINVAL; - return __drm_gpuvm_sm_map(gpuvm, ops, priv, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); @@ -2421,10 +2527,7 @@ static const struct drm_gpuvm_ops lock_ops = { * @gpuvm: the &drm_gpuvm representing the GPU VA space * @exec: the &drm_exec locking context * @num_fences: for newly mapped objects, the # of fences to reserve - * @req_addr: the start address of the range to unmap - * @req_range: the range of the mappings to unmap - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: ptr to drm_gpuvm_map_req struct * * This function locks (drm_exec_lock_obj()) objects that will be unmapped/ * remapped, and locks+prepares (drm_exec_prepare_object()) objects that @@ -2445,9 +2548,7 @@ static const struct drm_gpuvm_ops lock_ops = { * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range); * break; * case DRIVER_OP_MAP: - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, - * op->addr, op->range, - * obj, op->obj_offset); + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req); * break; * } * @@ -2478,18 +2579,17 @@ static const struct drm_gpuvm_ops lock_ops = { int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + struct drm_gpuvm_map_req *req) { + struct drm_gem_object *req_obj = req->map.gem.obj; + if (req_obj) { int ret = drm_exec_prepare_obj(exec, req_obj, num_fences); if (ret) return ret; } - return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock); @@ -2608,13 +2708,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { .sm_step_unmap = drm_gpuva_sm_step, }; +static struct drm_gpuva_ops * +__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req, + bool madvise) +{ + struct drm_gpuva_ops *ops; + struct { + struct drm_gpuvm *vm; + struct drm_gpuva_ops *ops; + } args; + int ret; + + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (unlikely(!ops)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ops->list); + + args.vm = gpuvm; + args.ops = ops; + + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise); + if (ret) + goto err_free_ops; + + return ops; + +err_free_ops: + drm_gpuva_ops_free(gpuvm, ops); + return ERR_PTR(ret); +} + /** * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge * @gpuvm: the &drm_gpuvm representing the GPU VA space - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: map request arguments * * This function creates a list of operations to perform splitting and merging * of existent 
mapping(s) with the newly requested one. @@ -2642,40 +2771,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { */ struct drm_gpuva_ops * drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { - struct drm_gpuva_ops *ops; - struct { - struct drm_gpuvm *vm; - struct drm_gpuva_ops *ops; - } args; - int ret; - - ops = kzalloc(sizeof(*ops), GFP_KERNEL); - if (unlikely(!ops)) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&ops->list); - - args.vm = gpuvm; - args.ops = ops; - - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, - req_addr, req_range, - req_obj, req_offset); - if (ret) - goto err_free_ops; - - return ops; - -err_free_ops: - drm_gpuva_ops_free(gpuvm, ops); - return ERR_PTR(ret); + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create); /** + * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split + * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @req: map request arguments + * + * This function creates a list of operations to perform splitting + * of existent mapping(s) at start or end, based on the request map. + * + * The list can be iterated with &drm_gpuva_for_each_op and must be processed + * in the given order. It can contain map and remap operations, but it + * also can be empty if no operation is required, e.g. if the requested mapping + * already exists is the exact same way. + * + * There will be no unmap operations, a maximum of two remap operations and two + * map operations. The two map operations correspond to: one from start to the + * end of drm_gpuvaX, and another from the start of drm_gpuvaY to end. + * + * Note that before calling this function again with another mapping request it + * is necessary to update the &drm_gpuvm's view of the GPU VA space. The + * previously obtained operations must be either processed or abandoned. To + * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(), + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be + * used. + * + * After the caller finished processing the returned &drm_gpuva_ops, they must + * be freed with &drm_gpuva_ops_free. + * + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure + */ +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req) +{ + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create); + +/** * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on * unmap * @gpuvm: the &drm_gpuvm representing the GPU VA space diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index fe03915d0095..a712e177b350 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1097,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, EXPORT_SYMBOL(mipi_dsi_dcs_read); /** + * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err + * @ctx: Context for multiple DSI transactions + * @cmd: DCS command + * @data: buffer in which to receive data + * @len: size of receive buffer + * + * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it + * convenient to make several calls in a row. 
+ */ +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_DCS_READ, + .tx_buf = &cmd, + .tx_len = 1, + .rx_buf = data, + .rx_len = len + }; + ssize_t ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_device_transfer(dsi, &msg); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "dcs read with command %#x failed: %d\n", cmd, + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_read_multi); + +/** * mipi_dsi_dcs_nop() - send DCS nop packet * @dsi: DSI peripheral device * diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 2896fa7501b1..3d97990170bf 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -185,12 +185,17 @@ struct pvr_vm_bind_op { static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) { switch (bind_op->type) { - case PVR_VM_BIND_TYPE_MAP: + case PVR_VM_BIND_TYPE_MAP: { + const struct drm_gpuvm_map_req map_req = { + .map.va.addr = bind_op->device_addr, + .map.va.range = bind_op->size, + .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj), + .map.gem.offset = bind_op->offset, + }; + return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, - bind_op, bind_op->device_addr, - bind_op->size, - gem_from_pvr_gem(bind_op->pvr_obj), - bind_op->offset); + bind_op, &map_req); + } case PVR_VM_BIND_TYPE_UNMAP: return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 3cd8562a5109..210604181c05 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -371,6 +371,12 @@ struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, u64 offset, u64 range_start, u64 range_end) { + struct drm_gpuva_op_map op_map = { + .va.addr = range_start, + .va.range = range_end - range_start, + .gem.obj = obj, + .gem.offset = offset, + }; struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, if (obj) GEM_WARN_ON((range_end - range_start) > obj->size); - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); + drm_gpuva_init_from_op(&vma->base, &op_map); vma->mapped = false; ret = drm_gpuva_insert(&vm->base, &vma->base); @@ -1171,11 +1177,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) op->obj_offset); break; case MSM_VM_BIND_OP_MAP: - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, - op->iova, op->range, - op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); break; + } default: /* * lookup_op() should have already thrown an error for @@ -1282,10 +1294,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job) if (op->flags & MSM_VM_BIND_OP_DUMP) arg.flags |= MSM_VMA_DUMP; fallthrough; - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, - op->range, op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = 
op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req); break; + } default: /* * lookup_op() should have already thrown an error for diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index ddfc46bc1b3e..d94a85509176 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, break; case OP_MAP: { struct nouveau_uvma_region *reg; + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->va.addr, + .map.va.range = op->va.range, + .map.gem.obj = op->gem.obj, + .map.gem.offset = op->gem.offset, + }; reg = nouveau_uvma_region_find_first(uvmm, op->va.addr, @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, } op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, - op->va.addr, - op->va.range, - op->gem.obj, - op->gem.offset); + &map_req); if (IS_ERR(op->ops)) { ret = PTR_ERR(op->ops); goto unwind_continue; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 17898a29efe8..561e6643dcbb 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel) static int nt35560_set_brightness(struct backlight_device *bl) { struct nt35560 *nt = bl_get_data(bl); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int period_ns = 1023; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int duty_ns = bl->props.brightness; + int period_ns = 1023; u8 pwm_ratio; u8 pwm_div; - u8 par; - int ret; if (backlight_is_blank(bl)) { /* Disable backlight */ - par = 0x00; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); - return ret; - } - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, + MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x00); + return dsi_ctx.accum_err; } /* Calculate the PWM duty cycle in n/256's */ @@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl) /* Set up PWM dutycycle ONE byte (differs from the standard) */ dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio); - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - &pwm_ratio, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret); - return ret; - } /* * Sequence to write PWMDIV: @@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl) * 0x22 PWMDIV * 0x7F 0xAA CMD2 page 1 lock */ - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret); - return ret; - } - ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret); - return ret; - } - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret); - return ret; - } + 
mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + pwm_ratio); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01); + + mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa); /* Enable backlight */ - par = 0x24; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x24); - return 0; + return dsi_ctx.accum_err; } static const struct backlight_ops nt35560_bl_ops = { @@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = { .max_brightness = 1023, }; -static int nt35560_read_id(struct nt35560 *nt) +static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + struct device dev = dsi_ctx->dsi->dev; u8 vendor, version, panel; u16 val; - int ret; - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1); - if (ret < 0) { - dev_err(nt->dev, "could not vendor ID byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read device version byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read panel ID byte\n"); - return ret; - } + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1); + + if (dsi_ctx->accum_err < 0) + return; if (vendor == 0x00) { - dev_err(nt->dev, "device vendor ID is zero\n"); - return -ENODEV; + dev_err(&dev, "device vendor ID is zero\n"); + dsi_ctx->accum_err = -ENODEV; + return; } val = (vendor << 8) | panel; @@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt) case DISPLAY_SONY_ACX424AKP_ID2: case DISPLAY_SONY_ACX424AKP_ID3: case DISPLAY_SONY_ACX424AKP_ID4: - dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", + dev_info(&dev, + "MTP vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; default: - dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", + dev_info(&dev, + "unknown vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; } - - return 0; } static int nt35560_power_on(struct nt35560 *nt) @@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt) static int nt35560_prepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - const u8 mddi = 3; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int ret; ret = nt35560_power_on(nt); if (ret) return ret; - ret = nt35560_read_id(nt); - if (ret) { - dev_err(nt->dev, "failed to read panel ID (%d)\n", ret); - goto err_power_off; - } + nt35560_read_id(&dsi_ctx); - /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ - ret = mipi_dsi_dcs_set_tear_on(dsi, + /* Enable tearing mode: send TE (tearing effect) at VBLANK */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret) { - dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret); - goto 
err_power_off; - } /* * Set MDDI * * This presumably deactivates the Qualcomm MDDI interface and * selects DSI, similar code is found in other drivers such as the - * Sharp LS043T1LE01 which makes us suspect that this panel may be - * using a Novatek NT35565 or similar display driver chip that shares - * this command. Due to the lack of documentation we cannot know for - * sure. + * Sharp LS043T1LE01. */ - ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI, - &mddi, sizeof(mddi)); - if (ret < 0) { - dev_err(nt->dev, "failed to set MDDI (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3); - /* Exit sleep mode */ - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret); - goto err_power_off; - } - msleep(140); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 140); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display on (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); if (nt->video_mode) { - /* In video mode turn peripheral on */ - ret = mipi_dsi_turn_on_peripheral(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn on peripheral\n"); - goto err_power_off; - } + mipi_dsi_turn_on_peripheral_multi(&dsi_ctx); } - return 0; - -err_power_off: - nt35560_power_off(nt); - return ret; + if (dsi_ctx.accum_err < 0) + nt35560_power_off(nt); + return dsi_ctx.accum_err; } static int nt35560_unprepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display off (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + if (dsi_ctx.accum_err < 0) + return dsi_ctx.accum_err; - /* Enter sleep mode */ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret); - return ret; - } msleep(85); nt35560_power_off(nt); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c index e91f50662997..7e2f4e043d62 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -7,7 +7,9 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 563f16bae543..0dd62e8b2fa7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -203,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_gem_mapping_put(perfcnt->mapping); perfcnt->mapping = NULL; - pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); return 0; @@ -279,7 +278,6 @@ void panfrost_perfcnt_close(struct drm_file *file_priv) if (perfcnt->user == pfile) panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); - 
pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); } diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile index 15294719b09c..02db21748c12 100644 --- a/drivers/gpu/drm/panthor/Makefile +++ b/drivers/gpu/drm/panthor/Makefile @@ -8,6 +8,7 @@ panthor-y := \ panthor_gem.o \ panthor_gpu.o \ panthor_heap.o \ + panthor_hw.o \ panthor_mmu.o \ panthor_sched.o diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index f0b2da5b2b96..81df49880bd8 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -18,6 +18,7 @@ #include "panthor_device.h" #include "panthor_fw.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_mmu.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -244,6 +245,10 @@ int panthor_device_init(struct panthor_device *ptdev) return ret; } + ret = panthor_hw_init(ptdev); + if (ret) + goto err_rpm_put; + ret = panthor_gpu_init(ptdev); if (ret) goto err_rpm_put; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 36f1034839c2..9bf06e55eaee 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1402,3 +1402,8 @@ err_unplug_fw: } MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin"); diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index cb7a335e07d7..db69449a5be0 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -35,40 +35,9 @@ struct panthor_gpu { /** @reqs_acked: GPU request wait queue. */ wait_queue_head_t reqs_acked; -}; - -/** - * struct panthor_model - GPU model description - */ -struct panthor_model { - /** @name: Model name. */ - const char *name; - - /** @arch_major: Major version number of architecture. */ - u8 arch_major; - /** @product_major: Major version number of product. */ - u8 product_major; -}; - -/** - * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified - * by a combination of the major architecture version and the major product - * version. - * @_name: Name for the GPU model. - * @_arch_major: Architecture major. - * @_product_major: Product major. - */ -#define GPU_MODEL(_name, _arch_major, _product_major) \ -{\ - .name = __stringify(_name), \ - .arch_major = _arch_major, \ - .product_major = _product_major, \ -} - -static const struct panthor_model gpu_models[] = { - GPU_MODEL(g610, 10, 7), - {}, + /** @cache_flush_lock: Lock to serialize cache flushes */ + struct mutex cache_flush_lock; }; #define GPU_INTERRUPTS_MASK \ @@ -83,66 +52,6 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev) ptdev->coherent ? 
GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE); } -static void panthor_gpu_init_info(struct panthor_device *ptdev) -{ - const struct panthor_model *model; - u32 arch_major, product_major; - u32 major, minor, status; - unsigned int i; - - ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); - ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); - ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); - ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); - ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES); - ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES); - ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES); - ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES); - ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES); - ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS); - ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE); - ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE); - ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES); - for (i = 0; i < 4; i++) - ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i)); - - ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); - - ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); - ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); - ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); - - arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id); - product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id); - major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id); - minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id); - status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id); - - for (model = gpu_models; model->name; model++) { - if (model->arch_major == arch_major && - model->product_major == product_major) - break; - } - - drm_info(&ptdev->base, - "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", - model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16, - major, minor, status); - - drm_info(&ptdev->base, - "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x", - ptdev->gpu_info.l2_features, - ptdev->gpu_info.tiler_features, - ptdev->gpu_info.mem_features, - ptdev->gpu_info.mmu_features, - ptdev->gpu_info.as_present); - - drm_info(&ptdev->base, - "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx", - ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present, - ptdev->gpu_info.tiler_present); -} - static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { gpu_write(ptdev, GPU_INT_CLEAR, status); @@ -204,8 +113,8 @@ int panthor_gpu_init(struct panthor_device *ptdev) spin_lock_init(&gpu->reqs_lock); init_waitqueue_head(&gpu->reqs_acked); + mutex_init(&gpu->cache_flush_lock); ptdev->gpu = gpu; - panthor_gpu_init_info(ptdev); dma_set_max_seg_size(ptdev->base.dev, UINT_MAX); pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features); @@ -353,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev, bool timedout = false; unsigned long flags; + /* Serialize cache flush operations. 
*/ + guard(mutex)(&ptdev->gpu->cache_flush_lock); + spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags); if (!drm_WARN_ON(&ptdev->base, ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) { diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c new file mode 100644 index 000000000000..4f2858114e5e --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_hw.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#include "panthor_device.h" +#include "panthor_hw.h" +#include "panthor_regs.h" + +#define GPU_PROD_ID_MAKE(arch_major, prod_major) \ + (((arch_major) << 24) | (prod_major)) + +static char *get_gpu_model_name(struct panthor_device *ptdev) +{ + const u32 gpu_id = ptdev->gpu_info.gpu_id; + const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id), + GPU_PROD_MAJOR(gpu_id)); + const bool ray_intersection = !!(ptdev->gpu_info.gpu_features & + GPU_FEATURES_RAY_INTERSECTION); + const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present); + + switch (product_id) { + case GPU_PROD_ID_MAKE(10, 2): + return "Mali-G710"; + case GPU_PROD_ID_MAKE(10, 3): + return "Mali-G510"; + case GPU_PROD_ID_MAKE(10, 4): + return "Mali-G310"; + case GPU_PROD_ID_MAKE(10, 7): + return "Mali-G610"; + case GPU_PROD_ID_MAKE(11, 2): + if (shader_core_count > 10 && ray_intersection) + return "Mali-G715-Immortalis"; + else if (shader_core_count >= 7) + return "Mali-G715"; + + fallthrough; + case GPU_PROD_ID_MAKE(11, 3): + return "Mali-G615"; + case GPU_PROD_ID_MAKE(12, 0): + if (shader_core_count >= 10 && ray_intersection) + return "Mali-G720-Immortalis"; + else if (shader_core_count >= 6) + return "Mali-G720"; + + fallthrough; + case GPU_PROD_ID_MAKE(12, 1): + return "Mali-G620"; + case GPU_PROD_ID_MAKE(13, 0): + if (shader_core_count >= 10 && ray_intersection) + return "Mali-G925-Immortalis"; + else if (shader_core_count >= 6) + return "Mali-G725"; + + fallthrough; + case GPU_PROD_ID_MAKE(13, 1): + return "Mali-G625"; + } + + return "(Unknown Mali GPU)"; +} + +static void panthor_gpu_info_init(struct panthor_device *ptdev) +{ + unsigned int i; + + ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); + ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); + ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); + ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); + ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES); + ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES); + ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES); + ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES); + ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES); + ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS); + ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE); + ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE); + ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES); + for (i = 0; i < 4; i++) + ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i)); + + ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); + + ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); + ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); + + /* Introduced in arch 11.x */ + ptdev->gpu_info.gpu_features = 
gpu_read64(ptdev, GPU_FEATURES); +} + +static void panthor_hw_info_init(struct panthor_device *ptdev) +{ + u32 major, minor, status; + + panthor_gpu_info_init(ptdev); + + major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id); + minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id); + status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id); + + drm_info(&ptdev->base, + "%s id 0x%x major 0x%x minor 0x%x status 0x%x", + get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16, + major, minor, status); + + drm_info(&ptdev->base, + "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x", + ptdev->gpu_info.l2_features, + ptdev->gpu_info.tiler_features, + ptdev->gpu_info.mem_features, + ptdev->gpu_info.mmu_features, + ptdev->gpu_info.as_present); + + drm_info(&ptdev->base, + "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx", + ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present, + ptdev->gpu_info.tiler_present); +} + +int panthor_hw_init(struct panthor_device *ptdev) +{ + panthor_hw_info_init(ptdev); + + return 0; +} diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h new file mode 100644 index 000000000000..0af6acc6aa6a --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_hw.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#ifndef __PANTHOR_HW_H__ +#define __PANTHOR_HW_H__ + +struct panthor_device; + +int panthor_hw_init(struct panthor_device *ptdev); + +#endif /* __PANTHOR_HW_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 4140f697ba5a..2003b91a8409 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -29,6 +29,7 @@ #include "panthor_device.h" #include "panthor_gem.h" +#include "panthor_gpu.h" #include "panthor_heap.h" #include "panthor_mmu.h" #include "panthor_regs.h" @@ -568,6 +569,35 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr, write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); } +static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, + u32 op) +{ + const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV; + u32 lsc_flush_op = 0; + int ret; + + if (op == AS_COMMAND_FLUSH_MEM) + lsc_flush_op = CACHE_CLEAN | CACHE_INV; + + ret = wait_ready(ptdev, as_nr); + if (ret) + return ret; + + ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0); + if (ret) + return ret; + + /* + * Explicitly unlock the region as the AS is not unlocked automatically + * at the end of the GPU_CONTROL cache flush command, unlike + * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT. 
+ */ + write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK); + + /* Wait for the unlock command to complete */ + return wait_ready(ptdev, as_nr); +} + static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, u64 iova, u64 size, u32 op) { @@ -585,6 +615,9 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, if (op != AS_COMMAND_UNLOCK) lock_region(ptdev, as_nr, iova, size); + if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT) + return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op); + /* Run the MMU operation */ write_cmd(ptdev, as_nr, op); @@ -2169,15 +2202,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, mutex_lock(&vm->op_lock); vm->op_ctx = op; switch (op_type) { - case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: + case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: { + const struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->va.addr, + .map.va.range = op->va.range, + .map.gem.obj = op->map.vm_bo->obj, + .map.gem.offset = op->map.bo_offset, + }; + if (vm->unusable) { ret = -EINVAL; break; } - ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, - op->map.vm_bo->obj, op->map.bo_offset); + ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req); break; + } case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index 48bbfd40138c..8bee76d01bf8 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -70,6 +70,9 @@ #define GPU_PWR_OVERRIDE0 0x54 #define GPU_PWR_OVERRIDE1 0x58 +#define GPU_FEATURES 0x60 +#define GPU_FEATURES_RAY_INTERSECTION BIT(2) + #define GPU_TIMESTAMP_OFFSET 0x88 #define GPU_CYCLE_COUNT 0x90 #define GPU_TIMESTAMP 0x98 diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index e5805ca646c7..c3315935d8bc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -131,9 +131,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, * in the plane update callback, and here we just check * whenever we must force the modeset. 
*/ - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) output->needs_modeset = true; - } } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 7dfb2006c561..1c15cbf326b7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -162,18 +162,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; #endif - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) vgdev->has_edid = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) vgdev->has_indirect = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) vgdev->has_resource_assign_uuid = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) vgdev->has_resource_blob = true; - } + if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region, VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) { if (!devm_request_mem_region(&vgdev->vdev->dev, @@ -193,9 +193,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) (unsigned long)vgdev->host_visible_region.addr, (unsigned long)vgdev->host_visible_region.len); } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) vgdev->has_context_init = true; - } DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible", vgdev->has_virgl_3d ? 
'+' : '-', diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 5517cff8715c..e6363c887500 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -47,6 +47,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) *resid = handle + 1; } else { int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) return handle; *resid = handle + 1; @@ -56,9 +57,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { - if (!virtio_gpu_virglrenderer_workaround) { + if (!virtio_gpu_virglrenderer_workaround) ida_free(&vgdev->resource_ida, id - 1); - } } void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 698ea7adb951..29e4b458ae57 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -120,7 +120,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + return PTR_ERR(crtc_state); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 55a15e247dd1..8181b22b9b46 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -248,6 +248,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) { struct virtio_gpu_ctrl_hdr *cmd; + cmd = virtio_gpu_vbuf_ctrl_hdr(entry); DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n", le32_to_cpu(resp->type), @@ -468,6 +469,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, if (vbuf->data_size) { if (is_vmalloc_addr(vbuf->data_buf)) { int sg_ents; + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, &sg_ents); if (!sgt) { diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 2035604121e6..86842247e9d8 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, switch (operation) { case DRM_XE_VM_BIND_OP_MAP: - case DRM_XE_VM_BIND_OP_MAP_USERPTR: - ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, - obj, bo_offset_or_userptr); + case DRM_XE_VM_BIND_OP_MAP_USERPTR: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = addr, + .map.va.range = range, + .map.gem.obj = obj, + .map.gem.offset = bo_offset_or_userptr, + }; + + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req); break; + } case DRM_XE_VM_BIND_OP_UNMAP: ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); break; diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 2ec1b136d603..8d9d4fd078e7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -1172,6 +1172,10 @@ struct drm_bridge { */ bool pre_enable_prev_first; /** + * @support_hdcp: Indicate that the bridge supports HDCP. + */ + bool support_hdcp; + /** * @ddc: Associated I2C adapter for DDC access, if any. 
*/ struct i2c_adapter *ddc; diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h index 4aedc5423aff..142fc2af1716 100644 --- a/include/drm/drm_gpusvm.h +++ b/include/drm/drm_gpusvm.h @@ -282,6 +282,10 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start, unsigned long end); +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end); + struct drm_gpusvm_range * drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, unsigned long end); @@ -434,4 +438,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range) (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ (range__) = __drm_gpusvm_range_next(range__)) +/** + * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier + * @range__: Iterator variable for the ranges + * @next__: Iterator variable for the ranges temporay storage + * @notifier__: Pointer to the GPU SVM notifier + * @start__: Start address of the range + * @end__: End address of the range + * + * This macro is used to iterate over GPU SVM ranges in a notifier while + * removing ranges from it. + */ +#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ + for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ + (next__) = __drm_gpusvm_range_next(range__); \ + (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ + (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) + +/** + * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list + * @notifier: a pointer to the current drm_gpusvm_notifier + * + * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if + * the current notifier is the last one or if the input notifier is + * NULL. + */ +static inline struct drm_gpusvm_notifier * +__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) +{ + if (notifier && !list_is_last(¬ifier->entry, + ¬ifier->gpusvm->notifier_list)) + return list_next_entry(notifier, entry); + + return NULL; +} + +/** + * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @gpusvm__: Pointer to the GPU SVM notifier + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm. + */ +#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = __drm_gpusvm_notifier_next(notifier__)) + +/** + * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @next__: Iterator variable for the notifiers temporay storage + * @gpusvm__: Pointer to the GPU SVM notifier + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm while + * removing notifiers from it. 
+ */ +#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \ + (next__) = __drm_gpusvm_notifier_next(notifier__); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) + #endif /* __DRM_GPUSVM_H__ */ diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 274532facfd6..4a22b9d848f7 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -160,15 +160,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm, struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start); struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end); -static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) -{ - va->va.addr = addr; - va->va.range = range; - va->gem.obj = obj; - va->gem.offset = offset; -} - /** * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is * invalidated @@ -1058,10 +1049,23 @@ struct drm_gpuva_ops { */ #define drm_gpuva_next_op(op) list_next_entry(op, entry) +/** + * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]() + */ +struct drm_gpuvm_map_req { + /** + * @op_map: struct drm_gpuva_op_map + */ + struct drm_gpuva_op_map map; +}; + struct drm_gpuva_ops * drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); + const struct drm_gpuvm_map_req *req); +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req); + struct drm_gpuva_ops * drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); @@ -1079,8 +1083,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op) { - drm_gpuva_init(va, op->va.addr, op->va.range, - op->gem.obj, op->gem.offset); + va->va.addr = op->va.addr; + va->va.range = op->va.range; + va->gem.obj = op->gem.obj; + va->gem.offset = op->gem.offset; } /** @@ -1205,16 +1211,14 @@ struct drm_gpuvm_ops { }; int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); + const struct drm_gpuvm_map_req *req); int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range); int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, - u64 req_addr, u64 req_range, - struct drm_gem_object *obj, u64 offset); + struct drm_gpuvm_map_req *req); int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, u64 req_addr, u64 req_range); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 16eeb9552064..3aba7b380c8d 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -342,6 +342,8 @@ ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, const void *data, size_t len); ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, size_t len); +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len); int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); @@ -404,6 +406,22 @@ void 
mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); } while (0) /** + * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a + * generic write packet + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions + * @seq: buffer containing the payload + */ +#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \ + do { \ + const u8 d[] = { seq }; \ + mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** * mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload * * This macro will print errors for you and error handling is optimized for @@ -420,6 +438,23 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); } while (0) /** + * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant + * payload + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @ctx: Context for multiple DSI transactions + * @cmd: Command + * @seq: buffer containing data to be transmitted + */ +#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \ + do { \ + const u8 d[] = { cmd, seq }; \ + mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** * mipi_dsi_dual - send the same MIPI DSI command to two interfaces * * This macro will send the specified MIPI DSI command twice, once per each of diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index e1f43deb7eca..467d365ed7ba 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -327,6 +327,9 @@ struct drm_panthor_gpu_info { /** @pad: MBZ. */ __u32 pad; + + /** @gpu_features: Bitmask describing supported GPU-wide features */ + __u64 gpu_features; }; /** diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs index 3bb7c83966cf..4a62f9fd88b7 100644 --- a/rust/kernel/drm/device.rs +++ b/rust/kernel/drm/device.rs @@ -10,7 +10,8 @@ use crate::{ error::from_err_ptr, error::Result, prelude::*, - types::{ARef, AlwaysRefCounted, Opaque}, + sync::aref::{ARef, AlwaysRefCounted}, + types::Opaque, }; use core::{mem, ops::Deref, ptr, ptr::NonNull}; diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs index fe7e8d06961a..8fefae41bcc6 100644 --- a/rust/kernel/drm/driver.rs +++ b/rust/kernel/drm/driver.rs @@ -8,7 +8,7 @@ use crate::{ bindings, device, devres, drm, error::{to_result, Result}, prelude::*, - types::ARef, + sync::aref::ARef, }; use macros::vtable; diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs index b71821cfb5ea..a822aedee949 100644 --- a/rust/kernel/drm/gem/mod.rs +++ b/rust/kernel/drm/gem/mod.rs @@ -10,7 +10,8 @@ use crate::{ drm::driver::{AllocImpl, AllocOps}, error::{to_result, Result}, prelude::*, - types::{ARef, AlwaysRefCounted, Opaque}, + sync::aref::{ARef, AlwaysRefCounted}, + types::Opaque, }; use core::{mem, ops::Deref, ptr::NonNull}; |
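A minimal sketch of how a caller adapts to the reworked drm_gpuvm_sm_map_ops_create() interface shown in the xe and panthor hunks above; the function and variable names (example_create_map_ops, gem_obj) are placeholders, not taken from any driver in this series:

#include <drm/drm_gpuvm.h>

/* Wrap the former addr/range/obj/offset arguments in a drm_gpuvm_map_req. */
static struct drm_gpuva_ops *
example_create_map_ops(struct drm_gpuvm *gpuvm, struct drm_gem_object *gem_obj,
		       u64 addr, u64 range, u64 offset)
{
	const struct drm_gpuvm_map_req map_req = {
		.map.va.addr = addr,
		.map.va.range = range,
		.map.gem.obj = gem_obj,
		.map.gem.offset = offset,
	};

	/* Split/merge behaviour is unchanged; only the argument passing differs. */
	return drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
}

Callers of drm_gpuvm_sm_unmap_ops_create() are unaffected, since the unmap path still takes the address and range directly.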

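The new mipi_dsi_dcs_write_var_seq_multi() helper builds its payload array on the stack rather than in a static const buffer, so the sequence may contain values computed at run time. A short usage sketch, assuming a panel driver that already holds a mipi_dsi_multi_context; the 0x51 (set_display_brightness) command and the helper name are illustrative only:

#include <drm/drm_mipi_dsi.h>

static void example_panel_set_brightness(struct mipi_dsi_multi_context *ctx,
					 u8 brightness)
{
	/*
	 * 'brightness' is not a compile-time constant, so the existing
	 * mipi_dsi_dcs_write_seq_multi() (static const payload) cannot be
	 * used here; the _var_ variant copies the bytes into a stack array
	 * and forwards them to mipi_dsi_dcs_write_buffer_multi().
	 */
	mipi_dsi_dcs_write_var_seq_multi(ctx, 0x51, brightness);
}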
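The newly exported drm_gpusvm_notifier_find() and the drm_gpusvm_for_each_notifier{,_safe}() macros let drivers walk the notifiers overlapping an address interval. A minimal sketch, assuming a driver that only wants to log the overlapping notifiers; the _safe variant additionally caches the next entry so the current notifier may be removed while iterating:

#include <drm/drm_gpusvm.h>
#include <linux/printk.h>

static void example_log_notifiers(struct drm_gpusvm *gpusvm,
				  unsigned long start, unsigned long end)
{
	struct drm_gpusvm_notifier *notifier;

	/* Walk every notifier whose interval overlaps [start, end). */
	drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end)
		pr_debug("notifier starting at %#lx overlaps the range\n",
			 drm_gpusvm_notifier_start(notifier));
}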
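The cache_flush_lock added around panthor_gpu_flush_caches() is taken through guard(mutex)() from <linux/cleanup.h>, which drops the lock automatically on every return path. A generic sketch of that pattern under placeholder names (example_lock, example_serialized_op), not panthor code:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static bool example_ready;

static int example_serialized_op(void)
{
	/* Scoped lock: released when the function returns, on any path. */
	guard(mutex)(&example_lock);

	if (!example_ready)
		return -EBUSY;	/* no explicit mutex_unlock() needed */

	return 0;
}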