path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
author    Dave Airlie <airlied@redhat.com>    2018-02-28 11:44:29 +1000
committer Dave Airlie <airlied@redhat.com>    2018-02-28 11:44:29 +1000
commit 8bb5b22255dac09cd74eae9f86333b474d69fbbe (patch)
tree   4cb47d92abb336be63d16636f5ab15723077c993 /drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
parent 727edc744098e2a3d2f9f9ce53edb49cb7817ea1 (diff)
parent 9aff8b2ae71dcf7f02443821a894a736f40e4919 (diff)
Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Expose thermal thresholds through hwmon properly
- Rework HDP flushing for rings and CPU
- Improved dual-link DVI handling in DC
- Lots of code clean up
- Additional DC clean up
- Allow scanout from system memory on CZ/BR/ST
- Improved PASID/VM integration
- Expose GPU voltage and power via hwmon
- Initial wattman-like support
- Initial power profiles for use-case optimized performance
- Rework GPUVM TLB flushing
- Rework IP offset handling for SOC15 asics
- Add CRC support in DC
- Fixes for mmhub powergating
- Initial regamma/degamma/CTM support in DC
- ttm cleanups and simplifications
- ttm OOM avoidance fixes

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (348 commits)
  Revert "drm/radeon/pm: autoswitch power state when in balanced mode"
  drm/radeon: use drm_gem_private_object_init
  drm/amdgpu: use drm_gem_private_object_init
  drm/amdgpu: mitigate workaround for i915
  drm/amdgpu: implement amdgpu_gem_map_(attach/detach)
  drm/amdgpu/powerplay/smu7: drop refresh rate checks for mclk switching
  drm/amdgpu/cgs: add refresh rate checking to non-DC display code
  drm/amd/powerplay/smu7: allow mclk switching with no displays
  drm/amd/powerplay/vega10: allow mclk switching with no displays
  drm/amd/powerplay: use PP_CAP macro for disable_mclk_switching_for_frame_lock
  drm/amd/powerplay: remove unused headers
  drm/amdgpu_gem: fix error handling path in amdgpu_gem_va_update_vm
  drm/amdgpu: update the PASID mapping only on demand
  drm/amdgpu: separate PASID mapping from VM flush v2
  drm/amd/display: Fix increment when sampling OTF in DCE
  drm/amd/display: De PQ implementation
  drm/amd/display: Remove unused dm_pp_ interfaces
  drm/amd/display: Add logging for aux DPCD access
  drm/amd/display: Set vsc pack revision when DPCD revision is >= 1.2
  drm/amd/display: provide an interface to query firmware version
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 143
1 file changed, 118 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..8ce74a1d9966 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,9 +26,12 @@
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
+static const struct dma_buf_ops amdgpu_dmabuf_ops;
+
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
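Note: the amdgpu_dmabuf_ops forward declaration added above is what lets the attach, detach and import paths further down recognize buffers amdgpu itself exported, by comparing ops pointers. A hypothetical helper (not part of this patch) makes the idiom explicit:

static bool amdgpu_is_own_dmabuf(struct dma_buf *dma_buf)
{
	return dma_buf->ops == &amdgpu_dmabuf_ops;
}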
@@ -103,7 +106,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
@@ -112,49 +115,72 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
return &bo->gem_base;
}
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
{
+ struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- long ret = 0;
-
- ret = amdgpu_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
- /*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
- */
- ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(ret < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
- amdgpu_bo_unreserve(bo);
- return ret;
+ long r;
+
+ r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ goto error_detach;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops) {
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(r < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ goto error_unreserve;
+ }
}
/* pin buffer into GTT */
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (r)
+ goto error_unreserve;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count++;
+error_unreserve:
amdgpu_bo_unreserve(bo);
- return ret;
+
+error_detach:
+ if (r)
+ drm_gem_map_detach(dma_buf, attach);
+ return r;
}
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
+ struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0;
ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0))
- return;
+ goto error;
amdgpu_bo_unpin(bo);
- if (bo->prime_shared_count)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
+
+error:
+ drm_gem_map_detach(dma_buf, attach);
}
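For context, a minimal importer-side sketch (not part of this patch; the function name and the dev argument are illustrative) of how another driver's dma-buf calls land in the new callbacks: dma_buf_attach() reaches amdgpu_gem_map_attach() above, which pins the BO into GTT, and dma_buf_detach() reaches amdgpu_gem_map_detach().

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Attach to and map a dma-buf as an importer. */
static struct sg_table *import_buffer(struct dma_buf *buf, struct device *dev,
				      struct dma_buf_attachment **attach_out)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);	/* -> amdgpu_gem_map_attach() */
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);	/* -> amdgpu_gem_map_detach() */
		return sgt;
	}

	*attach_out = attach;
	return sgt;
}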
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -164,6 +190,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { true, false };
+ u32 domain = amdgpu_display_framebuffer_domains(adev);
+ int ret;
+ bool reads = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_FROM_DEVICE);
+
+ if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+ return 0;
+
+ /* move to gtt */
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ }
+
+ amdgpu_bo_unreserve(bo);
+ return ret;
+}
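From userspace, this callback is reached through the DMA_BUF_IOCTL_SYNC ioctl on the exported fd. A minimal sketch, assuming prime_fd is a dma-buf fd obtained via PRIME export:

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

/* Bracket the start of a CPU read of the shared buffer; SYNC_START ends
 * up in amdgpu_gem_begin_cpu_access() above, giving the driver a chance
 * to migrate the BO to GTT before the CPU reads it. */
static int cpu_read_begin(int prime_fd)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
	};

	return ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);
}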
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+ .attach = amdgpu_gem_map_attach,
+ .detach = amdgpu_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .begin_cpu_access = amdgpu_gem_begin_cpu_access,
+ .map = drm_gem_dmabuf_kmap,
+ .map_atomic = drm_gem_dmabuf_kmap_atomic,
+ .unmap = drm_gem_dmabuf_kunmap,
+ .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -176,7 +246,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return ERR_PTR(-EPERM);
buf = drm_gem_prime_export(dev, gobj, flags);
- if (!IS_ERR(buf))
+ if (!IS_ERR(buf)) {
buf->file->f_mapping = dev->anon_inode->i_mapping;
+ buf->ops = &amdgpu_dmabuf_ops;
+ }
+
return buf;
}
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj;
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, dma_buf);
+}
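The net effect for userspace: re-importing a dma-buf that amdgpu itself exported now returns the existing GEM object instead of wrapping the sg-table in a fresh BO. A libdrm sketch (fd is assumed to be an open node on the same amdgpu device; the helper name is illustrative):

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Export a GEM handle as a dma-buf fd, then import it back on the same
 * device. With the fast path above, no second BO is created; on the
 * same DRM fd the PRIME handle cache even returns the original handle. */
static int roundtrip_same_device(int fd, uint32_t handle, uint32_t *out)
{
	int prime_fd, ret;

	ret = drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		return ret;

	ret = drmPrimeFDToHandle(fd, prime_fd, out);
	close(prime_fd);
	return ret;
}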