Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r-- | drivers/gpu/drm/drm_gem_shmem_helper.c | 175
1 file changed, 88 insertions, 87 deletions
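
The patch below converts the helper's open-coded use counters to refcount_t, adds a separate pages_pin_count, and gives the dma_resv-locked entry points a _locked suffix. As background (not part of the patch), here is a minimal sketch of the refcount_t get/put pattern the diff adopts; my_obj, my_alloc() and my_free() are hypothetical stand-ins, and, like the helpers below, the first-get/last-put transitions are assumed to be serialized by an outer lock (the GEM object's dma_resv lock in the patch):

#include <linux/refcount.h>

struct my_obj {
	refcount_t use_count;	/* starts at 0 */
	void *resource;
};

static int my_obj_get(struct my_obj *obj)	/* cf. drm_gem_shmem_get_pages_locked() */
{
	/* Take a reference only if the resource is already live. */
	if (refcount_inc_not_zero(&obj->use_count))
		return 0;

	obj->resource = my_alloc();		/* hypothetical allocator */
	if (!obj->resource)
		return -ENOMEM;

	refcount_set(&obj->use_count, 1);	/* first reference */
	return 0;
}

static void my_obj_put(struct my_obj *obj)	/* cf. drm_gem_shmem_put_pages_locked() */
{
	/* refcount_dec_and_test() returns true on the 1 -> 0 transition. */
	if (refcount_dec_and_test(&obj->use_count)) {
		my_free(obj->resource);		/* hypothetical */
		obj->resource = NULL;
	}
}

Unlike a plain counter, refcount_t saturates on overflow and warns on underflow, which is what lets the patch drop the manual drm_WARN_ON_ONCE() underflow checks from the put paths.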
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..aa43265f4f4f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -160,12 +160,12 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;

-	if (obj->import_attach) {
+	if (drm_gem_is_imported(obj)) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
 	} else {
 		dma_resv_lock(shmem->base.resv, NULL);

-		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));

 		if (shmem->sgt) {
 			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 			kfree(shmem->sgt);
 		}
 		if (shmem->pages)
-			drm_gem_shmem_put_pages(shmem);
+			drm_gem_shmem_put_pages_locked(shmem);

-		drm_WARN_ON(obj->dev, shmem->pages_use_count);
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));

 		dma_resv_unlock(shmem->base.resv);
 	}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct page **pages;

 	dma_resv_assert_held(shmem->base.resv);

-	if (shmem->pages_use_count++ > 0)
+	if (refcount_inc_not_zero(&shmem->pages_use_count))
 		return 0;

 	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
 			    PTR_ERR(pages));
-		shmem->pages_use_count = 0;
 		return PTR_ERR(pages);
 	}

@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)

 	shmem->pages = pages;

+	refcount_set(&shmem->pages_use_count, 1);
+
 	return 0;
 }

 /*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
  * @shmem: shmem GEM object
  *
  * This function decreases the use count and puts the backing pages when use drops to zero.
  */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;

 	dma_resv_assert_held(shmem->base.resv);

-	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
-		return;
-
-	if (--shmem->pages_use_count > 0)
-		return;
-
+	if (refcount_dec_and_test(&shmem->pages_use_count)) {
 #ifdef CONFIG_X86
-	if (shmem->map_wc)
-		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+		if (shmem->map_wc)
+			set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
 #endif

-	drm_gem_put_pages(obj, shmem->pages,
-			  shmem->pages_mark_dirty_on_put,
-			  shmem->pages_mark_accessed_on_put);
-	shmem->pages = NULL;
+		drm_gem_put_pages(obj, shmem->pages,
+				  shmem->pages_mark_dirty_on_put,
+				  shmem->pages_mark_accessed_on_put);
+		shmem->pages = NULL;
+	}
 }
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);

 int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
 {
@@ -255,9 +253,14 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)

 	dma_resv_assert_held(shmem->base.resv);

-	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+	drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
+
+	if (refcount_inc_not_zero(&shmem->pages_pin_count))
+		return 0;

-	ret = drm_gem_shmem_get_pages(shmem);
+	ret = drm_gem_shmem_get_pages_locked(shmem);
+	if (!ret)
+		refcount_set(&shmem->pages_pin_count, 1);

 	return ret;
 }
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
 {
 	dma_resv_assert_held(shmem->base.resv);

-	drm_gem_shmem_put_pages(shmem);
+	if (refcount_dec_and_test(&shmem->pages_pin_count))
+		drm_gem_shmem_put_pages_locked(shmem);
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

@@ -286,7 +290,10 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 	struct drm_gem_object *obj = &shmem->base;
 	int ret;

-	drm_WARN_ON(obj->dev, obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+	if (refcount_inc_not_zero(&shmem->pages_pin_count))
+		return 0;

 	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
 	if (ret)
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)

 	return ret;
 }
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);

 /**
  * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -309,16 +316,19 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;

-	drm_WARN_ON(obj->dev, obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+
+	if (refcount_dec_not_one(&shmem->pages_pin_count))
+		return;

 	dma_resv_lock(shmem->base.resv, NULL);
 	drm_gem_shmem_unpin_locked(shmem);
 	dma_resv_unlock(shmem->base.resv);
 }
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);

 /*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
  * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
  *       store.
@@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
  * exists for the buffer backing the shmem GEM object. It hides the differences
  * between dma-buf imported and natively allocated objects.
  *
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
  *
  * Returns:
  * 0 on success or a negative error code on failure.
  */
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
-		       struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+			      struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;

-	if (obj->import_attach) {
-		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
-		if (!ret) {
-			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
-				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				return -EIO;
-			}
-		}
+	if (drm_gem_is_imported(obj)) {
+		ret = dma_buf_vmap(obj->dma_buf, map);
 	} else {
 		pgprot_t prot = PAGE_KERNEL;

 		dma_resv_assert_held(shmem->base.resv);

-		if (shmem->vmap_use_count++ > 0) {
+		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
 			iosys_map_set_vaddr(map, shmem->vaddr);
 			return 0;
 		}

-		ret = drm_gem_shmem_get_pages(shmem);
+		ret = drm_gem_shmem_pin_locked(shmem);
 		if (ret)
-			goto err_zero_use;
+			return ret;

 		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
-		if (!shmem->vaddr)
+		if (!shmem->vaddr) {
 			ret = -ENOMEM;
-		else
+		} else {
 			iosys_map_set_vaddr(map, shmem->vaddr);
+			refcount_set(&shmem->vmap_use_count, 1);
+		}
 	}

 	if (ret) {
@@ -378,50 +384,44 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
 	return 0;

 err_put_pages:
-	if (!obj->import_attach)
-		drm_gem_shmem_put_pages(shmem);
-err_zero_use:
-	shmem->vmap_use_count = 0;
+	if (!drm_gem_is_imported(obj))
+		drm_gem_shmem_unpin_locked(shmem);

 	return ret;
 }
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);

 /*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
  * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
  * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
  *
  * This function hides the differences between dma-buf imported and natively
  * allocated objects.
  */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-			  struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+				 struct iosys_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;

-	if (obj->import_attach) {
-		dma_buf_vunmap(obj->import_attach->dmabuf, map);
+	if (drm_gem_is_imported(obj)) {
+		dma_buf_vunmap(obj->dma_buf, map);
 	} else {
 		dma_resv_assert_held(shmem->base.resv);

-		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-			return;
-
-		if (--shmem->vmap_use_count > 0)
-			return;
+		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+			vunmap(shmem->vaddr);
+			shmem->vaddr = NULL;

-		vunmap(shmem->vaddr);
-		drm_gem_shmem_put_pages(shmem);
+			drm_gem_shmem_unpin_locked(shmem);
+		}
 	}
-
-	shmem->vaddr = NULL;
 }
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);

 static int
 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,

 /* Update madvise status, returns true if not purged, else
  * false or -errno.
  */
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
 {
 	dma_resv_assert_held(shmem->base.resv);

@@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)

 	return (madv >= 0);
 }
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);

-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
@@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 	kfree(shmem->sgt);
 	shmem->sgt = NULL;

-	drm_gem_shmem_put_pages(shmem);
+	drm_gem_shmem_put_pages_locked(shmem);

 	shmem->madv = -1;

@@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
 }
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);

 /**
  * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -566,7 +566,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

-	drm_WARN_ON(obj->dev, obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

 	dma_resv_lock(shmem->base.resv, NULL);

@@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	 * mmap'd, vm_open() just grabs an additional reference for the new
 	 * mm the vma is getting copied into (ie. on fork()).
 	 */
-	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
-		shmem->pages_use_count++;
+	drm_WARN_ON_ONCE(obj->dev,
+			 !refcount_inc_not_zero(&shmem->pages_use_count));

 	dma_resv_unlock(shmem->base.resv);

@@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

 	dma_resv_lock(shmem->base.resv, NULL);
-	drm_gem_shmem_put_pages(shmem);
+	drm_gem_shmem_put_pages_locked(shmem);
 	dma_resv_unlock(shmem->base.resv);

 	drm_gem_vm_close(vma);
@@ -618,7 +618,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	struct drm_gem_object *obj = &shmem->base;
 	int ret;

-	if (obj->import_attach) {
+	if (drm_gem_is_imported(obj)) {
 		/* Reset both vm_ops and vm_private_data, so we don't end up with
 		 * vm_ops pointing to our implementation if the dma-buf backend
 		 * doesn't set those fields.
@@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 		return -EINVAL;

 	dma_resv_lock(shmem->base.resv, NULL);
-	ret = drm_gem_shmem_get_pages(shmem);
+	ret = drm_gem_shmem_get_pages_locked(shmem);
 	dma_resv_unlock(shmem->base.resv);

 	if (ret)
@@ -663,14 +663,15 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
-	if (shmem->base.import_attach)
+	if (drm_gem_is_imported(&shmem->base))
 		return;

-	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
-	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);

 /**
  * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -690,7 +691,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;

-	drm_WARN_ON(obj->dev, obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
@@ -705,9 +706,9 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 	if (shmem->sgt)
 		return shmem->sgt;

-	drm_WARN_ON(obj->dev, obj->import_attach);
+	drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));

-	ret = drm_gem_shmem_get_pages(shmem);
+	ret = drm_gem_shmem_get_pages_locked(shmem);
 	if (ret)
 		return ERR_PTR(ret);

@@ -729,7 +730,7 @@ err_free_sgt:
 	sg_free_table(sgt);
 	kfree(sgt);
 err_put_pages:
-	drm_gem_shmem_put_pages(shmem);
+	drm_gem_shmem_put_pages_locked(shmem);

 	return ERR_PTR(ret);
 }
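
For illustration, a driver-side view of the renamed API after this change: the caller owns the reservation lock around the _locked helpers. The function below is a hypothetical sketch, not part of this patch; it assumes a natively allocated (not dma-buf imported) object:

static int my_driver_cpu_access(struct drm_gem_shmem_object *shmem)	/* hypothetical */
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;

	/* The first vmap pins the pages and sets vmap_use_count to 1;
	 * nested vmaps only take another reference.
	 */
	ret = drm_gem_shmem_vmap_locked(shmem, &map);
	if (!ret) {
		/* ... access the buffer through map.vaddr ... */
		drm_gem_shmem_vunmap_locked(shmem, &map);
	}

	dma_resv_unlock(shmem->base.resv);
	return ret;
}

Note the design choice in the unlocked drm_gem_shmem_pin()/drm_gem_shmem_unpin() wrappers: by checking refcount_inc_not_zero() and refcount_dec_not_one() on pages_pin_count before locking, they avoid taking the reservation lock entirely except for the first pin and the last unpin.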