author    Tiago Vignatti <tiago.vignatti@intel.com>  2015-12-22 19:36:45 -0200
committer Inki Dae <inki.dae@samsung.com>            2017-03-08 01:32:08 -0800
commit    6f5e5e6f22415a59cc6f8cdaa9c15ca50fe6f043 (patch)
tree      03063234b0d2d9f830f73d02ba2302f34f7e6e07
parent    410b9e07e8f749d0e4822db19a2f1a27cdcc25c0 (diff)
dma-buf: Remove range-based flush
This patch removes range-based information used for optimizations in
begin_cpu_access and end_cpu_access. We don't have any user nor
implementation using range-based flush. It seems a consensus that if we
ever want something like that again (or even more robust using 2D, 3D
sub-range regions) we can use the upcoming dma-buf sync ioctl for such.

Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1450820214-12509-3-git-send-email-tiago.vignatti@intel.com
[mszyprow: fixed begin/end_cpu_access calls in
 drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c,
 backport of mainline commit 831e9da7dc5c22fd2a5fb64e999f6e077a4338c3]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Change-Id: Ic0b70da15ce997dad5da9d53c808bf92a9f332ac
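For illustration only (not part of this commit): a minimal importer-side sketch of how CPU access is bracketed once the range arguments are gone. example_read_buffer() is a hypothetical name and the error handling is deliberately minimal.

    /*
     * Illustration only, not part of this commit: CPU access bracketing
     * with the range-free interface this patch introduces.
     */
    #include <linux/dma-buf.h>
    #include <linux/dma-direction.h>

    static int example_read_buffer(struct dma_buf *buf)
    {
    	int ret;

    	/* Whole-buffer coherency for CPU reads; the exporter may need
    	 * to flush caches or swap in and pin backing storage. */
    	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
    	if (ret)
    		return ret;	/* can fail, e.g. in oom conditions */

    	/* ... read the buffer via dma_buf_kmap()/dma_buf_kmap_atomic() ... */

    	/* Must be paired with begin_cpu_access; this call always succeeds. */
    	dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
    	return 0;
    }
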
 Documentation/dma-buf-sharing.txt                           | 19
 drivers/dma-buf/dma-buf.c                                   | 13
 drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c  |  8
 drivers/gpu/drm/i915/i915_gem_dmabuf.c                      |  2
 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c                   |  4
 drivers/gpu/drm/udl/udl_fb.c                                |  2
 drivers/staging/android/ion/ion.c                           |  6
 drivers/staging/android/ion/ion_test.c                      |  4
 include/linux/dma-buf.h                                     | 12
 9 files changed, 26 insertions(+), 44 deletions(-)
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 480c8de3c2c4..4f4a84b6903a 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps:
Interface:
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- size_t start, size_t len,
enum dma_data_direction direction)
This allows the exporter to ensure that the memory is actually available for
cpu access - the exporter might need to allocate or swap-in and pin the
backing storage. The exporter also needs to ensure that cpu access is
- coherent for the given range and access direction. The range and access
- direction can be used by the exporter to optimize the cache flushing, i.e.
- access outside of the range or with a different direction (read instead of
- write) might return stale or even bogus data (e.g. when the exporter needs to
- copy the data to temporary storage).
+ coherent for the access direction. The direction can be used by the exporter
+ to optimize the cache flushing, i.e. access with a different direction (read
+ instead of write) might return stale or even bogus data (e.g. when the
+ exporter needs to copy the data to temporary storage).
This step might fail, e.g. in oom conditions.
@@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps:
3. Finish access
- When the importer is done accessing the range specified in begin_cpu_access,
- it needs to announce this to the exporter (to facilitate cache flushing and
- unpinning of any pinned resources). The result of any dma_buf kmap calls
- after end_cpu_access is undefined.
+ When the importer is done accessing the CPU, it needs to announce this to
+ the exporter (to facilitate cache flushing and unpinning of any pinned
+ resources). The result of any dma_buf kmap calls after end_cpu_access is
+ undefined.
Interface:
void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
- size_t start, size_t len,
enum dma_data_direction dir);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 155c1464948e..b2ac13b4ddaa 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -539,13 +539,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @start: [in] start of range for cpu access.
- * @len: [in] length of range for cpu access.
* @direction: [in] length of range for cpu access.
*
* Can return negative error values, returns 0 on success.
*/
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
int ret = 0;
@@ -554,8 +552,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
return -EINVAL;
if (dmabuf->ops->begin_cpu_access)
- ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
- len, direction);
+ ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
return ret;
}
@@ -567,19 +564,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @start: [in] start of range for cpu access.
- * @len: [in] length of range for cpu access.
* @direction: [in] length of range for cpu access.
*
* This call must always succeed.
*/
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
WARN_ON(!dmabuf);
if (dmabuf->ops->end_cpu_access)
- dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+ dmabuf->ops->end_cpu_access(dmabuf, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
diff --git a/drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c
index 041976190d61..97ad5e7cf49c 100644
--- a/drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c
+++ b/drivers/gpu/arm/midgard/r12p0_04rel0/mali_kbase_softjobs.c
@@ -834,9 +834,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
KBASE_DEBUG_ASSERT(dma_buf != NULL);
- ret = dma_buf_begin_cpu_access(dma_buf, 0,
- buf_data->nr_extres_pages*PAGE_SIZE,
- DMA_FROM_DEVICE);
+ ret = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
if (ret)
goto out_unlock;
@@ -855,9 +853,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
if (target_page_nr >= buf_data->nr_pages)
break;
}
- dma_buf_end_cpu_access(dma_buf, 0,
- buf_data->nr_extres_pages*PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
break;
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..2f8c206ab11f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -196,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
return -EINVAL;
}
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 344fd789170d..baf89c36f7c8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -79,7 +79,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
- size_t start, size_t len, enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
@@ -94,7 +94,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
}
static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
- size_t start, size_t len, enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
omap_gem_put_pages(obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index cd8d183dcfe5..7f398801f046 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -410,7 +410,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
- 0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
goto unlock;
@@ -426,7 +425,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (ufb->obj->base.import_attach) {
dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
- 0, ufb->obj->base.size,
DMA_FROM_DEVICE);
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index abbc42a56e7c..f399a5c76659 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1057,8 +1057,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
{
}
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
@@ -1076,8 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
return PTR_ERR_OR_ZERO(vaddr);
}
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 3bc461cbbfa3..7561e9b2e4af 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
if (offset > dma_buf->size || size > dma_buf->size - offset)
return -EINVAL;
- ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ ret = dma_buf_begin_cpu_access(dma_buf, dir);
if (ret)
return ret;
@@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
copy_offset = 0;
}
err:
- dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ dma_buf_end_cpu_access(dma_buf, dir);
return ret;
}
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd7068d55..532108ea0c1c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
- * respectively pin the objet into memory.
+ * respectively pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
- int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
- void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
- enum dma_data_direction);
+ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+ void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
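
For illustration only (not part of this commit): a matching exporter-side sketch of the reduced dma_buf_ops callbacks. The my_exporter_* names are hypothetical, and the mandatory attach/map/release callbacks are omitted for brevity.

    /*
     * Illustration only, not part of this commit: exporter callbacks with
     * the reduced signatures. A real exporter must also fill in the
     * attach, map_dma_buf, unmap_dma_buf, release and kmap callbacks.
     */
    static int my_exporter_begin_cpu_access(struct dma_buf *dmabuf,
    					enum dma_data_direction direction)
    {
    	/*
    	 * Make the whole backing storage coherent for 'direction'; with
    	 * no start/len arguments the exporter can no longer restrict the
    	 * flush to a sub-range.
    	 */
    	return 0;
    }

    static void my_exporter_end_cpu_access(struct dma_buf *dmabuf,
    				       enum dma_data_direction direction)
    {
    	/* Flush CPU caches and unpin whatever begin_cpu_access pinned. */
    }

    static const struct dma_buf_ops my_exporter_dma_buf_ops = {
    	/* ... other mandatory callbacks omitted ... */
    	.begin_cpu_access	= my_exporter_begin_cpu_access,
    	.end_cpu_access		= my_exporter_end_cpu_access,
    };

If sub-range flushing is ever wanted again, the commit message points at the upcoming dma-buf sync ioctl rather than re-adding parameters to these callbacks.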