-rw-r--r--	Documentation/dma-buf-sync.txt			258
-rw-r--r--	drivers/dma-buf/Makefile			  2
-rw-r--r--	drivers/dma-buf/dmabuf-sync.c			1041
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_crtc.c	 12
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fb.c		  4
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_g2d.c		 11
-rw-r--r--	include/linux/dmabuf-sync.h			108
-rw-r--r--	include/linux/reservation.h			  1
8 files changed, 1422 insertions, 15 deletions
diff --git a/Documentation/dma-buf-sync.txt b/Documentation/dma-buf-sync.txt
new file mode 100644
index 00000000000..589cf5d3d75
--- /dev/null
+++ b/Documentation/dma-buf-sync.txt
@@ -0,0 +1,258 @@
+ DMA Buffer Synchronization Framework
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Inki Dae
+ <inki dot dae at samsung dot com>
+ <daeinki at gmail dot com>
+
+This document is a guide for device-driver writers describing the DMA buffer
+synchronization API. It also describes how to use the API to synchronize
+buffer access between DMA and DMA, between CPU and DMA, and between CPU and
+CPU.
+
+The DMA buffer synchronization API provides a buffer synchronization
+mechanism based on the DMA buffer sharing mechanism[1] and the dma-fence and
+reservation frameworks[2]; i.e., buffer access control for CPU and DMA, and
+easy-to-use interfaces for device drivers and user applications. This API
+can be used by all DMA devices that use system memory as DMA buffers,
+especially on most ARM-based SoCs.
+
+
+Motivation
+----------
+
+There are several cases in which a userspace process needs this buffer
+synchronization framework. The primary one is to enhance GPU rendering
+performance when a 3d app draws something into a buffer using the CPU while
+another process composites that buffer with its own back buffer using the
+GPU.
+
+For better performance, a 3d app calls glFlush, not glFinish, to submit 3d
+commands to the GPU driver. The reason is that glFinish blocks the caller's
+task until execution of the 3d commands is completed, which leaves both GPU
+and CPU more idle. As a result, 3d rendering performance with glFinish is
+considerably lower than with glFlush.
+
+However, the use of glFlush has one issue: the buffer shared with the GPU
+can be corrupted when the CPU accesses it just after glFlush, because the
+CPU has no way of knowing when the GPU has finished accessing the buffer.
+An application can, of course, wait for that point using eglWaitGL, but
+that function is valid only when all applications use the same 3d context.
+For GPU performance, however, applications may use different 3d contexts.
+
+The following summarizes how an app's window is displayed on screen with
+the X server:
+1. X client requests a window buffer to X server.
+2. X client draws something in the window buffer using CPU.
+3. X client requests SWAP to X server.
+4. X server notifies a damage event to Composite Manager.
+5. Composite Manager gets the window buffer (front buffer) through
+ DRI2GetBuffers.
+6. Composite Manager composes the window buffer and its own back buffer
+ using GPU. At this time, eglSwapBuffers is called: internally, 3d
+ commands are flushed to gpu driver.
+7. Composite Manager requests SWAP to X server.
+8. X server performs drm page flip. At this time, the window buffer is
+ displayed on screen.
+
+HTML5-based web applications have the same issue. The web browser and the
+web application are different processes: the web application draws something
+into its own buffer using the CPU, and the web browser then composites that
+buffer with its own back buffer.
+
+Thus, in such cases, a shared buffer can be corrupted while one process
+draws into it using the CPU and another process composites it with its own
+buffer using the GPU, if there is no sync mechanism. That is why a
+userspace sync interface, the fcntl system call, is needed.
+
+And the last case is the deferred page flip issue: in the worst case, a
+rendered window buffer is not displayed on screen for about 32ms, even
+assuming that GPU rendering completes within 16ms. This can happen when a
+pixmap buffer is composited with a window buffer using the GPU just as
+vsync starts. At that point, the X server waits for a vblank event before
+handing out the window buffer, so 3d rendering is delayed by up to about
+16ms. As a result, the window buffer is displayed after two vsyncs (about
+32ms), which in turn causes slow responsiveness.
+
+The following shows the deferred page flip issue in the worst case:
+
+ |------------ <- vsync signal
+ |<------ DRI2GetBuffers
+ |
+ |
+ |
+ |------------ <- vsync signal
+ |<------ Request gpu rendering
+ time |
+ |
+ |<------ Request page flip (deferred)
+ |------------ <- vsync signal
+ |<------ Displayed on screen
+ |
+ |
+ |
+ |------------ <- vsync signal
+
+Responsiveness can be improved by letting the X server skip the wait for
+vsync when a sync mechanism is available: the X server returns a new buffer
+to the X client without waiting for vsync, so the X client gets more CPU
+time than it would spend waiting for vsync, and the buffer is synchronized
+implicitly in the kernel driver, transparently to userspace.
+
+
+Access types
+------------
+
+DMA_BUF_ACCESS_R - CPU will access a buffer for read.
+DMA_BUF_ACCESS_W - CPU will access a buffer for read or write.
+DMA_BUF_ACCESS_DMA_R - DMA will access a buffer for read.
+DMA_BUF_ACCESS_DMA_W - DMA will access a buffer for read or write.
+
+
+Generic userspace interfaces
+----------------------------
+
+This framework exports the fcntl system call[3] as its interface to
+userspace. A process sees a buffer object as a file descriptor, so an fcntl
+lock request on that file descriptor means that other processes cannot
+access the buffer managed by the dma-buf object, according to the fcntl
+request command, until an fcntl unlock request is issued (see the userspace
+tutorial below).
+
+
+API set
+-------
+
+bool dmabuf_sync_is_supported(void);
+	- Check if dmabuf sync is supported or not.
+
+struct dmabuf_sync *dmabuf_sync_init(const char *name,
+ struct dmabuf_sync_priv_ops *ops,
+ void *priv);
+ - Allocate and initialize a new dmabuf_sync.
+
+	This function should be called by a DMA driver after its device
+	context is created. The created dmabuf_sync object should be set
+	to the driver's context. Each DMA driver and task should have one
+	dmabuf_sync object.
+
+
+void dmabuf_sync_fini(struct dmabuf_sync *sync)
+	- Release a given dmabuf_sync object and the resources relevant
+	  to it.
+
+ This function should be called if some operation failed after
+ dmabuf_sync_init call to release relevant resources, and after
+ dmabuf_sync_signal or dmabuf_sync_signal_all function is called.
+
+
+int dmabuf_sync_get(struct dmabuf_sync *sync, void *sync_buf,
+ unsigned int ctx, unsigned int type)
+ - Add a given dmabuf object to dmabuf_sync object.
+
+ This function should be called after dmabuf_sync_init function is
+ called. The caller can tie up multiple dmabufs into its own
+ dmabuf_sync object by calling this function several times.
+
+
+void dmabuf_sync_put(struct dmabuf_sync *sync, struct dma_buf *dmabuf)
+	- Delete the dmabuf_sync_object corresponding to a given dmabuf.
+
+ This function should be called if some operation failed after
+ dmabuf_sync_get function is called to release the dmabuf or
+ after DMA driver or task completes the use of the dmabuf.
+
+
+void dmabuf_sync_put_all(struct dmabuf_sync *sync)
+	- Release all dmabuf_sync_object objects of a given dmabuf_sync
+	  object.
+
+ This function should be called if some operation failed after
+ dmabuf_sync_get function is called to release all dmabuf_sync_object
+ objects, or after DMA driver or task completes the use of all
+ dmabufs.
+
+long dmabuf_sync_wait_all(struct dmabuf_sync *sync)
+ - Wait for the completion of DMA or CPU access to all dmabufs.
+
+ The caller should call this function prior to CPU or DMA access to
+ dmabufs so that other CPU or DMA cannot access the dmabufs.
+
+long dmabuf_sync_wait(struct dma_buf *dmabuf, unsigned int ctx,
+		      unsigned int access_type)
+ - Wait for the completion of DMA or CPU access to a dmabuf.
+
+ The caller should call this function prior to CPU or DMA access to
+ a dmabuf so that other CPU and DMA device cannot access the dmabuf.
+
+int dmabuf_sync_signal_all(struct dmabuf_sync *sync)
+ - Wake up all threads blocked when they tried to access dmabufs
+ registered to a given dmabuf_sync object.
+
+ The caller should call this function after CPU or DMA access to
+ the dmabufs is completed so that other CPU and DMA device can access
+ the dmabufs.
+
+int dmabuf_sync_signal(struct dma_buf *dmabuf)
+ - Wake up all threads blocked when they tried to access a given dmabuf.
+
+ The caller should call this function after CPU or DMA access to
+ the dmabuf is completed so that other CPU and DMA device can access
+ the dmabuf.
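+
+The two per-buffer calls above, dmabuf_sync_wait and dmabuf_sync_signal,
+pair up in the same way as the _all variants. Below is a minimal sketch,
+assuming the caller already has a dmabuf and a context value (the names
+dmabuf and ctx are placeholders):
+
+	/* Block until no other CPU task or DMA device owns the dmabuf. */
+	if (dmabuf_sync_wait(dmabuf, ctx, DMA_BUF_ACCESS_W) < 0)
+		return;	/* invalid access type or out of memory. */
+
+	/* ... CPU reads from and writes to the buffer ... */
+
+	/* Wake up all threads blocked on this dmabuf. */
+	dmabuf_sync_signal(dmabuf);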
+
+
+Tutorial for device driver
+--------------------------
+
+1. Allocate and Initialize a dmabuf_sync object:
+ struct dmabuf_sync *sync;
+
+ sync = dmabuf_sync_init("test sync", &xxx_sync_ops, context);
+ ...
+
+2. Add a dmabuf to the dmabuf_sync object when setting up dma buffer
+ relevant registers:
+	dmabuf_sync_get(sync, dmabuf, context, DMA_BUF_ACCESS_DMA_R);
+ ...
+
+3. Add a fence of this driver to all dmabufs added to the dmabuf_sync
+ object before DMA or CPU accesses the dmabufs:
+ dmabuf_sync_wait_all(sync);
+ ...
+
+4. Now the DMA of this device can access all dmabufs.
+
+5. Signal all dmabufs added to a dmabuf_sync object after DMA or CPU access
+ to these dmabufs is completed:
+ dmabuf_sync_signal_all(sync);
+
+ And call the following functions to release all resources,
+ dmabuf_sync_put_all(sync);
+ dmabuf_sync_fini(sync);
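+
+Putting the steps above together, a driver's submit path might look like
+the sketch below. This is illustrative only: xxx_sync_ops, xxx_free,
+xxx_submit, context, ctx and dmabuf are placeholders for driver-specific
+code.
+
+	static void xxx_free(void *priv)
+	{
+		/* Release driver-private data tied to the dmabuf_sync. */
+	}
+
+	static struct dmabuf_sync_priv_ops xxx_sync_ops = {
+		.free = xxx_free,
+	};
+
+	int xxx_submit(void *context, struct dma_buf *dmabuf,
+			unsigned int ctx)
+	{
+		struct dmabuf_sync *sync;
+		int ret;
+
+		sync = dmabuf_sync_init("xxx", &xxx_sync_ops, context);
+		if (IS_ERR(sync))
+			return PTR_ERR(sync);
+
+		/* One dmabuf_sync_get call per buffer the job touches. */
+		ret = dmabuf_sync_get(sync, dmabuf, ctx,
+					DMA_BUF_ACCESS_DMA_W);
+		if (ret < 0)
+			goto err_fini;
+
+		/* Fence all buffers before the DMA touches them. */
+		dmabuf_sync_wait_all(sync);
+
+		/* ... program registers and run the DMA to completion ... */
+
+		/* Allow other CPU tasks and DMA devices in again. */
+		dmabuf_sync_signal_all(sync);
+		dmabuf_sync_put_all(sync);
+		dmabuf_sync_fini(sync);
+		return 0;
+
+	err_fini:
+		dmabuf_sync_fini(sync);
+		return ret;
+	}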
+
+
+Tutorial for user application
+-----------------------------
+ struct flock filelock;
+
+1. Lock a dmabuf:
+ filelock.l_type = F_WRLCK or F_RDLCK;
+
+ /* lock entire region to the dma buf. */
+	filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ fcntl(dmabuf fd, F_SETLKW or F_SETLK, &filelock);
+ ...
+	CPU can access the dmabuf.
+
+2. Unlock a dmabuf:
+ filelock.l_type = F_UNLCK;
+
+ fcntl(dmabuf fd, F_SETLKW or F_SETLK, &filelock);
+
+	A close(dmabuf fd) call would also unlock the dma-buf. For more
+	detail, please refer to [3].
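+
+The sequence above as a minimal compilable sketch (the dmabuf fd is
+assumed to have been exported to userspace beforehand, e.g. through a DRM
+PRIME ioctl; error handling is reduced to the minimum):
+
+	#include <fcntl.h>
+
+	int dmabuf_cpu_access(int fd)
+	{
+		struct flock filelock = {
+			.l_type = F_WRLCK,	/* exclusive, for write */
+			.l_whence = SEEK_CUR,
+			.l_start = 0,
+			.l_len = 0,		/* entire buffer */
+		};
+
+		/* F_SETLKW blocks until the dmabuf is available. */
+		if (fcntl(fd, F_SETLKW, &filelock) < 0)
+			return -1;
+
+		/* ... CPU access to the (mmapped) buffer ... */
+
+		filelock.l_type = F_UNLCK;
+		return fcntl(fd, F_SETLKW, &filelock);
+	}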
+
+
+References:
+[1] http://lwn.net/Articles/470339/
+[2] https://lkml.org/lkml/2014/2/24/824
+[3] http://linux.die.net/man/2/fcntl
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 57a675f90cd..8e90146c562 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1 +1 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o dmabuf-sync.o
diff --git a/drivers/dma-buf/dmabuf-sync.c b/drivers/dma-buf/dmabuf-sync.c
new file mode 100644
index 00000000000..790383855a0
--- /dev/null
+++ b/drivers/dma-buf/dmabuf-sync.c
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Chanho Park <chanho61.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+
+#include <linux/seqno-fence.h>
+#include <linux/reservation.h>
+#include <linux/dmabuf-sync.h>
+
+#define DEFAULT_SYNC_TIMEOUT 5000 /* in milliseconds. */
+#define DEFAULT_WORKER_TIMEOUT 500 /* in milliseconds. */
+#define BEGIN_CPU_ACCESS(old, new_type) \
+ ((old->accessed_type & DMA_BUF_ACCESS_DMA_W) == \
+ DMA_BUF_ACCESS_DMA_W && new_type == DMA_BUF_ACCESS_R)
+
+#define END_CPU_ACCESS(old, new_type) \
+ (((old->accessed_type == DMA_BUF_ACCESS_W) || \
+ (old->accessed_type == DMA_BUF_ACCESS_RW)) && \
+ new_type & DMA_BUF_ACCESS_DMA)
+
+static int dmabuf_sync_enabled = 1;
+static unsigned long seqno;
+static LIST_HEAD(orders);
+static DEFINE_SPINLOCK(orders_lock);
+static struct timer_list fence_free_worker;
+
+MODULE_PARM_DESC(enabled, "Check if dmabuf sync is supported or not");
+module_param_named(enabled, dmabuf_sync_enabled, int, 0444);
+
+static void sobj_release(struct kref *kref)
+{
+ struct dmabuf_sync_object *sobj =
+ container_of(kref, struct dmabuf_sync_object, refcount);
+
+ fence_put(&sobj->sfence->base);
+ kfree(sobj);
+}
+
+/*
+ * sobj_get - increases refcount of the dmabuf_sync_object
+ * @sobj: [in] sync object to increase refcount of
+ */
+static inline void sobj_get(struct dmabuf_sync_object *sobj)
+{
+ if (sobj)
+ kref_get(&sobj->refcount);
+}
+
+/*
+ * sobj_put - decreases refcount of the dmabuf_sync_object
+ * @sobj: [in] sync object to reduce refcount of
+ */
+static inline void sobj_put(struct dmabuf_sync_object *sobj)
+{
+ if (sobj)
+ kref_put(&sobj->refcount, sobj_release);
+}
+
+
+static const char *dmabuf_sync_get_driver_name(struct fence *fence)
+{
+ return NULL;
+}
+
+static const char *dmabuf_sync_get_timeline_name(struct fence *fence)
+{
+ return NULL;
+}
+
+static bool dmabuf_sync_enable_sw_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static void sync_free_worker_handler(unsigned long data)
+{
+ struct dmabuf_sync *sync = (struct dmabuf_sync *)data;
+
+ kfree(sync);
+}
+
+static void setup_sync_free_worker(struct dmabuf_sync *sync)
+{
+ init_timer(&sync->sync_free_worker);
+ sync->sync_free_worker.expires = jiffies +
+ msecs_to_jiffies(DEFAULT_WORKER_TIMEOUT);
+ sync->sync_free_worker.data = (unsigned long)sync;
+ sync->sync_free_worker.function = sync_free_worker_handler;
+ add_timer(&sync->sync_free_worker);
+}
+
+static void sfence_object_release(struct fence *fence)
+{
+ struct seqno_fence *sf = to_seqno_fence(fence);
+ struct dmabuf_sync *sync = to_dmabuf_sync(sf);
+
+ /* TODO. find out better way. */
+ setup_sync_free_worker(sync);
+}
+
+static const struct fence_ops fence_default_ops = {
+ .get_driver_name = dmabuf_sync_get_driver_name,
+ .get_timeline_name = dmabuf_sync_get_timeline_name,
+ .enable_signaling = dmabuf_sync_enable_sw_signaling,
+ .wait = fence_default_wait,
+ .release = sfence_object_release,
+};
+
+/**
+ * dmabuf_sync_is_supported - Check if dmabuf sync is supported or not.
+ */
+bool dmabuf_sync_is_supported(void)
+{
+ return dmabuf_sync_enabled == 1;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_is_supported);
+
+/*
+ * Perform a cache operation according to the access type.
+ *
+ * This function is called by dmabuf_sync_wait or dmabuf_sync_wait_all to
+ * change the ownership of a buffer - for cache coherence between CPU and
+ * DMA - from CPU to DMA or from DMA to CPU after it is signaled.
+ */
+static void dmabuf_sync_cache_ops(struct dmabuf_sync_object *sobj)
+{
+ struct dma_buf *dmabuf = sobj->dmabuf;
+
+ /* It doesn't need cache operation if access first time. */
+ if (!dmabuf->resv->accessed_type)
+ goto out;
+
+ if (END_CPU_ACCESS(dmabuf->resv, sobj->access_type))
+ /* cache clean */
+ dma_buf_end_cpu_access(dmabuf, 0, dmabuf->size,
+ DMA_TO_DEVICE);
+ else if (BEGIN_CPU_ACCESS(dmabuf->resv, sobj->access_type))
+ /* cache invalidate */
+ dma_buf_begin_cpu_access(dmabuf, 0, dmabuf->size,
+ DMA_FROM_DEVICE);
+
+out:
+ /* Update access type to new one. */
+ dmabuf->resv->accessed_type = sobj->access_type;
+}
+
+/**
+ * dmabuf_sync_check - check whether a given buffer requires a sync operation.
+ *
+ * @dmabuf: An object to dma_buf structure.
+ *
+ * This function should be called by dmabuf_sync_wait or dmabuf_sync_wait_all
+ * before it blocks the current thread. It returns a fence object if buffer
+ * sync is needed, otherwise NULL.
+ */
+struct fence *dmabuf_sync_check(struct dma_buf *dmabuf)
+{
+ struct reservation_object_list *rol;
+ struct reservation_object *ro;
+ struct fence *fence = NULL;
+ unsigned int i;
+
+ rcu_read_lock();
+ ro = rcu_dereference(dmabuf->resv);
+ if (!ro)
+ goto unlock_rcu;
+
+ /* First, check there is a fence for write access. */
+ fence = rcu_dereference(ro->fence_excl);
+ if (fence)
+ goto unlock_rcu;
+
+ /* Check if there is any fences requested for read access. */
+ if (ro->fence) {
+ rol = rcu_dereference(ro->fence);
+
+ for (i = 0; i < rol->shared_count; i++) {
+ fence = rcu_dereference(rol->shared[i]);
+ if (fence)
+ break;
+ }
+ }
+
+unlock_rcu:
+ rcu_read_unlock();
+ return fence;
+}
+
+/**
+ * dmabuf_sync_init - Allocate and initialize a dmabuf sync.
+ *
+ * @name: A dmabuf_sync object name.
+ * @ops: operation callbacks specific to the device.
+ * @priv: A device private data.
+ *
+ * This function should be called by a DMA driver after its device context
+ * is created.
+ * The created dmabuf_sync object should be set to the context of driver.
+ * Each DMA driver and task should have one dmabuf_sync object.
+ *
+ * The caller can get a new dmabuf_sync object for buffer synchronization
+ * through this function, which is used when device drivers use the dmabuf
+ * sync APIs.
+ * It returns a dmabuf_sync object on success, otherwise an error pointer.
+ */
+struct dmabuf_sync *dmabuf_sync_init(const char *name,
+ struct dmabuf_sync_priv_ops *ops,
+ void *priv)
+{
+ struct dmabuf_sync *sync;
+
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ if (!sync)
+ return ERR_PTR(-ENOMEM);
+
+ strncpy(sync->name, name, DMABUF_SYNC_NAME_SIZE);
+
+ sync->ops = ops;
+ sync->priv = priv;
+ INIT_LIST_HEAD(&sync->syncs);
+ spin_lock_init(&sync->lock);
+ spin_lock_init(&sync->flock);
+
+ sync->sfence.ops = &fence_default_ops;
+ fence_init(&sync->sfence.base, &seqno_fence_ops, &sync->flock,
+ (unsigned)priv, ++seqno);
+
+ return sync;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_init);
+
+/*
+ * dmabuf_sync_get_obj - Add a given object to syncs list.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ * @dmabuf: An object to dma_buf structure.
+ * @ctx: not used yet.
+ * @type: An access type for a dma-buf.
+ *	DMA_BUF_ACCESS_R means that this dmabuf can be accessed by others
+ *	for read access. On the other hand, DMA_BUF_ACCESS_W means that
+ *	this dmabuf cannot be accessed by others but will be accessed by
+ *	the caller's DMA exclusively. DMA_BUF_ACCESS_DMA can be combined
+ *	with either.
+ *
+ * This function creates and initializes a new dmabuf_sync_object and adds
+ * it to the given syncs list. A dmabuf_sync_object holds a dmabuf object.
+ */
+static int dmabuf_sync_get_obj(struct dmabuf_sync *sync, struct dma_buf *dmabuf,
+ unsigned int ctx, unsigned int type)
+{
+ struct dmabuf_sync_object *sobj;
+ unsigned long s_flags;
+
+ if (!sync)
+ return -EFAULT;
+
+ if (!IS_VALID_DMA_BUF_ACCESS_TYPE(type))
+ return -EINVAL;
+
+ if ((type & DMA_BUF_ACCESS_RW) == DMA_BUF_ACCESS_RW)
+ type &= ~DMA_BUF_ACCESS_R;
+
+ sobj = kzalloc(sizeof(*sobj), GFP_KERNEL);
+ if (!sobj)
+ return -ENOMEM;
+
+ kref_init(&sobj->refcount);
+ sobj->access_type = type;
+ sobj->sfence = &sync->sfence;
+ fence_get(&sobj->sfence->base);
+ sobj->dmabuf = dmabuf;
+ get_dma_buf(dmabuf);
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ sync->obj_cnt++;
+ list_add_tail(&sobj->l_head, &sync->syncs);
+ sobj_get(sobj);
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ return 0;
+}
+
+/*
+ * dmabuf_sync_put_objs - Release all sync objects of dmabuf_sync.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * This function should be called if some operation failed after a
+ * dmabuf_sync_get_obj call, to release all sync objects, or after the
+ * device driver or task completes the use of all buffers.
+ */
+static void dmabuf_sync_put_objs(struct dmabuf_sync *sync)
+{
+ struct dmabuf_sync_object *sobj, *next;
+ unsigned long s_flags;
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ list_for_each_entry_safe(sobj, next, &sync->syncs, l_head) {
+
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ sync->obj_cnt--;
+ list_del_init(&sobj->l_head);
+ fence_put(&sobj->sfence->base);
+ sobj_put(sobj);
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ }
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+}
+
+/*
+ * dmabuf_sync_put_obj - Release a given sync object.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ * @dmabuf: A dmabuf object to be released.
+ *
+ * This function should be called if some operation failed after
+ * dmabuf_sync_get_obj call to release a given sync object or
+ * after device driver or task completes the use of a buffer.
+ */
+static void dmabuf_sync_put_obj(struct dmabuf_sync *sync,
+ struct dma_buf *dmabuf)
+{
+ struct dmabuf_sync_object *sobj, *next;
+ unsigned long s_flags;
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ list_for_each_entry_safe(sobj, next, &sync->syncs, l_head) {
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ if (sobj->dmabuf != dmabuf)
+ continue;
+
+ sync->obj_cnt--;
+ list_del_init(&sobj->l_head);
+ fence_put(&sobj->sfence->base);
+ sobj_put(sobj);
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ break;
+ }
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+}
+
+/**
+ * dmabuf_sync_get - Add a given dmabuf object to dmabuf_sync object.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ * @sync_buf: A dmabuf object to be synchronized with others.
+ * @ctx: not used yet.
+ * @type: An access type for a dma-buf.
+ *	DMA_BUF_ACCESS_R means that this dmabuf can be accessed by others
+ *	for read access. On the other hand, DMA_BUF_ACCESS_W means that
+ *	this dmabuf cannot be accessed by others but will be accessed by
+ *	the caller's DMA exclusively. DMA_BUF_ACCESS_DMA can be combined
+ *	with either.
+ *
+ * This function should be called after dmabuf_sync_init function is called.
+ * The caller can tie up multiple dmabufs into its own dmabuf_sync object
+ * by calling this function several times.
+ */
+int dmabuf_sync_get(struct dmabuf_sync *sync, void *sync_buf,
+ unsigned int ctx, unsigned int type)
+{
+ if (!sync || !sync_buf)
+ return -EFAULT;
+
+ return dmabuf_sync_get_obj(sync, sync_buf, ctx, type);
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_get);
+
+/**
+ * dmabuf_sync_put - Put dmabuf sync object to a given dmabuf.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ * @dmabuf: A dmabuf object.
+ *
+ * This function should be called if some operation failed after
+ * dmabuf_sync_get function is called to release the dmabuf or
+ * after device driver or task completes the use of the dmabuf.
+ */
+void dmabuf_sync_put(struct dmabuf_sync *sync, struct dma_buf *dmabuf)
+{
+ unsigned long flags;
+
+ if (!sync || !dmabuf) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_irqsave(&sync->lock, flags);
+ if (list_empty(&sync->syncs)) {
+ spin_unlock_irqrestore(&sync->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&sync->lock, flags);
+
+ dmabuf_sync_put_obj(sync, dmabuf);
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_put);
+
+/**
+ * dmabuf_sync_put_all - Release all dmabuf_sync_objects to a given
+ * dmabuf_sync object.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * This function should be called if some operation failed after
+ * dmabuf_sync_get function is called to release all dmabuf_sync_object
+ * objects, or after DMA driver or task completes the use of all dmabufs.
+ */
+void dmabuf_sync_put_all(struct dmabuf_sync *sync)
+{
+ unsigned long flags;
+
+ if (!sync) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_irqsave(&sync->lock, flags);
+ if (list_empty(&sync->syncs)) {
+ spin_unlock_irqrestore(&sync->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&sync->lock, flags);
+
+ dmabuf_sync_put_objs(sync);
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_put_all);
+
+static void dmabuf_sync_debug_fence(struct dmabuf_sync_object *sobj)
+{
+ struct dma_buf *dmabuf = sobj->dmabuf;
+ struct reservation_object *resv;
+ struct fence *fence_excl, **shared;
+ unsigned int shared_count, i;
+
+ rcu_read_lock();
+
+ resv = rcu_dereference(dmabuf->resv);
+ reservation_object_get_fences_rcu(resv,
+ &fence_excl,
+ &shared_count,
+ &shared);
+
+ printk("%s: new fence = 0x%p(type = %d), ", __func__,
+ &sobj->sfence->base,
+ sobj->access_type);
+
+
+ fence_excl = rcu_dereference(resv->fence_excl);
+ if (fence_excl) {
+ printk("excl_fence = 0x%p(%d), ", fence_excl,
+ atomic_read(&fence_excl->refcount.refcount));
+ fence_put(fence_excl);
+ }
+
+ for (i = 0; i < shared_count; i++) {
+ if (shared[i]) {
+ printk("shared[%d] = 0x%p(%d), ",
+ i, shared[i],
+ atomic_read(&shared[i]->refcount.refcount));
+ fence_put(shared[i]);
+ }
+ }
+
+ rcu_read_unlock();
+
+ printk("shared_count = %d\n", shared_count);
+}
+
+static void fence_free_worker_handler(unsigned long data)
+{
+ struct dma_buf *dmabuf = (struct dma_buf *)data;
+ struct reservation_object *resv = dmabuf->resv;
+
+ if (resv && resv->fence_excl) {
+ struct fence *fence_excl = resv->fence_excl;
+
+ if (atomic_read(&fence_excl->refcount.refcount) > 0)
+ fence_put(fence_excl);
+ }
+
+ dma_buf_put(dmabuf);
+}
+
+static void setup_fence_free_worker(struct dmabuf_sync_object *sobj)
+{
+ if (timer_pending(&fence_free_worker)) {
+ del_timer(&fence_free_worker);
+ dma_buf_put(sobj->dmabuf);
+ }
+
+ get_dma_buf(sobj->dmabuf);
+
+ init_timer(&fence_free_worker);
+ fence_free_worker.expires = jiffies +
+ msecs_to_jiffies(DEFAULT_WORKER_TIMEOUT);
+ fence_free_worker.data = (unsigned long)sobj->dmabuf;
+ fence_free_worker.function = fence_free_worker_handler;
+ add_timer(&fence_free_worker);
+}
+
+static void cancel_fence_free_worker(struct dmabuf_sync_object *sobj)
+{
+ if (timer_pending(&fence_free_worker)) {
+ del_timer(&fence_free_worker);
+ dma_buf_put(sobj->dmabuf);
+ }
+}
+
+/*
+ * dmabuf_sync_update - register a fence object to a reservation_object.
+ *
+ * @sobj: An object to dmabuf_sync_object.
+ *
+ * This function is called by dmabuf_sync_wait and dmabuf_sync_wait_all when
+ * no other thread is accessing the same dmabuf, or after the current thread
+ * has been woken up, which means that the current thread has ownership of
+ * the dmabuf.
+ */
+static void dmabuf_sync_update(struct dmabuf_sync_object *sobj)
+{
+ struct seqno_fence *sf = sobj->sfence;
+ struct dma_buf *dmabuf = sobj->dmabuf;
+
+ if (sobj->access_type & DMA_BUF_ACCESS_R) {
+ struct reservation_object_list *fobj;
+
+ fobj = reservation_object_get_list(dmabuf->resv);
+
+ /*
+ * Reserve spaces for shared fences if this is the first call
+ * or there is no space for a new fence.
+ */
+ if (!fobj || (fobj && fobj->shared_count == fobj->shared_max))
+ reservation_object_reserve_shared(dmabuf->resv);
+
+ reservation_object_add_shared_fence(dmabuf->resv, &sf->base);
+
+ /*
+ * Add timer handler, which is used to drop a reference
+ * of fence object after sync operation is completed.
+ *
+ * dmabuf_sync_update request with a read access doesn't drop
+ * a reference of old excl_fence. For more details, see
+ * below functions of reservation module,
+ * - reservation_object_add_shared_inplace()
+ * - reservation_object_add_shared_replace()
+ *
+		 * P.S. a reference of the old excl_fence is dropped only by
+		 * reservation_object_add_excl_fence(), which is called
+		 * when dmabuf_sync_update is called with a write access.
+ * TODO. find out better way.
+ */
+ setup_fence_free_worker(sobj);
+ } else {
+ reservation_object_add_excl_fence(dmabuf->resv, &sf->base);
+
+ /*
+ * In this case, each reference of all old fences, shared and
+ * excl, must be dropped by above function,
+ * reservation_object_add_excl_fence(), so cancel a timer.
+ * TODO. find out better way.
+ */
+ cancel_fence_free_worker(sobj);
+ }
+}
+
+static void remove_obj_from_req_queue(struct dmabuf_sync_object *csobj)
+{
+ struct dmabuf_sync_object *sobj, *next;
+ unsigned long o_flags;
+
+ spin_lock_irqsave(&orders_lock, o_flags);
+ if (list_empty(&orders)) {
+ /* There should be no such case. */
+ WARN_ON(1);
+ goto out;
+ }
+
+ list_for_each_entry_safe(sobj, next, &orders, g_head) {
+ if (sobj == csobj) {
+ list_del_init(&sobj->g_head);
+ sobj_put(sobj);
+ break;
+ }
+
+ }
+out:
+ spin_unlock_irqrestore(&orders_lock, o_flags);
+}
+
+/*
+ * is_higher_priority_than_current - check if a given sobj was requested
+ *	first.
+ *
+ * @csobj: An object to dmabuf_sync_object, checked for whether its buffer
+ *	sync was requested first.
+ *
+ * This function can be called by dmabuf_sync_wait and dmabuf_sync_wait_all
+ * to check whether any object requested buffer sync earlier than the
+ * current thread. If there is such an object, the current thread does not
+ * own the dmabuf, so it will check the priority again in order to yield
+ * ownership to the threads that requested buffer sync earlier.
+ */
+static bool is_higher_priority_than_current(struct dma_buf *dmabuf,
+ struct dmabuf_sync_object *csobj)
+{
+ struct dmabuf_sync_object *sobj;
+ unsigned long o_flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&orders_lock, o_flags);
+ if (list_empty(&orders)) {
+ /* There should be no such case. */
+ WARN_ON(1);
+ goto out;
+ }
+
+ list_for_each_entry(sobj, &orders, g_head) {
+ if (sobj != csobj)
+ ret = true;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&orders_lock, o_flags);
+ return ret;
+}
+
+/**
+ * dmabuf_sync_wait_all - Wait for the completion of DMA or CPU access to
+ * all dmabufs.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * The caller should call this function prior to CPU or DMA access to
+ * dmabufs so that other CPU or DMA device cannot access the dmabufs.
+ */
+long dmabuf_sync_wait_all(struct dmabuf_sync *sync)
+{
+ struct dmabuf_sync_object *sobj;
+ unsigned long timeout = 0;
+ unsigned long s_flags;
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ list_for_each_entry(sobj, &sync->syncs, l_head) {
+ struct dma_buf *dmabuf;
+ struct seqno_fence *sf;
+ unsigned long o_flags;
+ bool all_wait;
+
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+		/*
+		 * The orders list is used to check whether any sobj of
+		 * another thread requested buffer sync earlier than the
+		 * current thread.
+		 */
+ spin_lock_irqsave(&orders_lock, o_flags);
+ list_add_tail(&sobj->g_head, &orders);
+ sobj_get(sobj);
+ spin_unlock_irqrestore(&orders_lock, o_flags);
+
+ sf = sobj->sfence;
+ dmabuf = sobj->dmabuf;
+
+		/*
+		 * There is no need to wait for any other thread if no sync
+		 * object has higher priority than this one (sobj), so go
+		 * update a fence.
+		 */
+ if (!is_higher_priority_than_current(dmabuf, sobj))
+ goto out_enable_signal;
+
+ all_wait = sobj->access_type & DMA_BUF_ACCESS_W;
+
+go_back_to_wait:
+		/*
+		 * Wait for all fences, both read and write, if this access
+		 * is for a write. Otherwise, wait only for the completion
+		 * of any write access.
+		 *
+		 * P.S. the references of previous fence objects for a read
+		 * or a write are dropped in this function, so those fence
+		 * objects may be freed here.
+		 */
+ timeout = reservation_object_wait_timeout_rcu(dmabuf->resv,
+ all_wait, true,
+ msecs_to_jiffies(DEFAULT_SYNC_TIMEOUT));
+ if (!timeout)
+			pr_warning("[DMA] signal wait timed out.\n");
+
+		/*
+		 * Check whether any sobj of another thread requested buffer
+		 * sync earlier than the current thread. If so, this thread
+		 * should yield ownership of the buffer to that thread to
+		 * preserve buffer access ordering, so go back and wait.
+		 */
+ if (is_higher_priority_than_current(dmabuf, sobj))
+ goto go_back_to_wait;
+
+out_enable_signal:
+ fence_enable_sw_signaling(&sf->base);
+ dmabuf_sync_update(sobj);
+ dmabuf_sync_cache_ops(sobj);
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ }
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ return timeout;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_wait_all);
+
+/**
+ * dmabuf_sync_wait - Wait for the completion of DMA or CPU access to a dmabuf.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * The caller should call this function prior to CPU or DMA access to
+ * a dmabuf so that other CPU or DMA device cannot access the dmabuf.
+ */
+long dmabuf_sync_wait(struct dma_buf *dmabuf, unsigned int ctx,
+ unsigned int access_type)
+{
+ struct dmabuf_sync_object *sobj;
+ struct dmabuf_sync *sync;
+ unsigned long timeout = 0;
+ bool all_wait;
+ unsigned long o_flags;
+
+ if (!IS_VALID_DMA_BUF_ACCESS_TYPE(access_type))
+ return -EINVAL;
+
+ if ((access_type & DMA_BUF_ACCESS_RW) == DMA_BUF_ACCESS_RW)
+ access_type &= ~DMA_BUF_ACCESS_R;
+
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ if (!sync)
+ return -ENOMEM;
+
+ sobj = kzalloc(sizeof(*sobj), GFP_KERNEL);
+ if (!sobj) {
+ kfree(sync);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sync->flock);
+
+ kref_init(&sobj->refcount);
+ sobj->access_type = access_type;
+ sobj->sfence = &sync->sfence;
+ sobj->dmabuf = dmabuf;
+ sync->single_sobj = sobj;
+ seqno_fence_init(&sync->sfence, &sync->flock, dmabuf, ctx, 0,
+ ++seqno, 0, &fence_default_ops);
+
+ spin_lock_irqsave(&orders_lock, o_flags);
+ list_add_tail(&sobj->g_head, &orders);
+ sobj_get(sobj);
+ spin_unlock_irqrestore(&orders_lock, o_flags);
+
+	/*
+	 * There is no need to wait for any other thread if no sync
+	 * object has higher priority than this one (sobj), so go
+	 * update a fence.
+	 */
+ if (!is_higher_priority_than_current(dmabuf, sobj))
+ goto out_enable_signal;
+
+ all_wait = access_type & DMA_BUF_ACCESS_W;
+
+go_back_to_wait:
+	/*
+	 * Wait for all fences, both read and write, if this access
+	 * is for a write. Otherwise, wait only for the completion
+	 * of any write access.
+	 *
+	 * P.S. the references of previous fence objects for a read
+	 * or a write are dropped in this function, so those fence
+	 * objects may be freed here.
+	 */
+ timeout = reservation_object_wait_timeout_rcu(dmabuf->resv,
+ all_wait, true,
+ msecs_to_jiffies(DEFAULT_SYNC_TIMEOUT));
+ if (!timeout)
+		pr_warning("[CPU] signal wait timed out.\n");
+
+	/*
+	 * Check whether any sobj of another thread requested buffer
+	 * sync earlier than the current thread. If so, this thread
+	 * should yield ownership of the buffer to that thread to
+	 * preserve buffer access ordering, so go back and wait.
+	 */
+ if (is_higher_priority_than_current(dmabuf, sobj))
+ goto go_back_to_wait;
+
+out_enable_signal:
+ fence_enable_sw_signaling(&sync->sfence.base);
+ dmabuf_sync_update(sobj);
+ dmabuf_sync_cache_ops(sobj);
+
+ return timeout;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_wait);
+
+/**
+ * dmabuf_sync_signal_all - Wake up all threads blocked when they tried to
+ * access dmabufs registered to a given
+ * dmabuf_sync object.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * The caller should call this function after CPU or DMA access to
+ * the dmabufs is completed so that other CPU and DMA device can access
+ * the dmabufs.
+ */
+int dmabuf_sync_signal_all(struct dmabuf_sync *sync)
+{
+ struct dmabuf_sync_object *sobj;
+ unsigned long s_flags;
+ int ret = -EAGAIN;
+
+ rcu_read_lock();
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ list_for_each_entry(sobj, &sync->syncs, l_head) {
+ struct fence *fence;
+ unsigned long flags;
+
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ fence = &sobj->sfence->base;
+ fence = rcu_dereference(fence);
+
+ remove_obj_from_req_queue(sobj);
+
+ spin_lock_irqsave(fence->lock, flags);
+
+		/*
+		 * Drop a reference if no task is waiting for the signal.
+		 * If there is one, a reference of this fence will be
+		 * dropped when that task calls dmabuf_sync_update() after
+		 * waking up.
+		 */
+ if (list_empty(&fence->cb_list))
+ fence_put(fence);
+
+ ret = fence_signal_locked(fence);
+ if (ret) {
+			pr_warning("signal request failed.\n");
+ spin_unlock_irqrestore(fence->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ sobj_put(sobj);
+
+		/*
+		 * Set sync_buf to the recently signaled buffer.
+		 *
+		 * When seqno_release is called, dma_buf_put is called
+		 * with seqno_fence->sync_buf.
+		 */
+ sobj->sfence->sync_buf = sobj->dmabuf;
+
+ spin_lock_irqsave(&sync->lock, s_flags);
+ }
+ spin_unlock_irqrestore(&sync->lock, s_flags);
+
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_signal_all);
+
+static int dmabuf_sync_signal_fence(struct fence *fence)
+{
+ struct dmabuf_sync *sync;
+ struct seqno_fence *sf;
+ unsigned long flags;
+ int ret;
+
+ sf = to_seqno_fence(fence);
+ sync = to_dmabuf_sync(sf);
+
+ remove_obj_from_req_queue(sync->single_sobj);
+
+ rcu_read_unlock();
+
+ spin_lock_irqsave(fence->lock, flags);
+
+	/*
+	 * Drop a reference if no task is waiting for the signal.
+	 * If there is one, a reference of this fence will be
+	 * dropped when that task calls dmabuf_sync_update() after
+	 * waking up.
+	 */
+ if (list_empty(&fence->cb_list))
+ fence_put(fence);
+
+ ret = fence_signal_locked(fence);
+ if (ret)
+		pr_warning("signal request failed.\n");
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ sobj_put(sync->single_sobj);
+
+ rcu_read_lock();
+
+ return ret;
+}
+
+/**
+ * dmabuf_sync_signal - Wake up all threads blocked when they tried to access
+ * a given dmabuf.
+ *
+ * @dmabuf: An object to dma_buf structure.
+ *
+ * The caller should call this function after CPU or DMA access to the dmabuf
+ * is completed so that other CPU and DMA device can access the dmabuf.
+ */
+int dmabuf_sync_signal(struct dma_buf *dmabuf)
+{
+ struct reservation_object_list *rol;
+ struct reservation_object *ro;
+ struct fence *fence;
+ int i;
+
+ rcu_read_lock();
+
+ ro = rcu_dereference(dmabuf->resv);
+
+ /* Was it a fence for a write? */
+ fence = rcu_dereference(ro->fence_excl);
+ if (fence) {
+ if (fence->context == (unsigned int)current) {
+ /*
+ * If this fence is already signaled,
+ * this context has its own shared fence
+ * so go check the shared fence.
+ */
+ if (fence_is_signaled(fence)) {
+ fence = NULL;
+ goto out_to_shared;
+ }
+ dmabuf_sync_signal_fence(fence);
+ goto out;
+ } else
+ fence = NULL;
+ }
+
+out_to_shared:
+ rol = rcu_dereference(ro->fence);
+ if (!rol)
+ goto out;
+
+ /* Was it a fence for a read? */
+ for (i = 0; i < rol->shared_count; i++) {
+ fence = rcu_dereference(rol->shared[i]);
+ if (!fence)
+ continue;
+
+ if (fence && fence->context != (unsigned int)current) {
+ fence = NULL;
+ continue;
+ }
+
+ /* There should be no such case. */
+ if (fence_is_signaled(fence))
+ WARN_ON(1);
+
+ break;
+ }
+
+ if (fence)
+ dmabuf_sync_signal_fence(fence);
+ else
+ WARN_ON(1);
+
+out:
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_signal);
+
+/**
+ * dmabuf_sync_fini - Release a given dmabuf_sync object and the resources
+ *	relevant to it.
+ *
+ * @sync: An object to dmabuf_sync structure.
+ *
+ * This function should be called if some operation failed after
+ * dmabuf_sync_init call to release relevant resources, and after
+ * dmabuf_sync_signal or dmabuf_sync_signal_all function is called.
+ */
+void dmabuf_sync_fini(struct dmabuf_sync *sync)
+{
+ unsigned long s_flags;
+
+ if (WARN_ON(!sync))
+ return;
+
+	spin_lock_irqsave(&sync->lock, s_flags);
+	if (list_empty(&sync->syncs)) {
+		spin_unlock_irqrestore(&sync->lock, s_flags);
+		goto free_sync;
+	}
+	spin_unlock_irqrestore(&sync->lock, s_flags);
+
+	/*
+	 * Handle the case where the caller never called
+	 * dmabuf_sync_put_all(). The lock must be dropped first:
+	 * dmabuf_sync_put_all() takes sync->lock itself.
+	 */
+	dmabuf_sync_put_all(sync);
+
+free_sync:
+	if (sync->ops && sync->ops->free)
+		sync->ops->free(sync->priv);
+}
+EXPORT_SYMBOL_GPL(dmabuf_sync_fini);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 22a8d1b8f64..e4f187d6285 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -315,21 +315,17 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
}
obj = &exynos_fb->exynos_gem_obj[i]->base;
- if (!obj->export_dma_buf)
+ if (!obj->dma_buf)
continue;
- /*
- * set dmabuf to fence and registers reservation
- * object to reservation entry.
- */
ret = dmabuf_sync_get(sync,
- obj->export_dma_buf,
+ obj->dma_buf, (unsigned int)crtc,
DMA_BUF_ACCESS_DMA_R);
if (WARN_ON(ret < 0))
continue;
}
- ret = dmabuf_sync_lock(sync);
+ ret = dmabuf_sync_wait_all(sync);
if (ret < 0) {
dmabuf_sync_put_all(sync);
dmabuf_sync_fini(sync);
@@ -525,7 +521,7 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
sync = list_first_entry(&exynos_crtc->sync_committed,
struct dmabuf_sync, list);
- if (!dmabuf_sync_unlock(sync)) {
+ if (!dmabuf_sync_signal_all(sync)) {
list_del_init(&sync->list);
dmabuf_sync_put_all(sync);
dmabuf_sync_fini(sync);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 78c6ee902ad..305f76d4de7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -198,7 +198,7 @@ void *exynos_drm_dmabuf_sync_work(struct drm_framebuffer *fb)
}
- ret = dmabuf_sync_lock(sync);
+ ret = dmabuf_sync_wait_all(sync);
if (ret < 0) {
dmabuf_sync_put_all(sync);
dmabuf_sync_fini(sync);
@@ -207,7 +207,7 @@ void *exynos_drm_dmabuf_sync_work(struct drm_framebuffer *fb)
}
/* buffer synchronization is done by wait_for_vblank so just signal. */
- dmabuf_sync_unlock(sync);
+ dmabuf_sync_signal_all(sync);
dmabuf_sync_put_all(sync);
dmabuf_sync_fini(sync);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 90432d3ff77..0232a724ab2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -696,11 +696,12 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
struct g2d_buf_info *buf_info = &node->buf_info;
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
int offset;
int ret;
int i;
- if (is_dmabuf_sync_supported()) {
+ if (dmabuf_sync_is_supported()) {
node->sync = dmabuf_sync_init("g2d", &dmabuf_sync_ops, node);
if (IS_ERR(node->sync))
node->sync = NULL;
@@ -745,7 +746,9 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
else
type = DMA_BUF_ACCESS_DMA_R;
- ret = dmabuf_sync_get(node->sync, dma_buf, type);
+ ret = dmabuf_sync_get(node->sync, dma_buf,
+ (unsigned int)file_priv->g2d_priv,
+ type);
if (ret < 0) {
WARN_ON(1);
dmabuf_sync_put_all(node->sync);
@@ -863,7 +866,7 @@ static void g2d_dma_start(struct g2d_data *g2d,
if (node->sync) {
int ret;
- ret = dmabuf_sync_lock(node->sync);
+ ret = dmabuf_sync_wait_all(node->sync);
if (ret < 0) {
WARN_ON(1);
dmabuf_sync_put_all(node->sync);
@@ -909,7 +912,7 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
if (node->sync) {
int ret;
- ret = dmabuf_sync_unlock(node->sync);
+ ret = dmabuf_sync_signal_all(node->sync);
if (ret < 0) {
WARN_ON(1);
/* TODO */
diff --git a/include/linux/dmabuf-sync.h b/include/linux/dmabuf-sync.h
new file mode 100644
index 00000000000..9ce13812d09
--- /dev/null
+++ b/include/linux/dmabuf-sync.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *	Chanho Park <chanho61.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/seqno-fence.h>
+
+#define DMABUF_SYNC_NAME_SIZE 256
+#define DMA_BUF_ACCESS_R 0x1
+#define DMA_BUF_ACCESS_W 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_RW (DMA_BUF_ACCESS_R | DMA_BUF_ACCESS_W)
+#define DMA_BUF_ACCESS_DMA_R (DMA_BUF_ACCESS_R | DMA_BUF_ACCESS_DMA)
+#define DMA_BUF_ACCESS_DMA_W (DMA_BUF_ACCESS_W | DMA_BUF_ACCESS_DMA)
+#define DMA_BUF_ACCESS_DMA_RW (DMA_BUF_ACCESS_DMA_R | DMA_BUF_ACCESS_DMA_W)
+#define IS_VALID_DMA_BUF_ACCESS_TYPE(t) (t == DMA_BUF_ACCESS_R || \
+ t == DMA_BUF_ACCESS_W || \
+ t == DMA_BUF_ACCESS_DMA_R || \
+ t == DMA_BUF_ACCESS_DMA_W || \
+ t == DMA_BUF_ACCESS_RW || \
+ t == DMA_BUF_ACCESS_DMA_RW)
+
+/*
+ * A structure for dmabuf_sync_object.
+ *
+ * @l_head: A list head to be added to dmabuf_sync's syncs.
+ * @access_type: Indicates how the current task tries to access
+ *	a given buffer; one of the below types can be set.
+ *	DMA_BUF_ACCESS_R -> CPU access for read.
+ *	DMA_BUF_ACCESS_W -> CPU access for write.
+ *	DMA_BUF_ACCESS_R | DMA_BUF_ACCESS_DMA -> DMA access for read.
+ *	DMA_BUF_ACCESS_W | DMA_BUF_ACCESS_DMA -> DMA access for write.
+ */
+struct dmabuf_sync_object {
+ struct kref refcount;
+ struct list_head l_head;
+ struct list_head g_head;
+ struct seqno_fence *sfence;
+ struct dma_buf *dmabuf;
+ unsigned int access_type;
+};
+
+struct dmabuf_sync_priv_ops {
+ void (*free)(void *priv);
+};
+
+/*
+ * A structure for dmabuf_sync.
+ *
+ * @syncs: A list head of sync objects; this list is global to the system
+ *	and contains the sync objects of the dmabuf_sync owner.
+ * @priv: A private data.
+ * @name: A string naming the dmabuf sync owner.
+ */
+struct dmabuf_sync {
+ struct list_head list;
+ struct list_head syncs;
+ struct seqno_fence sfence;
+ unsigned int obj_cnt;
+ struct dmabuf_sync_priv_ops *ops;
+ char name[DMABUF_SYNC_NAME_SIZE];
+ struct dmabuf_sync_object *single_sobj;
+ struct timer_list sync_free_worker;
+ spinlock_t lock;
+ spinlock_t flock;
+ void *priv;
+};
+
+bool dmabuf_sync_is_supported(void);
+
+struct dmabuf_sync *dmabuf_sync_init(const char *name,
+ struct dmabuf_sync_priv_ops *ops,
+ void *priv);
+
+void dmabuf_sync_fini(struct dmabuf_sync *sync);
+
+int dmabuf_sync_get(struct dmabuf_sync *sync, void *sync_buf,
+ unsigned int ctx, unsigned int type);
+
+void dmabuf_sync_put(struct dmabuf_sync *sync, struct dma_buf *dmabuf);
+
+void dmabuf_sync_put_all(struct dmabuf_sync *sync);
+
+long dmabuf_sync_wait(struct dma_buf *dmabuf, unsigned int ctx,
+ unsigned int access_type);
+
+long dmabuf_sync_wait_all(struct dmabuf_sync *sync);
+
+int dmabuf_sync_signal(struct dma_buf *dmabuf);
+
+int dmabuf_sync_signal_all(struct dmabuf_sync *sync);
+
+static inline struct dmabuf_sync *to_dmabuf_sync(struct seqno_fence *sf)
+{
+ if (!sf)
+ return NULL;
+
+ return container_of(sf, struct dmabuf_sync, sfence);
+}
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 5a0b64cf68b..77fd051b1a4 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -62,6 +62,7 @@ struct reservation_object {
struct fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
struct reservation_object_list *staged;
+ unsigned int accessed_type;
};
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)