author		Inki Dae <inki.dae@samsung.com>		2013-07-17 14:40:48 +0900
committer	MyungJoo Ham <myungjoo.ham@samsung.com>	2013-11-15 13:51:15 +0900
commit		a2baf3c043ccd2ec94f953719de4d68acca6d7c6 (patch)
tree		b1ad17935822107bbf5e06e1ecf7b9f9bb2d932d /include
parent		402f7156db51b0a21e71659c23219369808e4bda (diff)
dmabuf-sync: add buffer synchronization framework
This patch adds a buffer synchronization framework based on DMA BUF[1]
and on ww-mutexes[2] for the lock mechanism.

The purpose of this framework is to provide not only buffer access
control to CPU and DMA but also easy-to-use interfaces for device
drivers and user applications. This framework can be used for all DMA
devices that use system memory as a DMA buffer, especially for most
ARM-based SoCs.

Changelog v5:
- Remove the dependence on reservation_object: the reservation_object
  is used to hook up to ttm and dma-buf for easy sharing of
  reservations across devices. However, dmabuf sync can be used for
  all DMA devices (v4l2- and drm-based drivers), so it doesn't need
  the reservation_object anymore. With regard to this, it adds
  'void *sync' to the dma_buf structure.
- All patches are rebased on mainline, Linux v3.10.

Changelog v4:
- Add a user-side interface for the buffer synchronization mechanism
  and update the descriptions related to it.

Changelog v3:
- Remove cache-operation-relevant code and update the document file.

Changelog v2:
- Use atomic_add_unless to avoid a potential bug.
- Add a macro for checking valid access types.
- Clean up code.

The mechanism of this framework has the following steps:

1. Register dmabufs to a sync object
   - A task gets a new sync object and can add one or more dmabufs
     that the task wants to access. This registering should be
     performed when a device context or an event context, such as a
     page flip event, is created, or before the CPU accesses a shared
     buffer.

	dmabuf_sync_get(a sync object, a dmabuf);

2. Lock a sync object
   - A task tries to lock all dmabufs added to its own sync object.
     The lock mechanism uses ww-mutexes[2] to avoid deadlocks and to
     handle race conditions between CPU and CPU, CPU and DMA, and DMA
     and DMA. Taking a lock means that others cannot access any of
     the locked dmabufs until the task that locked them unlocks them
     all. This locking should be performed before DMA or CPU accesses
     these dmabufs.

	dmabuf_sync_lock(a sync object);

3. Unlock a sync object
   - The task unlocks all dmabufs added to its own sync object.
     Unlocking means that the DMA or CPU accesses to the dmabufs have
     been completed, so others may now access them. This unlocking
     should be performed after DMA or CPU has completed its accesses
     to the dmabufs.

	dmabuf_sync_unlock(a sync object);

4. Unregister one or all dmabufs from a sync object
   - A task unregisters the given dmabufs from the sync object. This
     means that the task no longer wants to lock the dmabufs. The
     unregistering should be performed after DMA or CPU has completed
     its accesses to the dmabufs, or when dmabuf_sync_lock() fails.

	dmabuf_sync_put(a sync object, a dmabuf);
	dmabuf_sync_put_all(a sync object);

The described steps may be summarized as:

	get -> lock -> CPU or DMA access to buffer(s) -> unlock -> put

This framework includes the following two features.

1. Read (shared) and write (exclusive) locks
   - A task is required to declare the access type when it registers
     a dmabuf: READ, WRITE, READ DMA, or WRITE DMA. Example:

	struct dmabuf_sync *sync;

	sync = dmabuf_sync_init(NULL, "test sync");
	dmabuf_sync_get(sync, dmabuf, DMA_BUF_ACCESS_R);
	...

     The following can be used as access types:
	DMA_BUF_ACCESS_R     - CPU will access a buffer for read.
	DMA_BUF_ACCESS_W     - CPU will access a buffer for read or write.
	DMA_BUF_ACCESS_DMA_R - DMA will access a buffer for read.
	DMA_BUF_ACCESS_DMA_W - DMA will access a buffer for read or write.

2. Mandatory resource releasing
   - A task cannot hold a lock indefinitely: it might never unlock a
     buffer after taking a lock on it. In this case, a timer handler
     for the corresponding sync object is fired after five seconds
     (the default), and the timed-out buffer is then unlocked by a
     workqueue handler to avoid lockups and to release the buffer's
     resources.

How to use the interfaces from a device driver:

1. Allocate and initialize a sync object:

	struct dmabuf_sync *sync;

	sync = dmabuf_sync_init(NULL, "test sync");
	...

2. Add a dmabuf to the sync object when setting up DMA-buffer-relevant
   registers:

	dmabuf_sync_get(sync, dmabuf, DMA_BUF_ACCESS_R);
	...

3. Lock all dmabufs of the sync object before DMA or CPU accesses
   the dmabufs:

	dmabuf_sync_lock(sync);
	...

4. Now the CPU or DMA can access all dmabufs locked in step 3.

5. Unlock all dmabufs added to the sync object after the DMA or CPU
   accesses to these dmabufs are completed:

	dmabuf_sync_unlock(sync);

   And call the following functions to release all resources:

	dmabuf_sync_put_all(sync);
	dmabuf_sync_fini(sync);

You can refer to actual example code:
	"drm/exynos: add dmabuf sync support for g2d driver" and
	"drm/exynos: add dmabuf sync support for kms framework" from
	https://git.kernel.org/cgit/linux/kernel/git/daeinki/
	drm-exynos.git/log/?h=dmabuf-sync

This framework also exports the fcntl system call[3] as the interface
to user space. User space sees a buffer object as a dma-buf file
descriptor, so an fcntl() call on that file descriptor locks the
buffer region managed by the dma-buf object.

How to use the interfaces from a user application:

	struct flock filelock;

1. Lock a dma buf:

	filelock.l_type = F_WRLCK or F_RDLCK;

	/* lock the entire region of the dma buf. */
	filelock.l_whence = SEEK_CUR;
	filelock.l_start = 0;
	filelock.l_len = 0;

	fcntl(dmabuf fd, F_SETLKW or F_SETLK, &filelock);
	...
	CPU access to the dma buf

2. Unlock a dma buf:

	filelock.l_type = F_UNLCK;

	fcntl(dmabuf fd, F_SETLKW or F_SETLK, &filelock);

   A close(dmabuf fd) call would also unlock the dma buf. For more
   detail, please refer to [3].

References:
[1] http://lwn.net/Articles/470339/
[2] https://patchwork.kernel.org/patch/2625361/
[3] http://linux.die.net/man/2/fcntl

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
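For reference, the driver-side steps above can be put together in one
sketch (assuming <linux/dmabuf-sync.h> and <linux/err.h>). The wrapper
function example_dma_op() and its error handling are illustrative
assumptions; only the dmabuf_sync_* calls and the access flag come
from the header added by this patch.

	static int example_dma_op(struct dma_buf *dmabuf)
	{
		struct dmabuf_sync *sync;
		int ret;

		sync = dmabuf_sync_init(NULL, "example sync");
		if (IS_ERR(sync))
			return PTR_ERR(sync);

		/* Declare how this task will access the buffer. */
		ret = dmabuf_sync_get(sync, dmabuf, DMA_BUF_ACCESS_DMA_RW);
		if (ret < 0)
			goto err_fini;

		/* Block until every registered dmabuf is locked. */
		ret = dmabuf_sync_lock(sync);
		if (ret < 0)
			goto err_put;

		/* ... DMA or CPU access to the locked dmabuf ... */

		dmabuf_sync_unlock(sync);

	err_put:
		dmabuf_sync_put_all(sync);
	err_fini:
		dmabuf_sync_fini(sync);
		return ret;
	}

Likewise, a minimal user-space sketch of the fcntl() interface. How
the dma-buf fd is obtained is outside the scope of this patch, and the
helper name dmabuf_cpu_access() is an assumption:

	#include <fcntl.h>
	#include <string.h>

	static int dmabuf_cpu_access(int dmabuf_fd)
	{
		struct flock filelock;

		memset(&filelock, 0, sizeof(filelock));

		/* Exclusive (write) lock over the entire buffer. */
		filelock.l_type = F_WRLCK;
		filelock.l_whence = SEEK_CUR;
		filelock.l_start = 0;
		filelock.l_len = 0;

		/* F_SETLKW blocks until the lock is granted. */
		if (fcntl(dmabuf_fd, F_SETLKW, &filelock) < 0)
			return -1;

		/* ... CPU access to the dma buf ... */

		/* Unlock; close(dmabuf_fd) would also release the lock. */
		filelock.l_type = F_UNLCK;
		return fcntl(dmabuf_fd, F_SETLKW, &filelock);
	}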
Diffstat (limited to 'include')
-rw-r--r--	include/linux/dma-buf.h		16
-rw-r--r--	include/linux/dmabuf-sync.h	178
2 files changed, 194 insertions, 0 deletions
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index dfac5ed3112..01096736d17 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -115,6 +115,7 @@ struct dma_buf_ops {
* @exp_name: name of the exporter; useful for debugging.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
+ * @sync: sync object linked to this dma-buf
*/
struct dma_buf {
size_t size;
@@ -128,6 +129,7 @@ struct dma_buf {
const char *exp_name;
struct list_head list_node;
void *priv;
+ void *sync;
};
/**
@@ -148,6 +150,20 @@ struct dma_buf_attachment {
void *priv;
};
+#define DMA_BUF_ACCESS_R 0x1
+#define DMA_BUF_ACCESS_W 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_RW (DMA_BUF_ACCESS_R | DMA_BUF_ACCESS_W)
+#define DMA_BUF_ACCESS_DMA_R (DMA_BUF_ACCESS_R | DMA_BUF_ACCESS_DMA)
+#define DMA_BUF_ACCESS_DMA_W (DMA_BUF_ACCESS_W | DMA_BUF_ACCESS_DMA)
+#define DMA_BUF_ACCESS_DMA_RW (DMA_BUF_ACCESS_DMA_R | DMA_BUF_ACCESS_DMA_W)
+#define IS_VALID_DMA_BUF_ACCESS_TYPE(t)	((t) == DMA_BUF_ACCESS_R || \
+					 (t) == DMA_BUF_ACCESS_W || \
+					 (t) == DMA_BUF_ACCESS_DMA_R || \
+					 (t) == DMA_BUF_ACCESS_DMA_W || \
+					 (t) == DMA_BUF_ACCESS_RW || \
+					 (t) == DMA_BUF_ACCESS_DMA_RW)
+
/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
diff --git a/include/linux/dmabuf-sync.h b/include/linux/dmabuf-sync.h
new file mode 100644
index 00000000000..2502ad61a69
--- /dev/null
+++ b/include/linux/dmabuf-sync.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/dma-buf.h>
+
+enum dmabuf_sync_status {
+ DMABUF_SYNC_GOT = 1,
+ DMABUF_SYNC_LOCKED,
+};
+
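+/*
+ * A structure for dmabuf_sync_reservation.
+ *
+ * Field notes below are inferred from the field names, from the
+ * initialization in dmabuf_sync_reservation_init(), and from the
+ * commit message:
+ *
+ * @sync_lock: A ww-mutex used to lock this dma-buf for access.
+ * @lock: A mutex protecting the members of this reservation.
+ * @shared_cnt: A count of tasks sharing a read (shared) lock.
+ * @accessed_type: The access type of the most recent access.
+ * @shared: Non-zero while the buffer is locked for shared access.
+ * @locked: Non-zero while sync_lock is held.
+ */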
+struct dmabuf_sync_reservation {
+ struct ww_mutex sync_lock;
+ struct mutex lock;
+ atomic_t shared_cnt;
+ unsigned int accessed_type;
+ unsigned int shared;
+ unsigned int locked;
+};
+
+/*
+ * A structure for dmabuf_sync_object.
+ *
+ * @head: A list head to be added to the syncs list of a dmabuf_sync.
+ * @robj: A dmabuf_sync_reservation object of the dma-buf.
+ * @dmabuf: A dma_buf object.
+ * @access_type: Indicates how the current task tries to access
+ *	the given buffer.
+ */
+struct dmabuf_sync_object {
+ struct list_head head;
+ struct dmabuf_sync_reservation *robj;
+ struct dma_buf *dmabuf;
+ unsigned int access_type;
+};
+
+/*
+ * A structure for dmabuf_sync.
+ *
+ * @syncs: A list head of sync objects added to this dmabuf_sync.
+ * @list: A list entry used as a committed list node.
+ * @lock: A mutex protecting the current sync object.
+ * @ctx: The current acquire context for the ww-mutex.
+ * @work: A work struct used to release resources at timeout.
+ * @priv: Private data.
+ * @name: A string naming the dmabuf sync owner.
+ * @timer: A timer used to avoid lockups and to release resources.
+ * @status: The current status (DMABUF_SYNC_GOT or DMABUF_SYNC_LOCKED).
+ */
+struct dmabuf_sync {
+ struct list_head syncs;
+ struct list_head list;
+ struct mutex lock;
+ struct ww_acquire_ctx ctx;
+ struct work_struct work;
+ void *priv;
+ char name[64];
+ struct timer_list timer;
+ unsigned int status;
+};
+
+#ifdef CONFIG_DMABUF_SYNC
+
+extern struct ww_class dmabuf_sync_ww_class;
+
+static inline void dmabuf_sync_reservation_init(struct dma_buf *dmabuf)
+{
+ struct dmabuf_sync_reservation *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return;
+
+ dmabuf->sync = obj;
+
+ ww_mutex_init(&obj->sync_lock, &dmabuf_sync_ww_class);
+
+ mutex_init(&obj->lock);
+ atomic_set(&obj->shared_cnt, 1);
+}
+
+static inline void dmabuf_sync_reservation_fini(struct dma_buf *dmabuf)
+{
+ struct dmabuf_sync_reservation *obj;
+
+ if (!dmabuf->sync)
+ return;
+
+ obj = dmabuf->sync;
+
+ ww_mutex_destroy(&obj->sync_lock);
+
+ kfree(obj);
+}
+
+extern bool is_dmabuf_sync_supported(void);
+
+extern struct dmabuf_sync *dmabuf_sync_init(void *priv, const char *name);
+
+extern void dmabuf_sync_fini(struct dmabuf_sync *sync);
+
+extern int dmabuf_sync_lock(struct dmabuf_sync *sync);
+
+extern int dmabuf_sync_unlock(struct dmabuf_sync *sync);
+
+extern int dmabuf_sync_single_lock(struct dma_buf *dmabuf, unsigned int type,
+					bool wait);
+
+extern void dmabuf_sync_single_unlock(struct dma_buf *dmabuf);
+
+extern int dmabuf_sync_get(struct dmabuf_sync *sync, void *sync_buf,
+ unsigned int type);
+
+extern void dmabuf_sync_put(struct dmabuf_sync *sync, struct dma_buf *dmabuf);
+
+extern void dmabuf_sync_put_all(struct dmabuf_sync *sync);
+
+#else
+
+static inline void dmabuf_sync_reservation_init(struct dma_buf *dmabuf) { }
+
+static inline void dmabuf_sync_reservation_fini(struct dma_buf *dmabuf) { }
+
+static inline bool is_dmabuf_sync_supported(void) { return false; }
+
+static inline struct dmabuf_sync *dmabuf_sync_init(void *priv,
+						const char *name)
+{
+	/* No sync support: return an error, not ERR_PTR(0), which is NULL. */
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline void dmabuf_sync_fini(struct dmabuf_sync *sync) { }
+
+static inline int dmabuf_sync_lock(struct dmabuf_sync *sync)
+{
+ return 0;
+}
+
+static inline int dmabuf_sync_unlock(struct dmabuf_sync *sync)
+{
+ return 0;
+}
+
+static inline int dmabuf_sync_single_lock(struct dma_buf *dmabuf,
+ unsigned int type,
+ bool wait)
+{
+ return 0;
+}
+
+static inline void dmabuf_sync_single_unlock(struct dma_buf *dmabuf) { }
+
+static inline int dmabuf_sync_get(struct dmabuf_sync *sync,
+ void *sync_buf,
+ unsigned int type)
+{
+ return 0;
+}
+
+static inline void dmabuf_sync_put(struct dmabuf_sync *sync,
+ struct dma_buf *dmabuf) { }
+
+static inline void dmabuf_sync_put_all(struct dmabuf_sync *sync) { }
+
+#endif