author     Inki Dae <inki.dae@samsung.com>          2013-07-22 12:42:35 +0900
committer  Chanho Park <chanho61.park@samsung.com>  2014-11-18 11:43:39 +0900
commit     1c2486404dac7b2835574a7aa73e12312f4d5401 (patch)
tree       cbd0b9f24ce96156f177fede4627aa74418d9158 /drivers/base
parent     ccd9fc42ccafc0513efcf7a92fa42a0a3760550d (diff)
dmabuf-sync: fix sync lock to multiple read
This patch fixes an issue where a sync object was unlocked even though shared_cnt was greater than 1 and sobj->access_type was write.

The numbers below show shared_cnt as three sync objects share one buffer for read and a fourth requests write access:

                       r  r  r  w
    when write locked  1  2  3  3  <- blocked
    when read unlocked 2
    when read unlocked 1
    when read unlocked 1  <- woken up

Signed-off-by: Inki Dae <inki.dae@samsung.com>
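For illustration, below is a minimal userspace C sketch, not the driver's code, of the shared_cnt scheme the patch converges on: read-read sharers only bump the counter, unlock decrements it with the semantics of the kernel's atomic_add_unless(&shared_cnt, -1, 1) (decrement unless the value is already 1), and only the last unlocker releases the underlying sync lock, which is what wakes a blocked writer. The struct and helper names (resource_model, read_lock, unlock, dec_unless_last) are invented stand-ins, and atomicity is omitted.

#include <stdio.h>
#include <stdbool.h>

struct resource_model {
	int shared_cnt;		/* number of concurrent read lockers */
	bool sync_locked;	/* stands in for the ww_mutex being held */
};

/* Mirror of atomic_add_unless(&cnt, -1, 1): decrement unless value is 1. */
static bool dec_unless_last(int *cnt)
{
	if (*cnt == 1)
		return false;	/* we are the last sharer; caller must unlock */
	(*cnt)--;
	return true;		/* other sharers remain; nothing more to do */
}

static void read_lock(struct resource_model *r)
{
	if (r->sync_locked) {
		r->shared_cnt++;	/* read + read: just join the sharers */
		return;
	}
	r->sync_locked = true;		/* first locker takes the real lock */
	r->shared_cnt = 1;
}

static void unlock(struct resource_model *r)
{
	if (dec_unless_last(&r->shared_cnt))
		return;			/* not the last sharer: lock stays held */
	r->sync_locked = false;		/* last sharer releases the sync lock */
	printf("sync lock released, blocked writer can run\n");
}

int main(void)
{
	struct resource_model r = { 0, false };

	read_lock(&r);	/* shared_cnt = 1, takes the sync lock      */
	read_lock(&r);	/* shared_cnt = 2, read + read: just shares */
	read_lock(&r);	/* shared_cnt = 3, a writer would now block */

	unlock(&r);	/* 3 -> 2, sync lock stays held    */
	unlock(&r);	/* 2 -> 1, sync lock stays held    */
	unlock(&r);	/* last sharer: sync lock released */
	return 0;
}

Running the sketch prints the release message only after the third unlock, matching the 3 -> 2 -> 1 sequence in the message above.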
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/dmabuf-sync.c  |  43
1 file changed, 13 insertions(+), 30 deletions(-)
diff --git a/drivers/base/dmabuf-sync.c b/drivers/base/dmabuf-sync.c
index a4b8a50a2fe..14c0c6f6d42 100644
--- a/drivers/base/dmabuf-sync.c
+++ b/drivers/base/dmabuf-sync.c
@@ -63,8 +63,7 @@ static void dmabuf_sync_timeout_worker(struct work_struct *work)
continue;
}
- if (sobj->robj->shared &&
- atomic_add_unless(&sobj->robj->shared_cnt, -1, 1)) {
+ if (atomic_add_unless(&sobj->robj->shared_cnt, -1, 1)) {
mutex_unlock(&sobj->robj->lock);
continue;
}
@@ -74,6 +73,7 @@ static void dmabuf_sync_timeout_worker(struct work_struct *work)
ww_mutex_unlock(&sobj->robj->sync_lock);
mutex_lock(&sobj->robj->lock);
+ sobj->robj->locked = false;
if (sobj->access_type & DMA_BUF_ACCESS_R)
printk(KERN_WARNING "%s: r-unlocked = 0x%x\n",
@@ -183,7 +183,6 @@ retry:
if (sobj->robj->accessed_type & DMA_BUF_ACCESS_R &&
sobj->access_type & DMA_BUF_ACCESS_R) {
atomic_inc(&sobj->robj->shared_cnt);
- sobj->robj->shared = true;
mutex_unlock(&sobj->robj->lock);
continue;
}
@@ -278,29 +277,17 @@ static void dmabuf_sync_unlock_objs(struct dmabuf_sync *sync,
list_for_each_entry(sobj, &sync->syncs, head) {
mutex_lock(&sobj->robj->lock);
- if (sobj->robj->shared) {
- if (atomic_add_unless(&sobj->robj->shared_cnt, -1,
- 1)) {
- mutex_unlock(&sobj->robj->lock);
- continue;
- }
-
- mutex_unlock(&sobj->robj->lock);
-
- ww_mutex_unlock(&sobj->robj->sync_lock);
-
- mutex_lock(&sobj->robj->lock);
- sobj->robj->shared = false;
- sobj->robj->locked = false;
- } else {
+ if (atomic_add_unless(&sobj->robj->shared_cnt, -1, 1)) {
mutex_unlock(&sobj->robj->lock);
+ continue;
+ }
- ww_mutex_unlock(&sobj->robj->sync_lock);
+ mutex_unlock(&sobj->robj->lock);
- mutex_lock(&sobj->robj->lock);
- sobj->robj->locked = false;
- }
+ ww_mutex_unlock(&sobj->robj->sync_lock);
+ mutex_lock(&sobj->robj->lock);
+ sobj->robj->locked = false;
mutex_unlock(&sobj->robj->lock);
}
@@ -585,7 +572,6 @@ int dmabuf_sync_single_lock(struct dma_buf *dmabuf, unsigned int type,
/* Don't lock in case of read and read. */
if (robj->accessed_type & DMA_BUF_ACCESS_R && type & DMA_BUF_ACCESS_R) {
atomic_inc(&robj->shared_cnt);
- robj->shared = true;
mutex_unlock(&robj->lock);
return 0;
}
@@ -635,13 +621,10 @@ void dmabuf_sync_single_unlock(struct dma_buf *dmabuf)
mutex_lock(&robj->lock);
- if (robj->shared) {
- if (atomic_add_unless(&robj->shared_cnt, -1 , 1)) {
- mutex_unlock(&robj->lock);
- return;
- }
-
- robj->shared = false;
+ if (atomic_add_unless(&robj->shared_cnt, -1 , 1)) {
+ mutex_unlock(&robj->lock);
+ dma_buf_put(dmabuf);
+ return;
}
mutex_unlock(&robj->lock);