author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-08-15 00:02:46 +0200
committer  Chanho Park <chanho61.park@samsung.com>   2014-11-21 19:12:24 +0900
commit     7f663e197afad44296b98c1e69c2ab28721ebe0b (patch)
tree       148ad8668f7505216e94868846f4cb818805c87a /drivers
parent     e091ad0f24a0146820ab758a17f5915fec0ea747 (diff)
drm/prime: proper locking+refcounting for obj->dma_buf link
The export dma-buf cache is semantically similar to a flink name. So semantically it makes sense to treat it the same and remove the name (i.e. the dma_buf pointer) and its references when the last gem handle disappears.

Again we need to be careful, but doubly so: not only could someone race an export against a gem close ioctl (so we need to recheck obj->handle_count again when assigning the new name), but multiple exports can also race against each other. This is prevented by holding dev->object_name_lock across the entire section which touches obj->dma_buf.

With the new scheme we also need to reinstate the obj->dma_buf link at import time (in case the only reference userspace held in between was through the dma-buf fd and not through any native gem handle). For simplicity we don't check whether it's a native object but unconditionally set up that link - with the new scheme of removing the obj->dma_buf reference when the last handle disappears we can do that.

To make it clear that this is not just for exported buffers anymore, also rename it from export_dma_buf to dma_buf.

To make sure that no one can race an fd_to_handle or handle_to_fd with gem_close we use the same trick as in flink of extending the dev->object_name_lock critical section. With this change we finally have a guaranteed 1:1 relationship (at least for native objects) between gem objects and dma-bufs, even accounting for races (which can happen since the dma-buf itself holds a reference while in-flight).

This prevents igt/prime_self_import/export-vs-gem_close-race from Oopsing the kernel.

There is still a leak, though, since the per-file-priv dma-buf/handle cache handling is racy. That will be fixed in a later patch.

v2: Remove the bogus dma_buf_put from the export_and_register_object failure path if we've raced with the handle count dropping to 0.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Chanho Park <chanho61.park@samsung.com>

Conflicts:
	drivers/gpu/drm/drm_gem.c

Change-Id: I915b0e73cedffa0ba358cf00510e19dccfcb4703
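For orientation, here is a condensed sketch of the export-path pattern the patch establishes (simplified from the drm_prime.c hunks below; the fd plumbing and most error handling are trimmed, so this is illustrative rather than the exact kernel code):

	/* Everything that touches obj->dma_buf runs under dev->object_name_lock. */
	mutex_lock(&dev->object_name_lock);
	if (obj->dma_buf) {
		/* Already exported: reuse the cached dma-buf and take another reference. */
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
	} else if (obj->handle_count == 0) {
		/* Raced with the last gem_close: refuse to create a new export. */
		dmabuf = ERR_PTR(-ENOENT);
	} else {
		/* First export: create the dma-buf, cache it, take the cache's reference. */
		dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
		if (!IS_ERR(dmabuf)) {
			obj->dma_buf = dmabuf;
			get_dma_buf(obj->dma_buf);
		}
	}
	mutex_unlock(&dev->object_name_lock);

The matching teardown happens when the last gem handle goes away: drm_gem_object_handle_unreference_unlocked() takes the same lock, and drm_gem_object_exported_dma_buf_free() drops the cached reference and clears obj->dma_buf.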
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_fops.c    1
-rw-r--r--  drivers/gpu/drm/drm_gem.c    72
-rw-r--r--  drivers/gpu/drm/drm_prime.c  70
3 files changed, 126 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 59b52b2d532..3653955746e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -535,6 +535,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 239ef30f4a6..3c30bda36af 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -208,12 +208,78 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
drm_prime_remove_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
- if (obj->export_dma_buf) {
+
+ /*
+ * Note: obj->dma_buf can't disappear as long as we still hold a
+ * handle reference in obj->handle_count.
+ */
+ if (obj->dma_buf) {
drm_prime_remove_buf_handle(&filp->prime,
- obj->export_dma_buf);
+ obj->dma_buf);
+ }
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
+ */
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+ /* Unbreak the reference cycle if we have an exported dma_buf. */
+ if (obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (WARN_ON(obj->handle_count == 0))
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ mutex_lock(&obj->dev->object_name_lock);
+ if (--obj->handle_count == 0) {
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_exported_dma_buf_free(obj);
+ }
+ mutex_unlock(&obj->dev->object_name_lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -557,6 +623,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
+ WARN_ON(obj->dma_buf);
+
if (obj->filp)
fput(obj->filp);
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index acb5c5b58f7..1e54b22f8f0 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -193,11 +193,8 @@ static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- if (obj->export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(obj);
- }
+ /* drop the reference on the export fd holds */
+ drm_gem_object_unreference_unlocked(obj);
}
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
@@ -298,6 +295,37 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ uint32_t flags)
+{
+ struct dma_buf *dmabuf;
+
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ dmabuf = ERR_PTR(-ENOENT);
+ return dmabuf;
+ }
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ return dmabuf;
+ }
+
+ /*
+ * Note that callers do not need to clean up the export cache
+ * since the check for obj->handle_count guarantees that someone
+ * will clean it up.
+ */
+ obj->dma_buf = dmabuf;
+ get_dma_buf(obj->dma_buf);
+
+ return dmabuf;
+}
+
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
@@ -313,15 +341,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
/* re-export the original imported object */
if (obj->import_attach) {
dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
goto out_have_obj;
}
- if (obj->export_dma_buf) {
- dmabuf = obj->export_dma_buf;
+ mutex_lock(&dev->object_name_lock);
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
+ dmabuf = obj->dma_buf;
+ mutex_unlock(&dev->object_name_lock);
goto out_have_obj;
}
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ dmabuf = export_and_register_object(dev, obj, flags);
+ mutex_unlock(&dev->object_name_lock);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -329,14 +362,13 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
ret = PTR_ERR(dmabuf);
goto out;
}
- obj->export_dma_buf = dmabuf;
mutex_lock(&file_priv->prime.lock);
/* if we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back
*/
ret = drm_prime_add_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
+ dmabuf, handle);
if (ret)
goto fail_put_dmabuf;
@@ -349,7 +381,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
return 0;
out_have_obj:
- get_dma_buf(dmabuf);
ret = dma_buf_fd(dmabuf, flags);
if (ret < 0) {
dma_buf_put(dmabuf);
@@ -365,8 +396,6 @@ fail_rm_handle:
dmabuf);
mutex_unlock(&file_priv->prime.lock);
fail_put_dmabuf:
- /* clear NOT to be checked when releasing dma_buf */
- obj->export_dma_buf = NULL;
dma_buf_put(dmabuf);
out:
drm_gem_object_unreference_unlocked(obj);
@@ -448,13 +477,22 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
goto out_put;
/* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
obj = dev->driver->gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto out_put;
+ goto out_unlock;
+ }
+
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
}
- ret = drm_gem_handle_create(file_priv, obj, handle);
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
goto out_put;
@@ -475,6 +513,8 @@ fail:
* to detach.. which seems ok..
*/
drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
out_put:
dma_buf_put(dma_buf);
mutex_unlock(&file_priv->prime.lock);