From 9832709c7a9968f213d321e0798a2039237c44f2 Mon Sep 17 00:00:00 2001
From: YoungJun Cho
Date: Thu, 27 Jun 2013 08:39:58 +0900
Subject: drm/gem: add mutex lock when using drm_gem_mmap_obj

drm_gem_mmap_obj() has to be called with dev->struct_mutex held, but
some callers do not hold it. Add the missing locking to those callers,
and add a lockdep assertion to drm_gem_mmap_obj() to check that it is
called with the mutex held.

Signed-off-by: YoungJun Cho
Signed-off-by: Seung-Woo Kim
Signed-off-by: Kyungmin Park
Reviewed-by: Maarten Lankhorst
Reviewed-by: Laurent Pinchart
Reviewed-by: Rob Clark
Signed-off-by: Dave Airlie

Conflicts:
	drivers/gpu/drm/drm_gem_cma_helper.c
	drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c

Change-Id: Icb683c218b3455f113c073c33166faab5a7fcc4c
---
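
A caller-side illustration (placed below the "---" cut line, which
"git am" ignores): the locking pattern the new lockdep assertion
enforces. The handler name is hypothetical; the body mirrors the
drm_gem_cma_dmabuf_mmap() hunk in this patch.

	/*
	 * Hypothetical dma-buf mmap handler: any path that reaches
	 * drm_gem_mmap_obj() must hold dev->struct_mutex, otherwise
	 * the lockdep assertion added by this patch fires.
	 */
	static int my_dmabuf_mmap(struct dma_buf *dmabuf,
				  struct vm_area_struct *vma)
	{
		struct drm_gem_object *obj = dmabuf->priv;
		struct drm_device *dev = obj->dev;
		int ret;

		mutex_lock(&dev->struct_mutex);
		ret = drm_gem_mmap_obj(obj, obj->size, vma);
		mutex_unlock(&dev->struct_mutex);

		return ret;
	}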
 drivers/gpu/drm/drm_gem.c                 |   4 +
 drivers/gpu/drm/drm_gem_cma_helper.c      | 286 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c |   1 +
 3 files changed, 291 insertions(+)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f94c4646f1c..22c4acde01b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -722,6 +722,8 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * the GEM object is not looked up based on its fake offset. To implement the
  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
  *
+ * NOTE: This function has to be called with dev->struct_mutex held.
+ *
  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
  * size, or if no gem_vm_ops are provided.
  */
@@ -730,6 +732,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 {
 	struct drm_device *dev = obj->dev;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	/* Check for valid size. */
 	if (obj_size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0a7e011509b..33121449038 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -270,3 +270,289 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
+
+/* -----------------------------------------------------------------------------
+ * DMA-BUF
+ */
+
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
+				     struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach;
+
+	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
+	if (!cma_attach)
+		return -ENOMEM;
+
+	cma_attach->dir = DMA_NONE;
+	attach->priv = cma_attach;
+
+	return 0;
+}
+
+static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
+				      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (cma_attach == NULL)
+		return;
+
+	sgt = &cma_attach->sgt;
+
+	if (cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+			     cma_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
+		       enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *drm = cma_obj->base.dev;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	unsigned int i;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("\n");
+
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (cma_attach->dir == dir)
+		return &cma_attach->sgt;
+
+	/* Two mappings with different directions for the same attachment are
+	 * not allowed.
+	 */
+	if (WARN_ON(cma_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
+	sgt = &cma_attach->sgt;
+
+	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&drm->struct_mutex);
+
+	rd = cma_obj->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sg_free_table(sgt);
+		sgt = ERR_PTR(-EIO);
+		goto done;
+	}
+
+	cma_attach->dir = dir;
+	attach->priv = cma_attach;
+
+	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
+
+done:
+	mutex_unlock(&drm->struct_mutex);
+	return sgt;
+}
+
+ */ + drm_gem_object_unreference_unlocked(&cma_obj->base); + } +} + +static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf, + unsigned long page_num) +{ + /* TODO */ + + return NULL; +} + +static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long page_num, void *addr) +{ + /* TODO */ +} + +static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf, + unsigned long page_num) +{ + /* TODO */ + + return NULL; +} + +static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf, + unsigned long page_num, void *addr) +{ + /* TODO */ +} + +static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj = dmabuf->priv; + struct drm_gem_object *gem_obj = &cma_obj->base; + struct drm_device *dev = gem_obj->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma); + mutex_unlock(&dev->struct_mutex); + if (ret < 0) + return ret; + + return drm_gem_cma_mmap_obj(cma_obj, vma); +} + +static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf) +{ + struct drm_gem_cma_object *cma_obj = dmabuf->priv; + + return cma_obj->vaddr; +} + +static struct dma_buf_ops drm_gem_cma_dmabuf_ops = { + .attach = drm_gem_cma_dmabuf_attach, + .detach = drm_gem_cma_dmabuf_detach, + .map_dma_buf = drm_gem_cma_dmabuf_map, + .unmap_dma_buf = drm_gem_cma_dmabuf_unmap, + .kmap = drm_gem_cma_dmabuf_kmap, + .kmap_atomic = drm_gem_cma_dmabuf_kmap_atomic, + .kunmap = drm_gem_cma_dmabuf_kunmap, + .kunmap_atomic = drm_gem_cma_dmabuf_kunmap_atomic, + .mmap = drm_gem_cma_dmabuf_mmap, + .vmap = drm_gem_cma_dmabuf_vmap, + .release = drm_gem_cma_dmabuf_release, +}; + +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm, + struct drm_gem_object *obj, int flags) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops, + cma_obj->base.size, flags); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export); + +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm, + struct dma_buf *dma_buf) +{ + struct drm_gem_cma_object *cma_obj; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + int ret; + + DRM_DEBUG_PRIME("%s\n", __FILE__); + + /* is this one of own objects? */ + if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) { + struct drm_gem_object *obj; + + cma_obj = dma_buf->priv; + obj = &cma_obj->base; + + /* is it from our device? */ + if (obj->dev == drm) { + /* + * Importing dmabuf exported from out own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_reference(obj); + dma_buf_put(dma_buf); + return obj; + } + } + + /* Create a CMA GEM buffer. */ + cma_obj = __drm_gem_cma_create(drm, dma_buf->size); + if (IS_ERR(cma_obj)) + return ERR_PTR(PTR_ERR(cma_obj)); + + /* Attach to the buffer and map it. Make sure the mapping is contiguous + * on the device memory bus, as that's all we support. + */ + attach = dma_buf_attach(dma_buf, drm->dev); + if (IS_ERR(attach)) { + ret = -EINVAL; + goto error_gem_free; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(sgt)) { + ret = sgt ? 
+	if (sgt->nents != 1) {
+		ret = -EINVAL;
+		goto error_buf_unmap;
+	}
+
+	cma_obj->base.import_attach = attach;
+	cma_obj->paddr = sg_dma_address(sgt->sgl);
+	cma_obj->sgt = sgt;
+
+	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
+			dma_buf->size);
+
+	return &cma_obj->base;
+
+error_buf_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+error_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+error_gem_free:
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index be7cd97a0db..6d5ae6b5d2b 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -144,6 +144,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 		struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = buffer->priv;
+	struct drm_device *dev = obj->dev;
 	int ret = 0;
 
 	if (WARN_ON(!obj->filp))
-- 
cgit v1.2.3
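
For context, the helpers exported by the drm_gem_cma_helper.c hunk are
meant to be plugged into a driver's struct drm_driver. A minimal sketch
of that wiring, assuming the DRM core of this patch's generation
(~3.10-era field names) and a hypothetical "my_driver":

	static struct drm_driver my_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
		/* generic PRIME handle <-> fd plumbing */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		/* CMA-backed dma-buf export/import from this patch */
		.gem_prime_export	= drm_gem_cma_dmabuf_export,
		.gem_prime_import	= drm_gem_cma_dmabuf_import,
		/* standard CMA GEM hooks, shown for completeness */
		.gem_free_object	= drm_gem_cma_free_object,
		.gem_vm_ops		= &drm_gem_cma_vm_ops,
	};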