author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-02-11 11:59:52 +0100
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-02-11 15:36:05 +0100
commit | eb7a5b6b04271af3b11b81baa8e18dc826b657dc (patch)
tree | 73a2ded4f5eb8269ee64edf2942bd0b4fe8f8542
parent | de8532dd8359dfdaba839ff61fc9e2f05eaf57d3 (diff)
download | libdrm-eb7a5b6b04271af3b11b81baa8e18dc826b657dc.tar.gz, libdrm-eb7a5b6b04271af3b11b81baa8e18dc826b657dc.tar.bz2, libdrm-eb7a5b6b04271af3b11b81baa8e18dc826b657dc.zip
intel: Unconditionally clear ioctl structs
We really have to do this to avoid surprises when extending the ABI later on, especially when growing the structures.
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
-rw-r--r-- | intel/intel_bufmgr_gem.c | 68
1 file changed, 34 insertions(+), 34 deletions(-)
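Background for the change (a minimal standalone sketch, not part of the patch; the struct below is invented purely for illustration): the kernel is allowed to append fields to ioctl argument structs in later ABI revisions, so any bytes userspace leaves uninitialized today can become live input tomorrow. Clearing the whole struct up front keeps old userspace well-defined when that happens.

#include <stdio.h>
#include <string.h>

/* Same helper the patch introduces: zero the entire argument struct. */
#define memclear(s) memset(&s, 0, sizeof(s))

/* Hypothetical ioctl argument that grew a "flags" field in a newer ABI. */
struct drm_foo_example {
	unsigned int handle;
	unsigned int flags;	/* added later; older callers never assign it */
};

int main(void)
{
	struct drm_foo_example args;

	/*
	 * Without the memclear(), "flags" would hold stack garbage and a
	 * newer kernel could read it as a request the caller never made.
	 * Clearing unconditionally keeps the extra bytes at a defined zero.
	 */
	memclear(args);
	args.handle = 42;

	printf("handle=%u flags=%u\n", args.handle, args.flags);
	return 0;
}

The unconditional memset is also why the Valgrind-only VG_CLEAR() wrapper goes away below: the clear now happens in every build, not only when VG() is compiled in, and its cost is negligible next to the ioctl itself.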
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index cf85bb8a..78875fd3 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -74,7 +74,7 @@
 #define VG(x)
 #endif
 
-#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
+#define memclear(s) memset(&s, 0, sizeof(s))
 
 #define DBG(...) do {					\
 	if (bufmgr_gem->bufmgr.debug)			\
@@ -593,7 +593,7 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
 	if (bo_gem->reusable && bo_gem->idle)
 		return false;
 
-	VG_CLEAR(busy);
+	memclear(busy);
 	busy.handle = bo_gem->gem_handle;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
@@ -612,7 +612,7 @@ drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
 {
 	struct drm_i915_gem_madvise madv;
 
-	VG_CLEAR(madv);
+	memclear(madv);
 	madv.handle = bo_gem->gem_handle;
 	madv.madv = state;
 	madv.retained = 1;
@@ -741,7 +741,7 @@ retry:
 
 		bo_gem->bo.size = bo_size;
 
-		VG_CLEAR(create);
+		memclear(create);
 		create.size = bo_size;
 
 		ret = drmIoctl(bufmgr_gem->fd,
@@ -888,7 +888,7 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
 
 	bo_gem->bo.size = size;
 
-	VG_CLEAR(userptr);
+	memclear(userptr);
 	userptr.user_ptr = (__u64)((unsigned long)addr);
 	userptr.user_size = size;
 	userptr.flags = flags;
@@ -972,7 +972,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 		}
 	}
 
-	VG_CLEAR(open_arg);
+	memclear(open_arg);
 	open_arg.name = handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_GEM_OPEN,
@@ -1017,7 +1017,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	bo_gem->global_name = handle;
 	bo_gem->reusable = false;
 
-	VG_CLEAR(get_tiling);
+	memclear(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_TILING,
@@ -1060,7 +1060,7 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 	}
 
 	/* Close this object */
-	VG_CLEAR(close);
+	memclear(close);
 	close.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
 	if (ret != 0) {
@@ -1292,9 +1292,8 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 		DBG("bo_map: %d (%s), map_count=%d\n",
 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-		VG_CLEAR(mmap_arg);
+		memclear(mmap_arg);
 		mmap_arg.handle = bo_gem->gem_handle;
-		mmap_arg.offset = 0;
 		mmap_arg.size = bo->size;
 		ret = drmIoctl(bufmgr_gem->fd,
 			       DRM_IOCTL_I915_GEM_MMAP,
@@ -1316,7 +1315,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	    bo_gem->mem_virtual);
 	bo->virtual = bo_gem->mem_virtual;
 
-	VG_CLEAR(set_domain);
+	memclear(set_domain);
 	set_domain.handle = bo_gem->gem_handle;
 	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
 	if (write_enable)
@@ -1362,7 +1361,7 @@ map_gtt(drm_intel_bo *bo)
 		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
 		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-		VG_CLEAR(mmap_arg);
+		memclear(mmap_arg);
 		mmap_arg.handle = bo_gem->gem_handle;
 
 		/* Get the fake offset back... */
@@ -1430,7 +1429,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 	 * tell it when we're about to use things if we had done
 	 * rendering and it still happens to be bound to the GTT.
 	 */
-	VG_CLEAR(set_domain);
+	memclear(set_domain);
 	set_domain.handle = bo_gem->gem_handle;
 	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
 	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
@@ -1529,7 +1528,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 		 * Unlike GTT set domains, this only does work if the
 		 * buffer should be scanout-related.
 		 */
-		VG_CLEAR(sw_finish);
+		memclear(sw_finish);
 		sw_finish.handle = bo_gem->gem_handle;
 		ret = drmIoctl(bufmgr_gem->fd,
 			       DRM_IOCTL_I915_GEM_SW_FINISH,
@@ -1571,7 +1570,7 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
 	if (bo_gem->is_userptr)
 		return -EINVAL;
 
-	VG_CLEAR(pwrite);
+	memclear(pwrite);
 	pwrite.handle = bo_gem->gem_handle;
 	pwrite.offset = offset;
 	pwrite.size = size;
@@ -1596,7 +1595,7 @@ drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
 	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
 	int ret;
 
-	VG_CLEAR(get_pipe_from_crtc_id);
+	memclear(get_pipe_from_crtc_id);
 	get_pipe_from_crtc_id.crtc_id = crtc_id;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
@@ -1626,7 +1625,7 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
 	if (bo_gem->is_userptr)
 		return -EINVAL;
 
-	VG_CLEAR(pread);
+	memclear(pread);
 	pread.handle = bo_gem->gem_handle;
 	pread.offset = offset;
 	pread.size = size;
@@ -1694,9 +1693,9 @@ drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
 		}
 	}
 
+	memclear(wait);
 	wait.bo_handle = bo_gem->gem_handle;
 	wait.timeout_ns = timeout_ns;
-	wait.flags = 0;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 	if (ret == -1)
 		return -errno;
@@ -1719,7 +1718,7 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
 	struct drm_i915_gem_set_domain set_domain;
 	int ret;
 
-	VG_CLEAR(set_domain);
+	memclear(set_domain);
 	set_domain.handle = bo_gem->gem_handle;
 	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
 	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
@@ -2339,7 +2338,7 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
 	 */
 	drm_intel_add_validate_buffer(bo);
 
-	VG_CLEAR(execbuf);
+	memclear(execbuf);
 	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
 	execbuf.buffer_count = bufmgr_gem->exec_count;
 	execbuf.batch_start_offset = 0;
@@ -2426,7 +2425,7 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	 */
 	drm_intel_add_validate_buffer2(bo, 0);
 
-	VG_CLEAR(execbuf);
+	memclear(execbuf);
 	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
 	execbuf.buffer_count = bufmgr_gem->exec_count;
 	execbuf.batch_start_offset = 0;
@@ -2517,7 +2516,7 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 	struct drm_i915_gem_pin pin;
 	int ret;
 
-	VG_CLEAR(pin);
+	memclear(pin);
 	pin.handle = bo_gem->gem_handle;
 	pin.alignment = alignment;
@@ -2540,7 +2539,7 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
 	struct drm_i915_gem_unpin unpin;
 	int ret;
 
-	VG_CLEAR(unpin);
+	memclear(unpin);
 	unpin.handle = bo_gem->gem_handle;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
@@ -2696,7 +2695,7 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 	pthread_mutex_unlock(&bufmgr_gem->lock);
 
-	VG_CLEAR(get_tiling);
+	memclear(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_TILING,
@@ -2743,7 +2742,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 	if (!bo_gem->global_name) {
 		struct drm_gem_flink flink;
 
-		VG_CLEAR(flink);
+		memclear(flink);
 		flink.handle = bo_gem->gem_handle;
 
 		pthread_mutex_lock(&bufmgr_gem->lock);
@@ -3078,7 +3077,7 @@ static int get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
 {
 	char *devid_override;
-	int devid;
+	int devid = 0;
 	int ret;
 	drm_i915_getparam_t gp;
@@ -3090,8 +3089,7 @@ get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
 		}
 	}
 
-	VG_CLEAR(devid);
-	VG_CLEAR(gp);
+	memclear(gp);
 	gp.param = I915_PARAM_CHIPSET_ID;
 	gp.value = &devid;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@@ -3204,7 +3202,7 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
 	if (!context)
 		return NULL;
 
-	VG_CLEAR(create);
+	memclear(create);
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 	if (ret != 0) {
 		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
@@ -3229,7 +3227,7 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
 	if (ctx == NULL)
 		return;
 
-	VG_CLEAR(destroy);
+	memclear(destroy);
 
 	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
 	destroy.ctx_id = ctx->ctx_id;
@@ -3255,7 +3253,7 @@ drm_intel_get_reset_stats(drm_intel_context *ctx,
 	if (ctx == NULL)
 		return -EINVAL;
 
-	memset(&stats, 0, sizeof(stats));
+	memclear(stats);
 
 	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
 	stats.ctx_id = ctx->ctx_id;
@@ -3285,7 +3283,7 @@ drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
 	struct drm_i915_reg_read reg_read;
 	int ret;
 
-	VG_CLEAR(reg_read);
+	memclear(reg_read);
 	reg_read.offset = offset;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
@@ -3390,7 +3388,7 @@ has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
 		return false;
 	}
 
-	memset(&userptr, 0, sizeof(userptr));
+	memclear(userptr);
 	userptr.user_ptr = (__u64)(unsigned long)ptr;
 	userptr.user_size = pgsz;
@@ -3405,6 +3403,7 @@ retry:
 		return false;
 	}
 
+	memclear(close_bo);
 	close_bo.handle = userptr.handle;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
 	free(ptr);
@@ -3451,6 +3450,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		goto exit;
 	}
 
+	memclear(aperture);
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_APERTURE,
 		       &aperture);
@@ -3500,7 +3500,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->gtt_size -= 256*1024*1024;
 	}
 
-	VG_CLEAR(gp);
+	memclear(gp);
 	gp.value = &tmp;
 	gp.param = I915_PARAM_HAS_EXECBUF2;