author		Chris Wilson <chris@chris-wilson.co.uk>	2010-12-03 02:05:05 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-12-03 14:05:30 +0000
commit		55c5f1876e2329a938955967f5d45c814e50beb5 (patch)
tree		f2de8d31a03da940a9bd940db81bd788ae9f0dfb
parent		3cc74044ce3546cc7dc2e918cbabbb41a77f4026 (diff)
download	xf86-video-intel-55c5f1876e2329a938955967f5d45c814e50beb5.tar.gz
		xf86-video-intel-55c5f1876e2329a938955967f5d45c814e50beb5.tar.bz2
		xf86-video-intel-55c5f1876e2329a938955967f5d45c814e50beb5.zip
Wait on the current buffer to complete when running synchronously.
And remove the vestigial wait upon changing the crtc, as this is more properly done in the kernel.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	src/intel.h		1
-rw-r--r--	src/intel_batchbuffer.c	32
-rw-r--r--	src/intel_batchbuffer.h	1
-rw-r--r--	src/intel_display.c	3
4 files changed, 3 insertions, 34 deletions
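In short, a simplified sketch of the change (names taken from the hunks below, surrounding code elided): intel_batch_submit() no longer stashes the batch in last_batch_bo for a later GTT-map sync. When DEBUG_FLUSH_WAIT is set it now blocks on the batch it has just emitted via libdrm's drm_intel_bo_wait_rendering(), then simply drops its reference:

	/* New synchronous path in intel_batch_submit(), simplified
	 * from the src/intel_batchbuffer.c hunk below. */
	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
		drm_intel_bo_wait_rendering(intel->batch_bo); /* block until the GPU finishes this batch */
	dri_bo_unreference(intel->batch_bo);                  /* no last_batch_bo to keep around any more */
	intel_next_batch(scrn);

With intel_batch_wait_last() gone, the wait on mode set in intel_crtc_apply() is dropped as well; per the commit message, the kernel now handles that.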
diff --git a/src/intel.h b/src/intel.h
index 57ce2cfd3..97fc497e4 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -316,7 +316,6 @@ typedef struct intel_screen_private {
 	/** Number of bytes to be emitted in the current BEGIN_BATCH. */
 	uint32_t batch_emitting;
 	dri_bo *batch_bo;
-	dri_bo *last_batch_bo;
 	/** Whether we're in a section of code that can't tolerate flushing */
 	Bool in_batch_atomic;
 	/** Ending batch_used that was verified by intel_start_batch_atomic() */
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 378a78c74..8c7ab3d81 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -102,11 +102,6 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 		intel->batch_bo = NULL;
 	}
 
-	if (intel->last_batch_bo != NULL) {
-		dri_bo_unreference(intel->last_batch_bo);
-		intel->last_batch_bo = NULL;
-	}
-
 	if (intel->vertex_bo) {
 		dri_bo_unreference(intel->vertex_bo);
 		intel->vertex_bo = NULL;
@@ -267,39 +262,18 @@ void intel_batch_submit(ScrnInfoPtr scrn, int flush)
 		free(entry);
 	}
 
-	/* Save a ref to the last batch emitted, which we use for syncing
-	 * in debug code.
-	 */
-	dri_bo_unreference(intel->last_batch_bo);
-	intel->last_batch_bo = intel->batch_bo;
-	intel->batch_bo = NULL;
+	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
+		drm_intel_bo_wait_rendering(intel->batch_bo);
+	dri_bo_unreference(intel->batch_bo);
 
 	intel_next_batch(scrn);
 
-	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
-		intel_batch_wait_last(scrn);
-
 	if (intel->batch_commit_notify)
 		intel->batch_commit_notify(intel);
 
 	intel->current_batch = 0;
 }
 
-/** Waits on the last emitted batchbuffer to be completed. */
-void intel_batch_wait_last(ScrnInfoPtr scrn)
-{
-	intel_screen_private *intel = intel_get_screen_private(scrn);
-
-	if (intel->last_batch_bo == NULL)
-		return;
-
-	/* Map it CPU write, which guarantees it's done. This is a completely
-	 * non performance path, so we don't need anything better.
-	 */
-	drm_intel_gem_bo_map_gtt(intel->last_batch_bo);
-	drm_intel_gem_bo_unmap_gtt(intel->last_batch_bo);
-}
-
 void intel_debug_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
index 5b9ff5ed3..3f783b004 100644
--- a/src/intel_batchbuffer.h
+++ b/src/intel_batchbuffer.h
@@ -38,7 +38,6 @@ void intel_batch_teardown(ScrnInfoPtr scrn);
 void intel_batch_emit_flush(ScrnInfoPtr scrn);
 void intel_batch_do_flush(ScrnInfoPtr scrn);
 void intel_batch_submit(ScrnInfoPtr scrn, int flush);
-void intel_batch_wait_last(ScrnInfoPtr scrn);
 
 static inline int intel_batch_space(intel_screen_private *intel)
 {
diff --git a/src/intel_display.c b/src/intel_display.c
index 10bc5fd81..b5ccfe9b6 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -362,9 +362,6 @@ intel_crtc_apply(xf86CrtcPtr crtc)
 			      crtc->gamma_blue, crtc->gamma_size);
 #endif
 
-	/* drain any pending waits on the current framebuffer */
-	intel_batch_wait_last(crtc->scrn);
-
 	x = crtc->x;
 	y = crtc->y;
 	fb_id = mode->fb_id;