author    Chris Wilson <chris@chris-wilson.co.uk>  2014-08-30 11:44:51 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2014-08-30 11:44:51 +0100
commit    10552b5ca6c193e0c696e96c9f5e0d6142f4d8ee (patch)
tree      3444d7c434189df481759c8419c6c092155fc872
parent    255bade1ea98e642fe6d01c9dee8d5e8661bd816 (diff)
batch: Specify number of relocations to accommodate
Since relocations are variable size, depending upon generation, it is easier to handle the resizing of the batch request inside the BEGIN_BATCH macro. This still leaves us with having to resize commands in a few places, which still need adaptation for gen8+.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
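A minimal sketch of the dword accounting the reworked BEGIN_BATCH(n, r) now performs, pulled out of the macro for illustration; the standalone helper name below is made up, the real logic lives in the macro in lib/intel_batchbuffer.h:

	/* Mirrors the new BEGIN_BATCH(n, r): on gen8+ every address emitted via
	 * OUT_RELOC takes one extra dword (64-bit relocations), so the space
	 * reserved for the command is widened by the relocation count. */
	static unsigned batch_space_needed(int gen, unsigned n_dwords, unsigned n_relocs)
	{
		if (gen >= 8)
			n_dwords += n_relocs;
		return n_dwords * 4;	/* dwords to bytes */
	}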
-rw-r--r--benchmarks/intel_upload_blit_large.c2
-rw-r--r--benchmarks/intel_upload_blit_large_gtt.c2
-rw-r--r--benchmarks/intel_upload_blit_large_map.c2
-rw-r--r--benchmarks/intel_upload_blit_small.c2
-rw-r--r--lib/intel_batchbuffer.c8
-rw-r--r--lib/intel_batchbuffer.h50
-rw-r--r--tests/drm_vma_limiter_cached.c4
-rw-r--r--tests/gem_bad_address.c2
-rw-r--r--tests/gem_bad_batch.c2
-rw-r--r--tests/gem_bad_blit.c2
-rw-r--r--tests/gem_caching.c2
-rw-r--r--tests/gem_cs_prefetch.c2
-rw-r--r--tests/gem_double_irq_loop.c4
-rw-r--r--tests/gem_dummy_reloc_loop.c18
-rw-r--r--tests/gem_exec_bad_domains.c14
-rw-r--r--tests/gem_fenced_exec_thrash.c6
-rw-r--r--tests/gem_hang.c2
-rw-r--r--tests/gem_hangcheck_forcewake.c9
-rw-r--r--tests/gem_multi_bsd_sync_loop.c4
-rw-r--r--tests/gem_non_secure_batch.c2
-rw-r--r--tests/gem_partial_pwrite_pread.c2
-rw-r--r--tests/gem_persistent_relocs.c6
-rw-r--r--tests/gem_pipe_control_store_loop.c16
-rw-r--r--tests/gem_reloc_vs_gpu.c6
-rw-r--r--tests/gem_ring_sync_loop.c4
-rw-r--r--tests/gem_ringfill.c2
-rw-r--r--tests/gem_set_tiling_vs_blt.c10
-rw-r--r--tests/gem_storedw_loop_blt.c11
-rw-r--r--tests/gem_storedw_loop_bsd.c12
-rw-r--r--tests/gem_storedw_loop_render.c12
-rw-r--r--tests/gem_storedw_loop_vebox.c4
-rw-r--r--tests/gem_stress.c6
-rw-r--r--tests/gem_tiled_partial_pwrite_pread.c2
-rw-r--r--tests/gem_unfence_active_buffers.c14
-rw-r--r--tests/gem_unref_active_buffers.c2
-rw-r--r--tests/gem_wait_render_timeout.c12
-rw-r--r--tests/gem_write_read_ring_switch.c10
-rw-r--r--tests/kms_fbc_crc.c25
-rw-r--r--tests/kms_fence_pin_leak.c2
-rw-r--r--tests/kms_flip.c6
-rw-r--r--tests/kms_mmio_vs_cs_flip.c4
-rw-r--r--tests/kms_psr_sink_crc.c5
-rw-r--r--tests/pm_rps.c12
-rw-r--r--tests/prime_nv_pcopy.c2
-rw-r--r--tools/intel_perf_counters.c6
45 files changed, 152 insertions, 180 deletions
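For reference, a converted call site roughly looks as follows (illustrative sketch: dst_bo, src_bo, width, height and the pitch values are placeholders, and an intel_batchbuffer pointer named batch is assumed to be in scope, as lib/intel_batchbuffer.h requires):

	/* BLIT_COPY_BATCH_START() expands to BEGIN_BATCH(8, 2) internally, so it
	 * reserves the two extra dwords needed for gen8+ relocations itself and
	 * no longer takes the device id as an argument. */
	BLIT_COPY_BATCH_START(0);
	OUT_BATCH((3 << 24) |		/* 32 bits */
		  (0xcc << 16) |	/* copy ROP */
		  dst_pitch);
	OUT_BATCH(0);				/* dst x1,y1 */
	OUT_BATCH(height << 16 | width);	/* dst x2,y2 */
	OUT_RELOC_FENCED(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(0);				/* src x1,y1 */
	OUT_BATCH(src_pitch);
	OUT_RELOC_FENCED(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
	ADVANCE_BATCH();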
diff --git a/benchmarks/intel_upload_blit_large.c b/benchmarks/intel_upload_blit_large.c
index d9287abf..689f9c41 100644
--- a/benchmarks/intel_upload_blit_large.c
+++ b/benchmarks/intel_upload_blit_large.c
@@ -97,7 +97,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
drm_intel_bo_subdata(src_bo, 0, sizeof(data), data);
/* Render the junk to the dst. */
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
(width * 4) /* dst pitch */);
diff --git a/benchmarks/intel_upload_blit_large_gtt.c b/benchmarks/intel_upload_blit_large_gtt.c
index 9859a740..601496dd 100644
--- a/benchmarks/intel_upload_blit_large_gtt.c
+++ b/benchmarks/intel_upload_blit_large_gtt.c
@@ -95,7 +95,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
drm_intel_gem_bo_unmap_gtt(src_bo);
/* Render the junk to the dst. */
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
(width * 4) /* dst pitch */);
diff --git a/benchmarks/intel_upload_blit_large_map.c b/benchmarks/intel_upload_blit_large_map.c
index 771cb3cd..d9167376 100644
--- a/benchmarks/intel_upload_blit_large_map.c
+++ b/benchmarks/intel_upload_blit_large_map.c
@@ -98,7 +98,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
drm_intel_bo_unmap(src_bo);
/* Render the junk to the dst. */
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
(width * 4) /* dst pitch */);
diff --git a/benchmarks/intel_upload_blit_small.c b/benchmarks/intel_upload_blit_small.c
index b7d80687..b9640a4f 100644
--- a/benchmarks/intel_upload_blit_small.c
+++ b/benchmarks/intel_upload_blit_small.c
@@ -108,7 +108,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
}
/* Render the junk to the dst. */
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
(width * 4) /* dst pitch */);
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 175791e1..7313bb59 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -360,9 +360,7 @@ intel_blt_copy(struct intel_batchbuffer *batch,
igt_fail(1);
}
- BEGIN_BATCH(gen >= 8 ? 10 : 8);
- OUT_BATCH(XY_SRC_COPY_BLT_CMD | cmd_bits |
- (gen >= 8 ? 8 : 6));
+ BLIT_COPY_BATCH_START(cmd_bits);
OUT_BATCH((br13_bits) |
(0xcc << 16) | /* copy ROP */
dst_pitch);
@@ -376,12 +374,14 @@ intel_blt_copy(struct intel_batchbuffer *batch,
#define CMD_POLY_STIPPLE_OFFSET 0x7906
if (gen == 5) {
+ BEGIN_BATCH(2, 0);
OUT_BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
OUT_BATCH(0);
+ ADVANCE_BATCH();
}
if (gen >= 6 && src_bo == dst_bo) {
- BEGIN_BATCH(3);
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 37955a78..74cf13bb 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -77,6 +77,7 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
/**
* BEGIN_BATCH:
* @n: number of DWORDS to emit
+ * @r: number of RELOCS to emit
*
* Prepares a batch to emit @n DWORDS, flushing it if there's not enough space
* available.
@@ -84,10 +85,13 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
-#define BEGIN_BATCH(n) do { \
+#define BEGIN_BATCH(n, r) do { \
+ int __n = (n); \
igt_assert(batch->end == NULL); \
- intel_batchbuffer_require_space(batch, (n)*4); \
- batch->end = batch->ptr + (n) * 4; \
+ if (batch->gen >= 8) __n += r; \
+ __n *= 4; \
+ intel_batchbuffer_require_space(batch, __n); \
+ batch->end = batch->ptr + __n; \
} while (0)
/**
@@ -150,35 +154,21 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
batch->end = NULL; \
} while(0)
-#define BLIT_COPY_BATCH_START(devid, flags) do { \
- if (intel_gen(devid) >= 8) { \
- BEGIN_BATCH(10); \
- OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
- XY_SRC_COPY_BLT_WRITE_ALPHA | \
- XY_SRC_COPY_BLT_WRITE_RGB | \
- (flags) | 8); \
- } else { \
- BEGIN_BATCH(8); \
- OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
- XY_SRC_COPY_BLT_WRITE_ALPHA | \
- XY_SRC_COPY_BLT_WRITE_RGB | \
- (flags) | 6); \
- } \
+#define BLIT_COPY_BATCH_START(flags) do { \
+ BEGIN_BATCH(8, 2); \
+ OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
+ XY_SRC_COPY_BLT_WRITE_ALPHA | \
+ XY_SRC_COPY_BLT_WRITE_RGB | \
+ (flags) | \
+ (6 + 2*(batch->gen >= 8))); \
} while(0)
-#define COLOR_BLIT_COPY_BATCH_START(devid, flags) do { \
- if (intel_gen(devid) >= 8) { \
- BEGIN_BATCH(8); \
- OUT_BATCH(MI_NOOP); \
- OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 0x5 | \
- COLOR_BLT_WRITE_ALPHA | \
- XY_COLOR_BLT_WRITE_RGB); \
- } else { \
- BEGIN_BATCH(6); \
- OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 0x4 | \
- COLOR_BLT_WRITE_ALPHA | \
- XY_COLOR_BLT_WRITE_RGB); \
- } \
+#define COLOR_BLIT_COPY_BATCH_START(flags) do { \
+ BEGIN_BATCH(6, 1); \
+ OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | \
+ COLOR_BLT_WRITE_ALPHA | \
+ XY_COLOR_BLT_WRITE_RGB | \
+ (4 + (batch->gen >= 8))); \
} while(0)
void
diff --git a/tests/drm_vma_limiter_cached.c b/tests/drm_vma_limiter_cached.c
index 9383587b..74d0c1a0 100644
--- a/tests/drm_vma_limiter_cached.c
+++ b/tests/drm_vma_limiter_cached.c
@@ -81,7 +81,7 @@ igt_simple_main
/* put some load onto the gpu to keep the light buffers active for long
* enough */
for (i = 0; i < 10000; i++) {
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
@@ -110,7 +110,7 @@ igt_simple_main
drm_intel_gem_bo_unmap_gtt(bo[j]);
/* put it onto the active list ... */
- COLOR_BLIT_COPY_BATCH_START(intel_get_drm_devid(fd), 0);
+ COLOR_BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
128);
OUT_BATCH(0); /* dst x1,y1 */
diff --git a/tests/gem_bad_address.c b/tests/gem_bad_address.c
index e7a9587b..4a4a5703 100644
--- a/tests/gem_bad_address.c
+++ b/tests/gem_bad_address.c
@@ -50,7 +50,7 @@ struct intel_batchbuffer *batch;
static void
bad_store(void)
{
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 0);
OUT_BATCH(MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL | 1 << 21);
OUT_BATCH(0);
OUT_BATCH(BAD_GTT_DEST);
diff --git a/tests/gem_bad_batch.c b/tests/gem_bad_batch.c
index 7f92a939..e3de3b6d 100644
--- a/tests/gem_bad_batch.c
+++ b/tests/gem_bad_batch.c
@@ -48,7 +48,7 @@ struct intel_batchbuffer *batch;
static void
bad_batch(void)
{
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 0);
OUT_BATCH(MI_BATCH_BUFFER_START);
OUT_BATCH(0);
ADVANCE_BATCH();
diff --git a/tests/gem_bad_blit.c b/tests/gem_bad_blit.c
index 33d1ac8c..b467ba83 100644
--- a/tests/gem_bad_blit.c
+++ b/tests/gem_bad_blit.c
@@ -78,7 +78,7 @@ bad_blit(drm_intel_bo *src_bo, uint32_t devid)
cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
}
- BLIT_COPY_BATCH_START(devid, cmd_bits);
+ BLIT_COPY_BATCH_START(cmd_bits);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
dst_pitch);
diff --git a/tests/gem_caching.c b/tests/gem_caching.c
index ddd7d943..b7f4bfb0 100644
--- a/tests/gem_caching.c
+++ b/tests/gem_caching.c
@@ -63,7 +63,7 @@ int fd;
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
diff --git a/tests/gem_cs_prefetch.c b/tests/gem_cs_prefetch.c
index ad5f4c69..e64b224b 100644
--- a/tests/gem_cs_prefetch.c
+++ b/tests/gem_cs_prefetch.c
@@ -134,7 +134,7 @@ igt_simple_main
/* copy the sample batch with the gpu to the new one, so that we
* also test the unmappable part of the gtt. */
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
diff --git a/tests/gem_double_irq_loop.c b/tests/gem_double_irq_loop.c
index 0fbb46e3..f9dab7c7 100644
--- a/tests/gem_double_irq_loop.c
+++ b/tests/gem_double_irq_loop.c
@@ -62,7 +62,7 @@ dummy_reloc_loop(void)
int i;
for (i = 0; i < 0x800; i++) {
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4*4096);
@@ -75,7 +75,7 @@ dummy_reloc_loop(void)
ADVANCE_BATCH();
intel_batchbuffer_flush(batch);
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
diff --git a/tests/gem_dummy_reloc_loop.c b/tests/gem_dummy_reloc_loop.c
index 4fe07860..7a971403 100644
--- a/tests/gem_dummy_reloc_loop.c
+++ b/tests/gem_dummy_reloc_loop.c
@@ -71,23 +71,21 @@ dummy_reloc_loop(int ring)
int i;
for (i = 0; i < 0x100000; i++) {
+ BEGIN_BATCH(4, 1);
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
intel_batchbuffer_flush_on_ring(batch, ring);
drm_intel_bo_map(target_buffer, 0);
@@ -106,23 +104,21 @@ dummy_reloc_loop_random_ring(int num_rings)
for (i = 0; i < 0x100000; i++) {
int ring = random() % num_rings + 1;
+ BEGIN_BATCH(4, 1);
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
intel_batchbuffer_flush_on_ring(batch, ring);
drm_intel_bo_map(target_buffer, 0);
@@ -148,23 +144,21 @@ dummy_reloc_loop_random_ring_multi_fd(int num_rings)
mindex = random() % NUM_FD;
batch = mbatch[mindex];
+ BEGIN_BATCH(4, 1);
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
intel_batchbuffer_flush_on_ring(batch, ring);
drm_intel_bo_map(target_buffer, 0);
diff --git a/tests/gem_exec_bad_domains.c b/tests/gem_exec_bad_domains.c
index 7641f8f8..99012815 100644
--- a/tests/gem_exec_bad_domains.c
+++ b/tests/gem_exec_bad_domains.c
@@ -163,13 +163,13 @@ igt_main
}
igt_subtest("cpu-domain") {
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, 0, 0);
ADVANCE_BATCH();
igt_assert(run_batch() == -EINVAL);
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU, 0);
ADVANCE_BATCH();
@@ -177,13 +177,13 @@ igt_main
}
igt_subtest("gtt-domain") {
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, 0, 0);
ADVANCE_BATCH();
igt_assert(run_batch() == -EINVAL);
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT, 0);
ADVANCE_BATCH();
@@ -193,7 +193,7 @@ igt_main
/* Note: Older kernels disallow this. Punt on the skip check though
* since this is too old. */
igt_subtest("conflicting-write-domain") {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 2);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER, 0);
@@ -208,14 +208,14 @@ igt_main
multi_write_domain(fd);
igt_subtest("invalid-gpu-domain") {
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, ~(I915_GEM_GPU_DOMAINS | I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU),
0, 0);
ADVANCE_BATCH();
igt_assert(run_batch() == -EINVAL);
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 1);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT << 1,
I915_GEM_DOMAIN_GTT << 1, 0);
diff --git a/tests/gem_fenced_exec_thrash.c b/tests/gem_fenced_exec_thrash.c
index 6ef21a4c..85ead308 100644
--- a/tests/gem_fenced_exec_thrash.c
+++ b/tests/gem_fenced_exec_thrash.c
@@ -85,7 +85,7 @@ static void emit_dummy_load(void)
}
for (i = 0; i < 5; i++) {
- BLIT_COPY_BATCH_START(devid, tile_flags);
+ BLIT_COPY_BATCH_START(tile_flags);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
@@ -97,8 +97,8 @@ static void emit_dummy_load(void)
OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_hang.c b/tests/gem_hang.c
index d5eb5641..7a7c8acb 100644
--- a/tests/gem_hang.c
+++ b/tests/gem_hang.c
@@ -54,7 +54,7 @@ gpu_hang(void)
cmd = bad_pipe ? MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW :
MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
- BEGIN_BATCH(6);
+ BEGIN_BATCH(6, 0);
/* The documentation says that the LOAD_SCAN_LINES command
* always comes in pairs. Don't ask me why. */
OUT_BATCH(MI_LOAD_SCAN_LINES_INCL | (bad_pipe << 20));
diff --git a/tests/gem_hangcheck_forcewake.c b/tests/gem_hangcheck_forcewake.c
index 219a2653..ec74c511 100644
--- a/tests/gem_hangcheck_forcewake.c
+++ b/tests/gem_hangcheck_forcewake.c
@@ -88,9 +88,8 @@ igt_simple_main
pitch /= 4;
for (i = 0; i < 10000; i++) {
- BLIT_COPY_BATCH_START(devid,
- XY_SRC_COPY_BLT_SRC_TILED |
- XY_SRC_COPY_BLT_DST_TILED);
+ BLIT_COPY_BATCH_START(XY_SRC_COPY_BLT_SRC_TILED |
+ XY_SRC_COPY_BLT_DST_TILED);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
@@ -102,8 +101,8 @@ igt_simple_main
OUT_RELOC_FENCED(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
index 003da42f..dec738b6 100644
--- a/tests/gem_multi_bsd_sync_loop.c
+++ b/tests/gem_multi_bsd_sync_loop.c
@@ -78,7 +78,7 @@ store_dword_loop(int fd)
mindex = random() % NUM_FD;
batch = mbatch[mindex];
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
@@ -86,7 +86,7 @@ store_dword_loop(int fd)
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
diff --git a/tests/gem_non_secure_batch.c b/tests/gem_non_secure_batch.c
index 01101e96..d8969da8 100644
--- a/tests/gem_non_secure_batch.c
+++ b/tests/gem_non_secure_batch.c
@@ -66,7 +66,7 @@ mi_lri_loop(void)
for (i = 0; i < 0x100; i++) {
int ring = random() % num_rings + 1;
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 0);
OUT_BATCH(MI_LOAD_REGISTER_IMM | 1);
OUT_BATCH(0x203c); /* RENDER RING CTL */
OUT_BATCH(0); /* try to stop the ring */
diff --git a/tests/gem_partial_pwrite_pread.c b/tests/gem_partial_pwrite_pread.c
index 92cc0578..b9ffeec1 100644
--- a/tests/gem_partial_pwrite_pread.c
+++ b/tests/gem_partial_pwrite_pread.c
@@ -63,7 +63,7 @@ int fd;
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
diff --git a/tests/gem_persistent_relocs.c b/tests/gem_persistent_relocs.c
index 11e7b6c0..585eda8d 100644
--- a/tests/gem_persistent_relocs.c
+++ b/tests/gem_persistent_relocs.c
@@ -125,7 +125,7 @@ static void emit_dummy_load(int pitch)
}
for (i = 0; i < 5; i++) {
- BLIT_COPY_BATCH_START(devid, tile_flags);
+ BLIT_COPY_BATCH_START(tile_flags);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
@@ -137,8 +137,8 @@ static void emit_dummy_load(int pitch)
OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (intel_gen(devid) >= 6) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_pipe_control_store_loop.c b/tests/gem_pipe_control_store_loop.c
index 27d10913..86681f2a 100644
--- a/tests/gem_pipe_control_store_loop.c
+++ b/tests/gem_pipe_control_store_loop.c
@@ -77,7 +77,7 @@ store_pipe_control_loop(bool preuse_buffer)
igt_assert(target_bo);
if (preuse_buffer) {
- COLOR_BLIT_COPY_BATCH_START(devid, 0);
+ COLOR_BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | (0xf0 << 16) | 64);
OUT_BATCH(0);
OUT_BATCH(1 << 16 | 1);
@@ -99,8 +99,8 @@ store_pipe_control_loop(bool preuse_buffer)
/* gem_storedw_batches_loop.c is a bit overenthusiastic with
* creating new batchbuffers - with buffer reuse disabled, the
* support code will do that for us. */
- if (intel_gen(devid) >= 8) {
- BEGIN_BATCH(5);
+ if (batch->gen >= 8) {
+ BEGIN_BATCH(4, 1);
OUT_BATCH(GFX_OP_PIPE_CONTROL + 1);
OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
OUT_RELOC_FENCED(target_bo,
@@ -109,10 +109,10 @@ store_pipe_control_loop(bool preuse_buffer)
OUT_BATCH(val); /* write data */
ADVANCE_BATCH();
- } else if (intel_gen(devid) >= 6) {
+ } else if (batch->gen >= 6) {
/* work-around hw issue, see intel_emit_post_sync_nonzero_flush
* in mesa sources. */
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(GFX_OP_PIPE_CONTROL);
OUT_BATCH(PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
@@ -120,7 +120,7 @@ store_pipe_control_loop(bool preuse_buffer)
OUT_BATCH(0); /* write data */
ADVANCE_BATCH();
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(GFX_OP_PIPE_CONTROL);
OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
OUT_RELOC(target_bo,
@@ -128,8 +128,8 @@ store_pipe_control_loop(bool preuse_buffer)
PIPE_CONTROL_GLOBAL_GTT);
OUT_BATCH(val); /* write data */
ADVANCE_BATCH();
- } else if (intel_gen(devid) >= 4) {
- BEGIN_BATCH(4);
+ } else if (batch->gen >= 4) {
+ BEGIN_BATCH(4, 1);
OUT_BATCH(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_WC_FLUSH |
PIPE_CONTROL_TC_FLUSH |
PIPE_CONTROL_WRITE_IMMEDIATE | 2);
diff --git a/tests/gem_reloc_vs_gpu.c b/tests/gem_reloc_vs_gpu.c
index d799bb92..bd6acdfd 100644
--- a/tests/gem_reloc_vs_gpu.c
+++ b/tests/gem_reloc_vs_gpu.c
@@ -117,7 +117,7 @@ static void emit_dummy_load(int pitch)
}
for (i = 0; i < 10; i++) {
- BLIT_COPY_BATCH_START(devid, tile_flags);
+ BLIT_COPY_BATCH_START(tile_flags);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
@@ -129,8 +129,8 @@ static void emit_dummy_load(int pitch)
OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (intel_gen(devid) >= 6) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_ring_sync_loop.c b/tests/gem_ring_sync_loop.c
index d4e7e2a8..fc510e4b 100644
--- a/tests/gem_ring_sync_loop.c
+++ b/tests/gem_ring_sync_loop.c
@@ -67,7 +67,7 @@ store_dword_loop(int fd)
int ring = random() % num_rings + 1;
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
@@ -75,7 +75,7 @@ store_dword_loop(int fd)
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
diff --git a/tests/gem_ringfill.c b/tests/gem_ringfill.c
index 46a0e770..3ecd25e5 100644
--- a/tests/gem_ringfill.c
+++ b/tests/gem_ringfill.c
@@ -178,7 +178,7 @@ static void blt_copy(struct intel_batchbuffer *batch,
unsigned w, unsigned h,
struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
{
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
dst->stride);
diff --git a/tests/gem_set_tiling_vs_blt.c b/tests/gem_set_tiling_vs_blt.c
index 1b7e4598..4de325cd 100644
--- a/tests/gem_set_tiling_vs_blt.c
+++ b/tests/gem_set_tiling_vs_blt.c
@@ -86,7 +86,7 @@ static void do_test(uint32_t tiling, unsigned stride,
busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);
for (i = 0; i < 250; i++) {
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
2*1024*4);
@@ -98,8 +98,8 @@ static void do_test(uint32_t tiling, unsigned stride,
OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
@@ -157,7 +157,7 @@ static void do_test(uint32_t tiling, unsigned stride,
blt_bits = XY_SRC_COPY_BLT_SRC_TILED;
}
- BLIT_COPY_BATCH_START(devid, blt_bits);
+ BLIT_COPY_BATCH_START(blt_bits);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
stride);
@@ -181,7 +181,7 @@ static void do_test(uint32_t tiling, unsigned stride,
/* Note: We don't care about gen4+ here because the blitter doesn't use
* fences there. So not setting tiling flags on the tiled buffer is ok.
*/
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
stride_after);
diff --git a/tests/gem_storedw_loop_blt.c b/tests/gem_storedw_loop_blt.c
index 7f12b52a..43750cc3 100644
--- a/tests/gem_storedw_loop_blt.c
+++ b/tests/gem_storedw_loop_blt.c
@@ -59,22 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
if (!has_ppgtt)
cmd |= MI_MEM_VIRTUAL;
- if (intel_gen(devid) >= 8) {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
+ BEGIN_BATCH(4, 0);
+ OUT_BATCH(cmd);
+ if (batch->gen >= 8) {
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(val);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
OUT_BATCH(0); /* reserved */
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(val);
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
}
static void
diff --git a/tests/gem_storedw_loop_bsd.c b/tests/gem_storedw_loop_bsd.c
index f89d522e..d5451be3 100644
--- a/tests/gem_storedw_loop_bsd.c
+++ b/tests/gem_storedw_loop_bsd.c
@@ -59,23 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
if (!has_ppgtt)
cmd |= MI_MEM_VIRTUAL;
- if (intel_gen(devid) >= 8) {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
+ BEGIN_BATCH(4, 0);
+ OUT_BATCH(cmd);
+ if (batch->gen >= 8) {
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0);
OUT_BATCH(val);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
OUT_BATCH(0); /* reserved */
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(val);
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
}
static void
diff --git a/tests/gem_storedw_loop_render.c b/tests/gem_storedw_loop_render.c
index 9defc6d6..e1d3dadb 100644
--- a/tests/gem_storedw_loop_render.c
+++ b/tests/gem_storedw_loop_render.c
@@ -59,23 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
if (!has_ppgtt)
cmd |= MI_MEM_VIRTUAL;
- if (intel_gen(devid) >= 8) {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
+ BEGIN_BATCH(4, 0);
+ OUT_BATCH(cmd);
+ if (batch->gen >= 8) {
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0);
OUT_BATCH(val);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
OUT_BATCH(0); /* reserved */
OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(val);
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
}
static void
diff --git a/tests/gem_storedw_loop_vebox.c b/tests/gem_storedw_loop_vebox.c
index 7f43167a..5e5536f9 100644
--- a/tests/gem_storedw_loop_vebox.c
+++ b/tests/gem_storedw_loop_vebox.c
@@ -62,9 +62,9 @@ store_dword_loop(int divider)
cmd = MI_STORE_DWORD_IMM;
for (i = 0; i < SLOW_QUICK(0x2000, 0x10); i++) {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 0);
OUT_BATCH(cmd);
- if (intel_gen(batch->devid) < 8)
+ if (batch->gen < 8)
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
diff --git a/tests/gem_stress.c b/tests/gem_stress.c
index c3dd531e..8d62b035 100644
--- a/tests/gem_stress.c
+++ b/tests/gem_stress.c
@@ -163,7 +163,7 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
}
/* copy lower half to upper half */
- BLIT_COPY_BATCH_START(devid, cmd_bits);
+ BLIT_COPY_BATCH_START(cmd_bits);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
dst_pitch);
@@ -175,8 +175,8 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
OUT_RELOC_FENCED(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_tiled_partial_pwrite_pread.c b/tests/gem_tiled_partial_pwrite_pread.c
index a4a9d0be..cf8f48d6 100644
--- a/tests/gem_tiled_partial_pwrite_pread.c
+++ b/tests/gem_tiled_partial_pwrite_pread.c
@@ -84,7 +84,7 @@ copy_bo(drm_intel_bo *src, int src_tiled,
cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
}
- BLIT_COPY_BATCH_START(devid, cmd_bits);
+ BLIT_COPY_BATCH_START(cmd_bits);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
dst_pitch);
diff --git a/tests/gem_unfence_active_buffers.c b/tests/gem_unfence_active_buffers.c
index 2c221a2b..fffe3a79 100644
--- a/tests/gem_unfence_active_buffers.c
+++ b/tests/gem_unfence_active_buffers.c
@@ -86,7 +86,7 @@ igt_simple_main
busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);
for (i = 0; i < 250; i++) {
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
2*1024*4);
@@ -98,8 +98,8 @@ igt_simple_main
OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
@@ -119,7 +119,7 @@ igt_simple_main
drm_intel_bo_disable_reuse(test_bo);
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
TEST_STRIDE);
@@ -138,7 +138,7 @@ igt_simple_main
/* launch a few batches to ensure the damaged slab objects get reused. */
for (i = 0; i < 10; i++) {
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
2*1024*4);
@@ -150,8 +150,8 @@ igt_simple_main
OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/gem_unref_active_buffers.c b/tests/gem_unref_active_buffers.c
index ca775987..7a1bc937 100644
--- a/tests/gem_unref_active_buffers.c
+++ b/tests/gem_unref_active_buffers.c
@@ -74,7 +74,7 @@ igt_simple_main
load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
igt_assert(load_bo);
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
diff --git a/tests/gem_wait_render_timeout.c b/tests/gem_wait_render_timeout.c
index 0a833f72..e05b7ae2 100644
--- a/tests/gem_wait_render_timeout.c
+++ b/tests/gem_wait_render_timeout.c
@@ -104,16 +104,8 @@ static void blt_color_fill(struct intel_batchbuffer *batch,
const unsigned short height = pages/4;
const unsigned short width = 4096;
- if (intel_gen(batch->devid) >= 8) {
- BEGIN_BATCH(8);
- OUT_BATCH(MI_NOOP);
- OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 5 |
- COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- } else {
- BEGIN_BATCH(6);
- OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 4 |
- COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- }
+ COLOR_BLIT_COPY_BATCH_START(COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
OUT_BATCH((3 << 24) | /* 32 Bit Color */
(0xF0 << 16) | /* Raster OP copy background register */
0); /* Dest pitch is 0 */
diff --git a/tests/gem_write_read_ring_switch.c b/tests/gem_write_read_ring_switch.c
index f09b3dbf..f3407f94 100644
--- a/tests/gem_write_read_ring_switch.c
+++ b/tests/gem_write_read_ring_switch.c
@@ -80,7 +80,7 @@ static void run_test(int ring)
/* put some load onto the gpu to keep the light buffers active for long
* enough */
for (i = 0; i < 1000; i++) {
- BLIT_COPY_BATCH_START(batch->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
4096);
@@ -93,7 +93,7 @@ static void run_test(int ring)
ADVANCE_BATCH();
}
- COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
+ COLOR_BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xff << 16) |
128);
@@ -107,7 +107,7 @@ static void run_test(int ring)
/* Emit an empty batch so that signalled seqno on the target ring >
* signalled seqno on the blt ring. This is required to hit the bug. */
- BEGIN_BATCH(2);
+ BEGIN_BATCH(2, 0);
OUT_BATCH(MI_NOOP);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
@@ -116,14 +116,14 @@ static void run_test(int ring)
/* For the ring->ring sync it's important to only emit a read reloc, for
* otherwise the obj->last_write_seqno will be updated. */
if (ring == I915_EXEC_RENDER) {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
OUT_BATCH(0xffffffff); /* compare dword */
OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_FLUSH_DW | 1);
OUT_BATCH(0); /* reserved */
OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
diff --git a/tests/kms_fbc_crc.c b/tests/kms_fbc_crc.c
index 95d40418..4675ee58 100644
--- a/tests/kms_fbc_crc.c
+++ b/tests/kms_fbc_crc.c
@@ -83,20 +83,33 @@ static const char *test_mode_str(enum test_mode mode)
return test_modes[mode];
}
-static void fill_blt(data_t *data, uint32_t handle, unsigned char color)
+static void fill_blt(data_t *data,
+ uint32_t handle,
+ struct igt_fb *fb,
+ unsigned char color)
{
drm_intel_bo *dst = gem_handle_to_libdrm_bo(data->bufmgr,
data->drm_fd,
"", handle);
struct intel_batchbuffer *batch;
+ unsigned flags;
+ int pitch;
batch = intel_batchbuffer_alloc(data->bufmgr, data->devid);
igt_assert(batch);
- COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
- OUT_BATCH((0 << 24) | (0xf0 << 16) | 0);
+ pitch = fb->stride;
+ flags = XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB;
+ if (fb->tiling && batch->gen >= 4) {
+ flags |= XY_COLOR_BLT_TILED;
+ pitch /= 4;
+ }
+
+ COLOR_BLIT_COPY_BATCH_START(flags);
+ OUT_BATCH(3 << 24 | 0xf0 << 16 | pitch);
OUT_BATCH(0);
- OUT_BATCH(1 << 16 | 4);
+ OUT_BATCH(fb->height << 16 | fb->width);
OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(color);
ADVANCE_BATCH();
@@ -127,7 +140,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
igt_assert(batch);
/* add the reloc to make sure the kernel will think we write to dst */
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_BATCH_BUFFER_END);
OUT_BATCH(MI_NOOP);
OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -226,7 +239,7 @@ static void test_crc(data_t *data, enum test_mode mode)
break;
case TEST_BLT:
case TEST_PAGE_FLIP_AND_BLT:
- fill_blt(data, handle, 0xff);
+ fill_blt(data, handle, data->fb, ~0);
break;
case TEST_RENDER:
case TEST_CONTEXT:
diff --git a/tests/kms_fence_pin_leak.c b/tests/kms_fence_pin_leak.c
index 93f4e16b..69f36b8f 100644
--- a/tests/kms_fence_pin_leak.c
+++ b/tests/kms_fence_pin_leak.c
@@ -54,7 +54,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
igt_assert(batch);
/* add the reloc to make sure the kernel will think we write to dst */
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_BATCH_BUFFER_END);
OUT_BATCH(MI_NOOP);
OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
diff --git a/tests/kms_flip.c b/tests/kms_flip.c
index cb94f0b4..3d3aa9b0 100644
--- a/tests/kms_flip.c
+++ b/tests/kms_flip.c
@@ -179,7 +179,7 @@ static void emit_dummy_load__bcs(struct test_output *o)
igt_assert(target_bo);
for (i = 0; i < limit; i++) {
- BLIT_COPY_BATCH_START(devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
@@ -191,8 +191,8 @@ static void emit_dummy_load__bcs(struct test_output *o)
OUT_RELOC_FENCED(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH();
- if (IS_GEN6(devid) || IS_GEN7(devid)) {
- BEGIN_BATCH(3);
+ if (batch->gen >= 6) {
+ BEGIN_BATCH(3, 0);
OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
diff --git a/tests/kms_mmio_vs_cs_flip.c b/tests/kms_mmio_vs_cs_flip.c
index a34809ac..c8bc702f 100644
--- a/tests/kms_mmio_vs_cs_flip.c
+++ b/tests/kms_mmio_vs_cs_flip.c
@@ -55,7 +55,7 @@ static void exec_nop(data_t *data, uint32_t handle, unsigned int ring)
igt_assert(bo);
/* add relocs to make sure the kernel will think we write to dst */
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_BATCH_BUFFER_END);
OUT_BATCH(MI_NOOP);
OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -81,7 +81,7 @@ static void exec_blt(data_t *data)
pitch = w * 4;
for (i = 0; i < 40; i++) {
- BLIT_COPY_BATCH_START(data->devid, 0);
+ BLIT_COPY_BATCH_START(0);
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
pitch);
diff --git a/tests/kms_psr_sink_crc.c b/tests/kms_psr_sink_crc.c
index 909d6ca7..49f95490 100644
--- a/tests/kms_psr_sink_crc.c
+++ b/tests/kms_psr_sink_crc.c
@@ -180,8 +180,7 @@ static void fill_blt(data_t *data, uint32_t handle, unsigned char color)
batch = intel_batchbuffer_alloc(data->bufmgr, data->devid);
igt_assert(batch);
- BEGIN_BATCH(5);
- OUT_BATCH(COLOR_BLT_CMD);
+ COLOR_BLIT_COPY_BATCH_START(0);
OUT_BATCH((1 << 24) | (0xf0 << 16) | 0);
OUT_BATCH(1 << 16 | 4);
OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -214,7 +213,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
igt_assert(batch);
/* add the reloc to make sure the kernel will think we write to dst */
- BEGIN_BATCH(4);
+ BEGIN_BATCH(4, 1);
OUT_BATCH(MI_BATCH_BUFFER_END);
OUT_BATCH(MI_NOOP);
OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index e2851782..ef382ec6 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -178,23 +178,19 @@ static void emit_store_dword_imm(uint32_t val)
if (!lh.has_ppgtt)
cmd |= MI_MEM_VIRTUAL;
- if (intel_gen(lh.devid) >= 8) {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
+ BEGIN_BATCH(4, 1);
+ OUT_BATCH(cmd);
+ if (batch->gen >= 8) {
OUT_RELOC(lh.target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
- OUT_BATCH(0);
OUT_BATCH(val);
- ADVANCE_BATCH();
} else {
- BEGIN_BATCH(4);
- OUT_BATCH(cmd);
OUT_BATCH(0); /* reserved */
OUT_RELOC(lh.target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
OUT_BATCH(val);
- ADVANCE_BATCH();
}
+ ADVANCE_BATCH();
}
#define LOAD_HELPER_PAUSE_USEC 500
diff --git a/tests/prime_nv_pcopy.c b/tests/prime_nv_pcopy.c
index fb0f62ff..218f4ba0 100644
--- a/tests/prime_nv_pcopy.c
+++ b/tests/prime_nv_pcopy.c
@@ -166,7 +166,7 @@ BEGIN_NVXX(struct nouveau_pushbuf *push, int subc, int mthd, int size)
static void
noop_intel(drm_intel_bo *bo)
{
- BEGIN_BATCH(3);
+ BEGIN_BATCH(3, 1);
OUT_BATCH(MI_NOOP);
OUT_BATCH(MI_BATCH_BUFFER_END);
OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER,
diff --git a/tools/intel_perf_counters.c b/tools/intel_perf_counters.c
index e6eafb20..739f926d 100644
--- a/tools/intel_perf_counters.c
+++ b/tools/intel_perf_counters.c
@@ -332,7 +332,7 @@ gen5_get_counters(void)
stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);
- BEGIN_BATCH(6);
+ BEGIN_BATCH(6, 2);
OUT_BATCH(GEN5_MI_REPORT_PERF_COUNT | MI_COUNTER_SET_0);
OUT_RELOC(stats_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
@@ -380,7 +380,7 @@ gen6_get_counters(void)
stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);
- BEGIN_BATCH(3);
+ BEGIN_BATCH(3, 1);
OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT | (3 - 2));
OUT_RELOC(stats_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
@@ -410,7 +410,7 @@ gen7_get_counters(void)
stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);
- BEGIN_BATCH(3);
+ BEGIN_BATCH(3, 1);
OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT | (3 - 2));
OUT_RELOC(stats_bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);