summaryrefslogtreecommitdiff
path: root/src/util/u_queue.c
diff options
context:
space:
mode:
authorMarek Olšák <marek.olsak@amd.com>2017-07-10 21:17:04 +0200
committerMarek Olšák <marek.olsak@amd.com>2017-07-17 10:57:20 -0400
commit59ad7697705a65940f6370c28729aff87d446b46 (patch)
tree6b66cda9329a9648d5767035aae9c78e52dde02d /src/util/u_queue.c
parent465bb47d6f6c1d41ddb6d99fee31f2df2fbc7b5a (diff)
downloadmesa-59ad7697705a65940f6370c28729aff87d446b46.tar.gz
mesa-59ad7697705a65940f6370c28729aff87d446b46.tar.bz2
mesa-59ad7697705a65940f6370c28729aff87d446b46.zip
util/u_queue: add an option to resize the queue when it's full
Consider the following situation:

    mtx_lock(mutex);
    do_something();
    util_queue_add_job(...);
    mtx_unlock(mutex);

If the queue is full, util_queue_add_job will wait for a free slot. If the job which is currently being executed tries to lock the mutex, it will be stuck forever, because util_queue_add_job is stuck. The deadlock can be trivially resolved by increasing the queue size (reallocating the queue) in util_queue_add_job if the queue is full. Then util_queue_add_job becomes wait-free. radeonsi will use it. Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Diffstat (limited to 'src/util/u_queue.c')
-rw-r--r--src/util/u_queue.c37
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index cb5903014e7..49361c3dadf 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -204,6 +204,7 @@ util_queue_init(struct util_queue *queue,
memset(queue, 0, sizeof(*queue));
queue->name = name;
+ queue->flags = flags;
queue->num_threads = num_threads;
queue->max_jobs = max_jobs;
@@ -329,9 +330,39 @@ util_queue_add_job(struct util_queue *queue,
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
- /* if the queue is full, wait until there is space */
- while (queue->num_queued == queue->max_jobs)
- cnd_wait(&queue->has_space_cond, &queue->lock);
+ if (queue->num_queued == queue->max_jobs) {
+ if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL) {
+ /* If the queue is full, make it larger to avoid waiting for a free
+ * slot.
+ */
+ unsigned new_max_jobs = queue->max_jobs + 8;
+ struct util_queue_job *jobs =
+ (struct util_queue_job*)calloc(new_max_jobs,
+ sizeof(struct util_queue_job));
+ assert(jobs);
+
+ /* Copy all queued jobs into the new list. */
+ unsigned num_jobs = 0;
+ unsigned i = queue->read_idx;
+
+ do {
+ jobs[num_jobs++] = queue->jobs[i];
+ i = (i + 1) % queue->max_jobs;
+ } while (i != queue->write_idx);
+
+ assert(num_jobs == queue->num_queued);
+
+ free(queue->jobs);
+ queue->jobs = jobs;
+ queue->read_idx = 0;
+ queue->write_idx = num_jobs;
+ queue->max_jobs = new_max_jobs;
+ } else {
+ /* Wait until there is a free slot. */
+ while (queue->num_queued == queue->max_jobs)
+ cnd_wait(&queue->has_space_cond, &queue->lock);
+ }
+ }
ptr = &queue->jobs[queue->write_idx];
assert(ptr->job == NULL);