return "unbound";
}
+static void irq_fence_array_work(struct irq_work *wrk) /* irq_work handler: signals the array fence outside the fence-callback context */
+{
+ struct fence_array *array = container_of(wrk, typeof(*array), work); /* recover the enclosing fence_array from its embedded irq_work */
+
+ fence_signal(&array->base); /* deferred here instead of in fence_array_cb_func */
+ fence_put(&array->base); /* drops the callback's reference, which was kept alive when the work was queued */
+}
+
static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) /* per-fence completion callback for the array */
{
struct fence_array_cb *array_cb = /* NOTE(review): initializer truncated in this excerpt — presumably container_of(cb, typeof(*array_cb), cb); confirm against the full patch */
struct fence_array *array = array_cb->array;
if (atomic_dec_and_test(&array->num_pending))
- fence_signal(&array->base);
- fence_put(&array->base);
+ irq_work_queue(&array->work); /* last pending fence: defer signal+put to irq_work; the reference is released in irq_fence_array_work */
+ else
+ fence_put(&array->base); /* not the last fence: just drop this callback's reference */
}
static bool fence_array_enable_signaling(struct fence *fence)
spin_lock_init(&array->lock);
fence_init(&array->base, &fence_array_ops, &array->lock,
context, seqno);
+ init_irq_work(&array->work, irq_fence_array_work);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
#define __LINUX_FENCE_ARRAY_H
#include <linux/fence.h>
+#include <linux/irq_work.h>
/**
* struct fence_array_cb - callback helper for fence array
unsigned num_fences;
atomic_t num_pending;
struct fence **fences;
+
+ struct irq_work work;
};
extern const struct fence_ops fence_array_ops;