#include "drmP.h"
#include "drm_flip_work.h"
+/**
+ * drm_flip_work_allocate_task - allocate a flip-work task
+ * @data: data associated with the task
+ * @flags: allocator flags
+ *
+ * Allocate a drm_flip_task object and attach private data to it.
+ */
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+{
+ struct drm_flip_task *task;
+
+ task = kzalloc(sizeof(*task), flags);
+ if (task)
+ task->data = data;
+
+ return task;
+}
+EXPORT_SYMBOL(drm_flip_work_allocate_task);
+
+/**
+ * drm_flip_work_queue_task - queue a specific task
+ * @work: the flip-work
+ * @task: the task to handle
+ *
+ * Queues a task that will later be run (passed back to the drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_add_tail(&task->node, &work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+EXPORT_SYMBOL(drm_flip_work_queue_task);
+
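/*
 * Usage sketch (illustrative only, not part of this patch): pre-allocate
 * the task in process context, where a GFP_KERNEL allocation may sleep,
 * then queue it from atomic (e.g. irq) context.  "struct my_priv" and
 * the two functions below are hypothetical.
 */
struct my_priv {
	struct drm_flip_work unref_work;
	struct drm_flip_task *task;
	struct workqueue_struct *wq;
};

static int my_prepare_flip(struct my_priv *priv, struct drm_framebuffer *fb)
{
	/* process context: a sleeping GFP_KERNEL allocation is fine here */
	priv->task = drm_flip_work_allocate_task(fb, GFP_KERNEL);

	return priv->task ? 0 : -ENOMEM;
}

static void my_flip_done_irq(struct my_priv *priv)
{
	/* atomic-safe: only a list_add_tail() under work->lock */
	drm_flip_work_queue_task(&priv->unref_work, priv->task);
}
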
/**
* drm_flip_work_queue - queue work
 * @work: the flip-work
 * @val: the value to queue up
 *
 * Queues work that will later be run (passed back to the drm_flip_func_t
 * func) on a work queue after drm_flip_work_commit() is called.
 */
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
- if (kfifo_put(&work->fifo, val)) {
- atomic_inc(&work->pending);
+ struct drm_flip_task *task;
+
+ task = drm_flip_work_allocate_task(val,
+ drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
+ if (task) {
+ drm_flip_work_queue_task(work, task);
} else {
- DRM_ERROR("%s fifo full!\n", work->name);
+ DRM_ERROR("%s could not allocate task!\n", work->name);
work->func(work, val);
}
}
EXPORT_SYMBOL(drm_flip_work_queue);

/**
 * drm_flip_work_commit - commit queued work
 * @work: the flip-work
 * @wq: the work-queue to run the queued work on
 *
 * Trigger work previously queued by drm_flip_work_queue() to run
 * on a workqueue.  The typical usage would be to queue work (via
 * drm_flip_work_queue()) at any point (from vblank irq and/or
 * prior), then from vblank irq commit the queued work.
 */
void drm_flip_work_commit(struct drm_flip_work *work,
		struct workqueue_struct *wq)
{
- uint32_t pending = atomic_read(&work->pending);
- atomic_add(pending, &work->count);
- atomic_sub(pending, &work->pending);
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->queued, &work->committed);
+ INIT_LIST_HEAD(&work->queued);
+ spin_unlock_irqrestore(&work->lock, flags);
queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
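
/*
 * Usage sketch (illustrative only, not part of this patch), reusing the
 * hypothetical "struct my_priv" from the sketch above: queue the old
 * framebuffer when the flip is armed, then commit the whole batch from
 * the vblank interrupt.
 */
static void my_arm_flip(struct my_priv *priv, struct drm_framebuffer *old_fb)
{
	/* atomic-safe; func is called directly only if allocation fails */
	drm_flip_work_queue(&priv->unref_work, old_fb);
}

static void my_vblank_irq(struct my_priv *priv)
{
	/* move queued tasks to the committed list and kick priv->wq */
	drm_flip_work_commit(&priv->unref_work, priv->wq);
}
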
static void flip_worker(struct work_struct *w)
{
struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
- uint32_t count = atomic_read(&work->count);
- void *val = NULL;
+ struct list_head tasks;
+ unsigned long flags;
+
- atomic_sub(count, &work->count);
+ while (1) {
+ struct drm_flip_task *task, *tmp;
+
- while(count--)
- if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
- work->func(work, val);
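+ /*
+ * Splice off everything committed so far and run it; loop again in
+ * case more tasks were committed while func was running.
+ */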
+ INIT_LIST_HEAD(&tasks);
+ spin_lock_irqsave(&work->lock, flags);
+ list_splice_tail(&work->committed, &tasks);
+ INIT_LIST_HEAD(&work->committed);
+ spin_unlock_irqrestore(&work->lock, flags);
+
+ if (list_empty(&tasks))
+ break;
+
+ list_for_each_entry_safe(task, tmp, &tasks, node) {
+ work->func(work, task->data);
+ kfree(task);
+ }
+ }
}

/**
 * drm_flip_work_init - initialize flip-work
 * @work: the flip-work to initialize
 * @size: the max queue depth
 * @name: debug name
 * @func: the callback work function
 *
 * Initializes/allocates resources for the flip-work
 *
 * RETURNS:
 * Zero on success, error code on failure.
 */
int drm_flip_work_init(struct drm_flip_work *work, int size,
const char *name, drm_flip_func_t func)
{
- int ret;
-
work->name = name;
- atomic_set(&work->count, 0);
- atomic_set(&work->pending, 0);
+ INIT_LIST_HEAD(&work->queued);
+ INIT_LIST_HEAD(&work->committed);
+ spin_lock_init(&work->lock);
work->func = func;
- ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
- if (ret) {
- DRM_ERROR("could not allocate %s fifo\n", name);
- return ret;
- }
-
INIT_WORK(&work->worker, flip_worker);
return 0;
}
EXPORT_SYMBOL(drm_flip_work_init);

/**
 * drm_flip_work_cleanup - cleans up flip-work
 * @work: the flip-work to cleanup
 *
 * Destroy resources allocated for the flip-work
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
- WARN_ON(!kfifo_is_empty(&work->fifo));
- kfifo_free(&work->fifo);
+ WARN_ON(!list_empty(&work->queued) || !list_empty(&work->committed));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);

--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h

#ifndef DRM_FLIP_WORK_H
#define DRM_FLIP_WORK_H
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
/**
*
* Util to queue up work to run from work-queue context after flip/vblank.
 * Typically this can be used to defer unref of framebuffers and cursor
- * bo's, etc until after vblank. The APIs are all safe (and lockless)
- * for up to one producer and once consumer at a time. The single-consumer
- * aspect is ensured by committing the queued work to a single work-queue.
+ * bos until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_queue_task() and drm_flip_work_queue() can be called
+ * in atomic context.
*/
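
/*
 * Usage sketch (illustrative, not part of this header), assuming a
 * hypothetical driver-private "priv" holding a drm_flip_work named
 * unref_work and a workqueue priv->wq:
 *
 *	static void unref_worker(struct drm_flip_work *work, void *val)
 *	{
 *		drm_framebuffer_unreference(val);
 *	}
 *
 *	drm_flip_work_init(&priv->unref_work, 16, "unref", unref_worker);
 *	...
 *	drm_flip_work_queue(&priv->unref_work, old_fb);     <- at flip time
 *	drm_flip_work_commit(&priv->unref_work, priv->wq);  <- from vblank irq
 *	...
 *	drm_flip_work_cleanup(&priv->unref_work);
 */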
struct drm_flip_work;

/**
 * drm_flip_func_t - callback function
 * @work: the flip-work
 * @val: value queued via drm_flip_work_queue()
 *
 * Callback function to be called for each of the queued work items after
 * drm_flip_work_commit() is called.
 */
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
+/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
/**
* struct drm_flip_work - flip work queue
* @name: debug name
- * @pending: number of queued but not committed items
- * @count: number of committed items
 * @func: callback function called for each committed item
* @worker: worker which calls @func
- * @fifo: queue of committed items
+ * @queued: queued tasks
+ * @committed: committed tasks
+ * @lock: lock protecting the queued and committed lists
*/
struct drm_flip_work {
const char *name;
- atomic_t pending, count;
drm_flip_func_t func;
struct work_struct worker;
- DECLARE_KFIFO_PTR(fifo, void *);
+ struct list_head queued;
+ struct list_head committed;
+ spinlock_t lock;
};
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);