#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/exynos_iovmm.h>
#include "g2d.h"
#include "g2d_task.h"
+/*
+ * g2d_schedule_task - power up the device and start a prepared task
+ *
+ * Moves @task onto the active list, transitions it PREPARED -> ACTIVE and
+ * starts the hardware via g2d_device_run(). Runs from the scheduler
+ * workqueue (g2d_task_schedule_work).
+ */
+static void g2d_schedule_task(struct g2d_task *task)
+{
+	struct g2d_device *g2d_dev = task->g2d_dev;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * Unconditional invocation of pm_runtime_get_sync() has no side
+	 * effect in g2d_schedule(): it just increases the usage count of
+	 * RPM if this function skips calling g2d_device_run(). The skip
+	 * only happens when there is no task to run in
+	 * g2d_dev->tasks_prepared. If pm_runtime_get_sync() enabled power,
+	 * there must be a task in g2d_dev->tasks_prepared.
+	 */
+	ret = pm_runtime_get_sync(g2d_dev->dev);
+	if (ret < 0) {
+		/*
+		 * pm_runtime_get_sync() increments the usage count even on
+		 * failure; drop the reference so it does not leak and keep
+		 * the counter balanced for later suspend.
+		 */
+		pm_runtime_put_noidle(g2d_dev->dev);
+		dev_err(g2d_dev->dev, "Failed to enable power (%d)\n", ret);
+		/* TODO: cancel task */
+		return;
+	}
+
+	spin_lock_irqsave(&g2d_dev->lock_task, flags);
+
+	list_add_tail(&task->node, &g2d_dev->tasks_active);
+
+	change_task_state_prepared(task);
+	change_task_state_active(task);
+
+	/*
+	 * g2d_device_run() is not reentrant while g2d_schedule() is
+	 * reentrant, so g2d_device_run() must be protected against races
+	 * with g2d_dev->lock_task.
+	 */
+	if (g2d_device_run(g2d_dev, task) < 0) {
+		pm_runtime_put(g2d_dev->dev);
+		/* TODO: cancel task */
+	}
+
+	spin_unlock_irqrestore(&g2d_dev->lock_task, flags);
+}
+
+/* Workqueue entry point: resolve the owning task and schedule it. */
+static void g2d_task_schedule_work(struct work_struct *work)
+{
+	struct g2d_task *task = container_of(work, struct g2d_task, work);
+
+	g2d_schedule_task(task);
+}
+
+/*
+ * kref release callback for g2d_task.starter: hand the task over to the
+ * scheduler workqueue once the last starter reference is dropped.
+ * queue_work() returning false means the work was already pending, which
+ * must never happen for a task leaving the starter phase.
+ */
+static void g2d_queuework_task(struct kref *kref)
+{
+	struct g2d_task *task = container_of(kref, struct g2d_task, starter);
+	struct g2d_device *g2d_dev = task->g2d_dev;
+	bool queued;
+
+	queued = queue_work(g2d_dev->schedule_workq, &task->work);
+
+	BUG_ON(!queued);
+}
+
+/*
+ * g2d_start_task - drop the starter reference of @task
+ *
+ * Resets the task's completion so waiters block until the upcoming run
+ * signals it. When the starter refcount drops to zero the release
+ * callback g2d_queuework_task() queues the task to the scheduler
+ * workqueue.
+ */
+void g2d_start_task(struct g2d_task *task)
+{
+	reinit_completion(&task->completion);
+
+	kref_put(&task->starter, g2d_queuework_task);
+}
+
+/*
+ * g2d_get_free_task - take a task off the free list
+ *
+ * Returns a task initialized to the UNPREPARED state with its work item
+ * armed for scheduling, or NULL when no free task is available.
+ * Takes g2d_dev->lock_task.
+ */
+struct g2d_task *g2d_get_free_task(struct g2d_device *g2d_dev)
+{
+	struct g2d_task *task = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&g2d_dev->lock_task, flags);
+
+	if (!list_empty(&g2d_dev->tasks_free)) {
+		task = list_first_entry(&g2d_dev->tasks_free,
+					struct g2d_task, node);
+		list_del_init(&task->node);
+		INIT_WORK(&task->work, g2d_task_schedule_work);
+
+		init_task_state(task);
+	}
+
+	spin_unlock_irqrestore(&g2d_dev->lock_task, flags);
+
+	return task;
+}
+
+/*
+ * g2d_put_free_task - return @task to the free list
+ *
+ * Clears all state bits of @task before making it available again.
+ * Takes g2d_dev->lock_task.
+ */
+void g2d_put_free_task(struct g2d_device *g2d_dev, struct g2d_task *task)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&g2d_dev->lock_task, flags);
+
+	clear_task_state(task);
+
+	list_add(&task->node, &g2d_dev->tasks_free);
+
+	spin_unlock_irqrestore(&g2d_dev->lock_task, flags);
+}
+
void g2d_destroy_tasks(struct g2d_device *g2d_dev)
{
struct g2d_task *task, *next;
}
spin_unlock_irqrestore(&g2d_dev->lock_task, flags);
+
+ destroy_workqueue(g2d_dev->schedule_workq);
}
static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev)
task->g2d_dev = g2d_dev;
+ init_completion(&task->completion);
+
return task;
err_page:
kfree(task);
struct g2d_task *task;
unsigned int i;
+ g2d_dev->schedule_workq = create_singlethread_workqueue("g2dscheduler");
+ if (!g2d_dev->schedule_workq)
+ return -ENOMEM;
+
for (i = 0; i < G2D_MAX_JOBS; i++) {
task = g2d_create_task(g2d_dev);
#define __EXYNOS_G2D_TASK_H__
#include <linux/dma-buf.h>
+#include <linux/workqueue.h>
#include "g2d_format.h"
struct g2d_buffer buffer[G2D_MAX_PLANES];
};
-#define G2D_TASKSTATE_WAITING	1
-#define G2D_TASKSTATE_UNPREPARED 2
-#define G2D_TASKSTATE_PREPARED	3
-#define G2D_TASKSTATE_ACTIVE	4
-#define G2D_TASKSTATE_PROCESSED	5
-#define G2D_TASKSTATE_ERROR	6
-#define G2D_TASKSTATE_KILLED	7
-#define G2D_TASKSTATE_TIMEOUT	8
+/*
+ * Task states are bit flags rather than an enumeration so that several
+ * states can be combined in g2d_task.state (see the state-transition
+ * helpers that OR/AND these bits).
+ */
+#define G2D_TASKSTATE_WAITING		(1 << 1)
+#define G2D_TASKSTATE_UNPREPARED	(1 << 2)
+#define G2D_TASKSTATE_PREPARED		(1 << 3)
+#define G2D_TASKSTATE_ACTIVE		(1 << 4)
+#define G2D_TASKSTATE_PROCESSED		(1 << 5)
+#define G2D_TASKSTATE_ERROR		(1 << 6)
+#define G2D_TASKSTATE_KILLED		(1 << 7)
+#define G2D_TASKSTATE_TIMEOUT		(1 << 8)
struct g2d_context;
struct g2d_device;
struct g2d_device *g2d_dev;
unsigned int job_id;
+ unsigned long state;
+ struct kref starter;
struct g2d_layer source[G2D_MAX_IMAGES];
struct g2d_layer target;
unsigned int cmd_count;
unsigned int priority;
+
+ struct work_struct work;
+ struct completion completion;
};
+/* The below helpers should be called with g2d_device.lock_task held */
+
+/* Mark @task as running on hardware: PREPARED -> ACTIVE. */
+static inline void change_task_state_active(struct g2d_task *task)
+{
+	task->state &= ~G2D_TASKSTATE_PREPARED;
+	task->state |= G2D_TASKSTATE_ACTIVE;
+}
+
+/* Mark @task as ready to run: UNPREPARED -> PREPARED. */
+static inline void change_task_state_prepared(struct g2d_task *task)
+{
+	task->state |= G2D_TASKSTATE_PREPARED;
+	task->state &= ~G2D_TASKSTATE_UNPREPARED;
+}
+
+/* Initialize the state of a task freshly taken from the free list. */
+static inline void init_task_state(struct g2d_task *task)
+{
+	task->state = G2D_TASKSTATE_UNPREPARED;
+}
+
+/* Drop all state bits when a task is returned to the free list. */
+static inline void clear_task_state(struct g2d_task *task)
+{
+	task->state = 0;
+}
+
void g2d_destroy_tasks(struct g2d_device *g2d_dev);
int g2d_create_tasks(struct g2d_device *g2d_dev);
+struct g2d_task *g2d_get_free_task(struct g2d_device *g2d_dev);
+void g2d_put_free_task(struct g2d_device *g2d_dev, struct g2d_task *task);
+
+void g2d_start_task(struct g2d_task *task);
+
#endif /*__EXYNOS_G2D_TASK_H__*/