--- /dev/null
+libGLES_mali_default_8a_32-o-r8p0.so
\ No newline at end of file
--- /dev/null
+libGLES_mali_default_8a_32-n-r7p0.so
\ No newline at end of file
--- /dev/null
+libGLES_mali_default_8a_64-n-r7p0.so
\ No newline at end of file
#include <linux/sched.h>
#include <linux/atomic.h>
#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
#include <linux/fence.h>
#endif
+#endif
#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
mali_pp_job_initialize();
- mali_timeline_initialize();
+ err = mali_timeline_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
err = mali_session_initialize();
if (_MALI_OSK_ERR_OK != err) {
/* Initialize the dma fence context.*/
#if defined(CONFIG_MALI_DMA_BUF_FENCE)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ session->fence_context = dma_fence_context_alloc(1);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
session->fence_context = fence_context_alloc(1);
_mali_osk_atomic_init(&session->fence_seqno, 0);
#else
#endif
#if defined(CONFIG_MALI_DMA_BUF_FENCE)
#include "linux/mali_dma_fence.h"
-#include <linux/fence.h>
#endif
typedef enum pp_job_status {
#if defined(CONFIG_MALI_DMA_BUF_FENCE)
struct mali_dma_fence_context dma_fence_context; /**< The mali dma fence context to record dma fence waiters that this job wait for */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *rendered_dma_fence; /**< the new dma fence link to this job */
+#else
struct fence *rendered_dma_fence; /**< the new dma fence link to this job */
#endif
+#endif
};
void mali_pp_job_initialize(void);
#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+_mali_osk_wq_work_t *sync_fence_callback_work_t = NULL;
+_mali_osk_spinlock_irq_t *sync_fence_callback_list_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(sync_fence_callback_queue);
+#endif
+
/*
* Following three elements are used to record how many
* gp, physical pp or virtual pp jobs are delayed in the whole
static void mali_timeline_sync_fence_callback(struct mali_internal_sync_fence *sync_fence, struct mali_internal_sync_fence_waiter *sync_fence_waiter)
#endif
{
- struct mali_timeline_system *system;
- struct mali_timeline_waiter *waiter;
struct mali_timeline_tracker *tracker;
- mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
- u32 tid = _mali_osk_get_tid();
- mali_bool is_aborting = MALI_FALSE;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
- int fence_status = sync_fence->status;
-#else
- int fence_status = atomic_read(&sync_fence->status);
-#endif
- MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_IGNORE(sync_fence);
MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
MALI_DEBUG_ASSERT_POINTER(tracker);
- system = tracker->system;
- MALI_DEBUG_ASSERT_POINTER(system);
- MALI_DEBUG_ASSERT_POINTER(system->session);
-
- mali_spinlock_reentrant_wait(system->spinlock, tid);
+ _mali_osk_spinlock_irq_lock(sync_fence_callback_list_lock);
+ _mali_osk_list_addtail(&tracker->sync_fence_signal_list, &sync_fence_callback_queue);
+ _mali_osk_spinlock_irq_unlock(sync_fence_callback_list_lock);
- is_aborting = system->session->is_aborting;
- if (!is_aborting && (0 > fence_status)) {
- MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
- tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
- }
-
- waiter = tracker->waiter_sync;
- MALI_DEBUG_ASSERT_POINTER(waiter);
-
- tracker->sync_fence = NULL;
- tracker->fence.sync_fd = -1;
-
- schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
-
- /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
- if (is_aborting) {
- _mali_osk_wait_queue_wake_up(system->wait_queue);
- }
-
- mali_spinlock_reentrant_signal(system->spinlock, tid);
-
- /*
- * Older versions of Linux, before 3.5, doesn't support fput() in interrupt
- * context. For those older kernels, allocate a list object and put the
- * fence object on that and defer the call to sync_fence_put() to a workqueue.
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
- {
- struct mali_deferred_fence_put_entry *obj;
-
- obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
- if (obj) {
- unsigned long flags;
- mali_bool schedule = MALI_FALSE;
-
- obj->fence = sync_fence;
-
- spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
- if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
- schedule = MALI_TRUE;
- hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
- spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
-
- if (schedule)
- schedule_delayed_work(&delayed_sync_fence_put, 0);
- }
- }
-#else
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
- sync_fence_put(sync_fence);
-#else
- fput(sync_fence->file);
-#endif
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
-
- if (!is_aborting) {
- mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
- }
+ _mali_osk_wq_schedule_work(sync_fence_callback_work_t);
}
#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
* This function returns true if the callback is successfully removed,
* or false if the fence has already been signaled.
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ bool ret = dma_fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+ &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+
+#else
bool ret = fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
&pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+#endif
if (ret) {
fence_is_signaled = MALI_FALSE;
}
return point;
}
-void mali_timeline_initialize(void)
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+static void mali_timeline_do_sync_fence_callback(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_timeline_tracker *tracker;
+ struct mali_timeline_tracker *tmp_tracker;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release
+ * the lock before we start deleting the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(sync_fence_callback_list_lock);
+ _mali_osk_list_move_list(&sync_fence_callback_queue, &list);
+ _mali_osk_spinlock_irq_unlock(sync_fence_callback_list_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(tracker, tmp_tracker, &list,
+ struct mali_timeline_tracker, sync_fence_signal_list) {
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ mali_bool is_aborting = MALI_FALSE;
+ int fence_status = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ struct sync_fence *sync_fence = NULL;
+#else
+ struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+ struct mali_timeline_system *system = NULL;
+ struct mali_timeline_waiter *waiter = NULL;
+
+ _mali_osk_list_delinit(&tracker->sync_fence_signal_list);
+
+ sync_fence = tracker->sync_fence;
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ fence_status = sync_fence->status;
+#else
+ fence_status = atomic_read(&sync_fence->status);
+#endif
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ is_aborting = system->session->is_aborting;
+ if (!is_aborting && (0 > fence_status)) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ }
+
+ waiter = tracker->waiter_sync;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ tracker->sync_fence = NULL;
+ tracker->fence.sync_fd = -1;
+
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	/*
+	 * Older versions of Linux, before 3.5, don't support fput() in interrupt
+	 * context. For those older kernels, allocate a list object and put the
+	 * fence object on that and defer the call to sync_fence_put() to a workqueue.
+	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+ {
+ struct mali_deferred_fence_put_entry *obj;
+
+ obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+ if (obj) {
+ unsigned long flags;
+ mali_bool schedule = MALI_FALSE;
+
+ obj->fence = sync_fence;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+ schedule = MALI_TRUE;
+ hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ if (schedule)
+ schedule_delayed_work(&delayed_sync_fence_put, 0);
+ }
+ }
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ sync_fence_put(sync_fence);
+#else
+ fput(sync_fence->file);
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+ if (!is_aborting) {
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+ }
+}
+#endif
+_mali_osk_errcode_t mali_timeline_initialize(void)
{
_mali_osk_atomic_init(&gp_tracker_count, 0);
_mali_osk_atomic_init(&phy_pp_tracker_count, 0);
_mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ sync_fence_callback_list_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+ if (NULL == sync_fence_callback_list_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ sync_fence_callback_work_t = _mali_osk_wq_create_work(
+ mali_timeline_do_sync_fence_callback, NULL);
+
+ if (NULL == sync_fence_callback_work_t) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
}
+
void mali_timeline_terminate(void)
{
_mali_osk_atomic_term(&gp_tracker_count);
_mali_osk_atomic_term(&phy_pp_tracker_count);
_mali_osk_atomic_term(&virt_pp_tracker_count);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+ if (NULL != sync_fence_callback_list_lock) {
+ _mali_osk_spinlock_irq_term(sync_fence_callback_list_lock);
+ sync_fence_callback_list_lock = NULL;
+ }
+
+ if (NULL != sync_fence_callback_work_t) {
+ _mali_osk_wq_delete_work(sync_fence_callback_work_t);
+ sync_fence_callback_work_t = NULL;
+ }
+#endif
}
#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job));
else
MALI_DEBUG_PRINT(2, ("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
- tracker_type, tracker->point, state_char,
- tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
}
#else
(unsigned int)(uintptr_t)(tracker->job));
else
MALI_DEBUG_PRINT(2, ("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
- (unsigned int)(uintptr_t)(tracker->job)));
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ (unsigned int)(uintptr_t)(tracker->job)));
} else {
if (print_ctx)
_mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
#else
if (0 != tracker->trigger_ref_count) {
MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
- tracker->job));
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
} else {
MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n",
tracker_type, tracker->point, state_char,
timeline_id_to_string((enum mali_timeline_id)i));
else
MALI_DEBUG_PRINT(2, ("TL: Timeline %s: oldest (%u) next(%u)\n",
- timeline_id_to_string((enum mali_timeline_id)i), timeline->point_oldest, timeline->point_next));
+ timeline_id_to_string((enum mali_timeline_id)i), timeline->point_oldest, timeline->point_next));
mali_timeline_debug_print_timeline(timeline, print_ctx);
num_printed++;
struct mali_internal_sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
#endif
_mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+	_mali_osk_list_t sync_fence_signal_list; /**< List node used to signal the sync fence callback function. */
+
#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
#if defined(CONFIG_MALI_DMA_BUF_FENCE)
*/
void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
-void mali_timeline_initialize(void);
+_mali_osk_errcode_t mali_timeline_initialize(void);
void mali_timeline_terminate(void);
static DEFINE_SPINLOCK(mali_dma_fence_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ MALI_IGNORE(fence);
+ return true;
+}
+
+static const char *mali_dma_fence_get_driver_name(struct dma_fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali";
+}
+
+static const char *mali_dma_fence_get_timeline_name(struct dma_fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali_dma_fence";
+}
+
+static const struct dma_fence_ops mali_dma_fence_ops = {
+ .get_driver_name = mali_dma_fence_get_driver_name,
+ .get_timeline_name = mali_dma_fence_get_timeline_name,
+ .enable_signaling = mali_dma_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = dma_fence_default_wait,
+ .release = NULL
+};
+#else
static bool mali_dma_fence_enable_signaling(struct fence *fence)
{
MALI_IGNORE(fence);
.wait = fence_default_wait,
.release = NULL
};
+#endif
static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context)
{
for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) {
if (dma_fence_context->mali_dma_fence_waiters[i]) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
+ &dma_fence_context->mali_dma_fence_waiters[i]->base);
+ dma_fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+
+#else
fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
&dma_fence_context->mali_dma_fence_waiters[i]->base);
fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+#endif
kfree(dma_fence_context->mali_dma_fence_waiters[i]);
dma_fence_context->mali_dma_fence_waiters[i] = NULL;
}
dma_fence_context->cb_func(dma_fence_context->pp_job_ptr);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_dma_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+#else
static void mali_dma_fence_callback(struct fence *fence, struct fence_cb *cb)
+#endif
{
struct mali_dma_fence_waiter *dma_fence_waiter = NULL;
struct mali_dma_fence_context *dma_fence_context = NULL;
schedule_work(&dma_fence_context->work_handle);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct dma_fence *fence)
+#else
static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct fence *fence)
+#endif
{
int ret = 0;
struct mali_dma_fence_waiter *dma_fence_waiter;
return _MALI_OSK_ERR_NOMEM;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_get(fence);
+#else
fence_get(fence);
-
+#endif
dma_fence_waiter->fence = fence;
dma_fence_waiter->parent = dma_fence_context;
atomic_inc(&dma_fence_context->count);
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ ret = dma_fence_add_callback(fence, &dma_fence_waiter->base,
+ mali_dma_fence_callback);
+#else
ret = fence_add_callback(fence, &dma_fence_waiter->base,
mali_dma_fence_callback);
+#endif
if (0 > ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_put(fence);
+#else
fence_put(fence);
+#endif
kfree(dma_fence_waiter);
atomic_dec(&dma_fence_context->count);
if (-ENOENT == ret) {
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno)
+#else
struct fence *mali_dma_fence_new(u32 context, u32 seqno)
+#endif
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *fence = NULL;
+ fence = kzalloc(sizeof(struct dma_fence), GFP_KERNEL);
+#else
struct fence *fence = NULL;
-
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-
+ fence = kzalloc(sizeof(struct fence), GFP_KERNEL);
+#endif
if (NULL == fence) {
MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n"));
return fence;
}
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_init(fence,
+ &mali_dma_fence_ops,
+ &mali_dma_fence_lock,
+ context, seqno);
+#else
fence_init(fence,
&mali_dma_fence_ops,
&mali_dma_fence_lock,
context, seqno);
-
+#endif
return fence;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+void mali_dma_fence_signal_and_put(struct dma_fence **fence)
+#else
void mali_dma_fence_signal_and_put(struct fence **fence)
+#endif
{
MALI_DEBUG_ASSERT_POINTER(fence);
MALI_DEBUG_ASSERT_POINTER(*fence);
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_signal(*fence);
+ dma_fence_put(*fence);
+#else
fence_signal(*fence);
fence_put(*fence);
+#endif
*fence = NULL;
}
struct reservation_object *dma_reservation_object)
{
_mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
- struct fence *exclusive_fence = NULL;
u32 shared_count = 0, i;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *exclusive_fence = NULL;
+ struct dma_fence **shared_fences = NULL;
+#else
+ struct fence *exclusive_fence = NULL;
struct fence **shared_fences = NULL;
-
+#endif
MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
ended:
if (exclusive_fence)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_put(exclusive_fence);
+#else
fence_put(exclusive_fence);
+#endif
if (shared_fences) {
for (i = 0; i < shared_count; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_put(shared_fences[i]);
+#else
fence_put(shared_fences[i]);
+#endif
}
kfree(shared_fences);
}
#define _MALI_DMA_FENCE_H_
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
#include <linux/fence.h>
+#endif
#include <linux/reservation.h>
#endif
typedef void (*mali_dma_fence_context_callback_func_t)(void *pp_job_ptr);
struct mali_dma_fence_waiter {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *fence;
+ struct dma_fence_cb base;
+#else
struct fence_cb base;
- struct mali_dma_fence_context *parent;
struct fence *fence;
+#endif
+ struct mali_dma_fence_context *parent;
};
struct mali_dma_fence_context {
* @param seqno A linearly increasing sequence number for this context
* @return the new dma fence if success, or NULL on failure.
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno);
+#else
struct fence *mali_dma_fence_new(u32 context, u32 seqno);
-
+#endif
/* Signal and put dma fence
* @param fence The dma fence to signal and put
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+void mali_dma_fence_signal_and_put(struct dma_fence **fence);
+#else
void mali_dma_fence_signal_and_put(struct fence **fence);
-
+#endif
/**
* Initialize a mali dma fence context for pp job.
* @param dma_fence_context The mali dma fence context to initialize.
*/
#define MALI_INTERNAL_SYNC_IOC_FENCE_INFO _IOWR('>', 2, struct mali_internal_sync_info_data)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const struct dma_fence_ops fence_ops;
+#else
static const struct fence_ops fence_ops;
+#endif
static const struct file_operations sync_fence_fops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct dma_fence *fence)
+#else
static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct fence *fence)
+#endif
{
MALI_DEBUG_ASSERT_POINTER(fence);
return container_of(fence, struct mali_internal_sync_point, base);
return NULL;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_check_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
+#else
static void mali_internal_fence_check_cb_func(struct fence *fence, struct fence_cb *cb)
+#endif
{
struct mali_internal_sync_fence_cb *check;
struct mali_internal_sync_fence *sync_fence;
wake_up_all(&sync_fence->wq);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *sync_fence, struct dma_fence *sync_pt)
+#else
static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *sync_fence, struct fence *sync_pt)
+#endif
{
int fence_num = 0;
MALI_DEBUG_ASSERT_POINTER(sync_fence);
sync_fence->cbs[fence_num].base = sync_pt;
sync_fence->cbs[fence_num].sync_fence = sync_fence;
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ if (!dma_fence_add_callback(sync_pt, &sync_fence->cbs[fence_num].cb, mali_internal_fence_check_cb_func)) {
+ dma_fence_get(sync_pt);
+ atomic_inc(&sync_fence->num_fences);
+ atomic_inc(&sync_fence->status);
+ }
+#else
if (!fence_add_callback(sync_pt, &sync_fence->cbs[fence_num].cb, mali_internal_fence_check_cb_func)) {
fence_get(sync_pt);
atomic_inc(&sync_fence->num_fences);
atomic_inc(&sync_fence->status);
}
+#endif
}
static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
}
kref_init(&sync_timeline->kref_count);
sync_timeline->ops = ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ sync_timeline->fence_context = dma_fence_context_alloc(1);
+#else
sync_timeline->fence_context = fence_context_alloc(1);
+#endif
strlcpy(sync_timeline->name, name, sizeof(sync_timeline->name));
INIT_LIST_HEAD(&sync_timeline->sync_pt_list_head);
list_for_each_entry_safe(sync_pt, next, &sync_timeline->sync_pt_list_head,
sync_pt_list) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ if (dma_fence_is_signaled_locked(&sync_pt->base))
+#else
if (fence_is_signaled_locked(&sync_pt->base))
+#endif
list_del_init(&sync_pt->sync_pt_list);
}
}
spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
kref_get(&sync_timeline->kref_count);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
+ sync_timeline->fence_context, ++sync_timeline->value);
+#else
fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
sync_timeline->fence_context, ++sync_timeline->value);
+#endif
INIT_LIST_HEAD(&sync_pt->sync_pt_list);
spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
sync_fence->cbs[0].base = &sync_pt->base;
sync_fence->cbs[0].sync_fence = sync_fence;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ if (dma_fence_add_callback(&sync_pt->base, &sync_fence->cbs[0].cb,
+ mali_internal_fence_check_cb_func))
+#else
if (fence_add_callback(&sync_pt->base, &sync_fence->cbs[0].cb,
- mali_internal_fence_check_cb_func))
+ mali_internal_fence_check_cb_func))
+#endif
atomic_dec(&sync_fence->status);
return sync_fence;
}
for (i = j = 0; i < num_fence1 && j < num_fence2; ) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *fence1 = sync_fence1->cbs[i].base;
+ struct dma_fence *fence2 = sync_fence2->cbs[j].base;
+#else
struct fence *fence1 = sync_fence1->cbs[i].base;
struct fence *fence2 = sync_fence2->cbs[j].base;
-
+#endif
if (fence1->context < fence2->context) {
mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
return ret;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const char *mali_internal_fence_get_driver_name(struct dma_fence *fence)
+#else
static const char *mali_internal_fence_get_driver_name(struct fence *fence)
+#endif
{
struct mali_internal_sync_point *sync_pt;
struct mali_internal_sync_timeline *parent;
return parent->ops->driver_name;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const char *mali_internal_fence_get_timeline_name(struct dma_fence *fence)
+#else
static const char *mali_internal_fence_get_timeline_name(struct fence *fence)
+#endif
{
struct mali_internal_sync_point *sync_pt;
struct mali_internal_sync_timeline *parent;
return parent->name;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_release(struct dma_fence *fence)
+#else
static void mali_internal_fence_release(struct fence *fence)
+#endif
{
unsigned long flags;
struct mali_internal_sync_point *sync_pt;
parent->ops->free_pt(sync_pt);
kref_put(&parent->kref_count, mali_internal_sync_timeline_free);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_free(&sync_pt->base);
+#else
fence_free(&sync_pt->base);
+#endif
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_internal_fence_signaled(struct dma_fence *fence)
+#else
static bool mali_internal_fence_signaled(struct fence *fence)
+#endif
{
int ret;
struct mali_internal_sync_point *sync_pt;
ret = parent->ops->has_signaled(sync_pt);
if (0 > ret)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ fence->error = ret;
+#else
fence->status = ret;
+#endif
return ret;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_internal_fence_enable_signaling(struct dma_fence *fence)
+#else
static bool mali_internal_fence_enable_signaling(struct fence *fence)
+#endif
{
struct mali_internal_sync_point *sync_pt;
struct mali_internal_sync_timeline *parent;
return true;
}
-static void mali_internal_fence_value_str(struct fence *fence,
- char *str, int size)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_value_str(struct dma_fence *fence, char *str, int size)
+#else
+static void mali_internal_fence_value_str(struct fence *fence, char *str, int size)
+#endif
{
struct mali_internal_sync_point *sync_pt;
struct mali_internal_sync_timeline *parent;
parent->ops->print_sync_pt(sync_pt);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const struct dma_fence_ops fence_ops = {
+#else
static const struct fence_ops fence_ops = {
+#endif
.get_driver_name = mali_internal_fence_get_driver_name,
.get_timeline_name = mali_internal_fence_get_timeline_name,
.enable_signaling = mali_internal_fence_enable_signaling,
.signaled = mali_internal_fence_signaled,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ .wait = dma_fence_default_wait,
+#else
.wait = fence_default_wait,
+#endif
.release = mali_internal_fence_release,
.fence_value_str = mali_internal_fence_value_str,
};
num_fences = atomic_read(&sync_fence->num_fences);
for (i = 0; i <num_fences; ++i) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_remove_callback(sync_fence->cbs[i].base, &sync_fence->cbs[i].cb);
+ dma_fence_put(sync_fence->cbs[i].base);
+#else
fence_remove_callback(sync_fence->cbs[i].base, &sync_fence->cbs[i].cb);
fence_put(sync_fence->cbs[i].base);
+#endif
}
kfree(sync_fence);
for (i = 0; i < num_fences; ++i) {
struct mali_internal_sync_pt_info *sync_pt_info = NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence *base = sync_fence->cbs[i].base;
+#else
struct fence *base = sync_fence->cbs[i].base;
-
+#endif
if ((size - len) < sizeof(struct mali_internal_sync_pt_info)) {
MALI_PRINT_ERROR(("Mali internal sync:Failed to fence size check when sync fence ioctl fence data info.\n"));
err = -ENOMEM;
strlcpy(sync_pt_info->obj_name, base->ops->get_timeline_name(base), sizeof(sync_pt_info->obj_name));
strlcpy(sync_pt_info->driver_name, base->ops->get_driver_name(base), sizeof(sync_pt_info->driver_name));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ if (dma_fence_is_signaled(base))
+ sync_pt_info->status = base->error >= 0 ? 1 : base->error;
+#else
if (fence_is_signaled(base))
sync_pt_info->status = base->status >= 0 ? 1 : base->status;
+#endif
else
sync_pt_info->status = 0;
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
#include <linux/fence.h>
-
+#endif
struct mali_internal_sync_timeline;
struct mali_internal_sync_point;
struct mali_internal_sync_fence;
};
struct mali_internal_sync_point {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence base;
+#else
struct fence base;
+#endif
struct list_head sync_pt_list;
};
struct mali_internal_sync_fence_cb {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ struct dma_fence_cb cb;
+ struct dma_fence *base;
+#else
struct fence_cb cb;
struct fence *base;
+#endif
struct mali_internal_sync_fence *sync_fence;
};
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
#include <asm/uaccess.h>
+#endif
#include <linux/module.h>
#include <linux/mali/mali_utgard.h>
#include "mali_kernel_sysfs.h"
}
static void mali_mem_vma_close(struct vm_area_struct *vma)
{
-
- struct file *filp = NULL;
- struct mali_session_data *session = NULL;
-
/* If need to share the allocation, unref ref_count here */
mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
- filp = vma->vm_file;
- MALI_DEBUG_ASSERT(filp);
- session = (struct mali_session_data *)filp->private_data;
- MALI_DEBUG_ASSERT(session);
+ if (NULL != alloc) {
+ struct file *filp = NULL;
+ struct mali_session_data *session = NULL;
- mali_session_memory_lock(session);
- vma->vm_private_data = NULL;
- mali_session_memory_unlock(session);
+ filp = vma->vm_file;
+ MALI_DEBUG_ASSERT(filp);
+ session = (struct mali_session_data *)filp->private_data;
+ MALI_DEBUG_ASSERT(session);
- mali_allocation_unref(&alloc);
+ mali_session_memory_lock(session);
+ vma->vm_private_data = NULL;
+ mali_session_memory_unlock(session);
+
+ mali_allocation_unref(&alloc);
+ }
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static int mali_mem_vma_fault(struct vm_fault *vmf)
+#else
static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
{
struct file *filp = NULL;
struct mali_session_data *session = NULL;
mali_mem_allocation *alloc = NULL;
-
mali_mem_backend *mem_bkend = NULL;
int ret;
int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ struct vm_area_struct *vma = vmf->vma;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ unsigned long address = (unsigned long)vmf->address;
+#else
unsigned long address = (unsigned long)vmf->virtual_address;
-
+#endif
filp = vma->vm_file;
MALI_DEBUG_ASSERT(filp);
session = (struct mali_session_data *)filp->private_data;
}
mutex_unlock(&mali_idr_mutex);
+ if ((vma->vm_start + mem_bkend->size) > vma->vm_end) {
+ MALI_PRINT_ERROR(("mali_mmap: out of memory mapping map_size %d, physical_size %d\n", vma->vm_end - vma->vm_start, mem_bkend->size));
+ return -EFAULT;
+ }
+
if (!(MALI_MEM_SWAP == mali_alloc->type ||
(MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
/* Set some bits which indicate that, the memory is IO memory, meaning
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
struct mali_page_directory *pagedir;
_mali_osk_errcode_t err;
struct scatterlist *sg;
- u32 virt, flags;
+ u32 virt, flags, unmap_dma_size;
int i;
MALI_DEBUG_ASSERT_POINTER(mem_backend);
mem = mem_backend->dma_buf.attachment;
MALI_DEBUG_ASSERT_POINTER(mem);
-
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ unmap_dma_size = mem->buf->size;
session = alloc->session;
MALI_DEBUG_ASSERT_POINTER(session);
MALI_DEBUG_ASSERT(mem->session == session);
u32 size = sg_dma_len(sg);
dma_addr_t phys = sg_dma_address(sg);
+ unmap_dma_size -= size;
/* sg must be page aligned. */
MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
mem->is_mapped = MALI_TRUE;
+ if (0 != unmap_dma_size) {
+ MALI_DEBUG_PRINT_ERROR(("The dma buf size isn't equal to the total scatterlists' dma length.\n"));
+ mali_session_memory_unlock(session);
+ return -EFAULT;
+ }
+
/* Wake up any thread waiting for buffer to become mapped */
wake_up_all(&mem->wait_queue);
MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
+ if (args->vsize < args->psize) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: vsize %d shouldn't be less than psize %d\n", args->vsize, args->psize));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ } else if ((args->vsize % _MALI_OSK_MALI_PAGE_SIZE) || (args->psize % _MALI_OSK_MALI_PAGE_SIZE)) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: not supported non page aligned size-->pszie %d, vsize %d\n", args->psize, args->vsize));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ } else if ((args->vsize != args->psize) && ((args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) || (args->flags & _MALI_MEMORY_ALLOCATE_SECURE))) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: not supported mem resizeable for mem flag %d\n", args->flags));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
/* Check if the address is allocated
*/
mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
return _MALI_OSK_ERR_INVALID_ARGS;
}
- MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+
mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
- if (mali_alloc)
+ if (mali_alloc) {
+ if ((MALI_MEM_UMP == mali_alloc->type) || (MALI_MEM_DMA_BUF == mali_alloc->type)
+ || (MALI_MEM_EXTERNAL == mali_alloc->type)) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_free: not supported for memory type %d\n", mali_alloc->type));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ }
/* check ref_count */
args->free_pages_nr = mali_allocation_unref(&mali_alloc);
-
+ }
return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_INVALID_ARGS;
}
- if (NULL != mali_allocation)
+ if (NULL != mali_allocation) {
+
+ if ((MALI_MEM_UMP != mali_allocation->type) && (MALI_MEM_DMA_BUF != mali_allocation->type)
+ && (MALI_MEM_EXTERNAL != mali_allocation->type)) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_unbind not supported for memory type %d\n", mali_allocation->type));
+ return _MALI_OSK_ERR_UNSUPPORTED;
+ }
+
/* check ref_count */
mali_allocation_unref(&mali_allocation);
+ }
return _MALI_OSK_ERR_OK;
}
return ret;
}
- MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
+ if (MALI_MEM_COW != mem_backend->type) {
+ MALI_PRINT_ERROR(("_mali_ukk_mem_cow_modify_range: not supported for memory type %d !\n", mem_backend->type));
+ return _MALI_OSK_ERR_FAULT;
+ }
ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
break;
}
+ if ((NULL != mali_alloc->cpu_mapping.vma) && (mali_alloc == (mali_alloc->cpu_mapping.vma)->vm_private_data))
+ (mali_alloc->cpu_mapping.vma)->vm_private_data = NULL;
+
/*Remove backend memory idex */
mutex_lock(&mali_idr_mutex);
idr_remove(&mali_backend_idr, mali_alloc->backend_handle);
* Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
*/
#include <linux/kernel.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
#include <asm/uaccess.h>
+#endif
#include <linux/platform_device.h>
#include <linux/mali/mali_utgard.h>
#include <linux/of.h>
* Implementation of the OS abstraction layer for the kernel device driver
*/
#include <linux/kernel.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
#include <asm/uaccess.h>
+#endif
#include <asm/cacheflush.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#ifndef __MALI_OSK_SPECIFIC_H__
#define __MALI_OSK_SPECIFIC_H__
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
#include <asm/uaccess.h>
+#endif
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/hardirq.h>
sync_fence = mali_internal_sync_fence_create(sync_pt);
if (NULL == sync_fence) {
MALI_PRINT_ERROR(("Mali sync: sync_fence creation failed\n"));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+ dma_fence_put(&sync_pt->base);
+#else
fence_put(&sync_pt->base);
+#endif
return NULL;
}
*/
#include <linux/fs.h> /* file system operations */
#include <linux/slab.h> /* memort allocation functions */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include <linux/slab.h>
#include "mali_ukk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
-#include <asm/uaccess.h> /* user space access */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
#include "mali_ukk.h"
#include "mali_osk.h"