[DO NOT RELEASE][9610] drivers: gud: add gud driver
authorKim Mankyum <mankyum.kim@samsung.com>
Fri, 18 May 2018 02:20:46 +0000 (11:20 +0900)
committerJunho Choi <junhosj.choi@samsung.com>
Thu, 24 May 2018 00:02:00 +0000 (09:02 +0900)
Change-Id: I6e854a4957b5835b28ce8a76da47d349381db8bb
Signed-off-by: Kim Mankyum <mankyum.kim@samsung.com>
73 files changed:
drivers/gud/Kconfig [new file with mode: 0644]
drivers/gud/Makefile [new file with mode: 0644]
drivers/gud/gud-exynos9610/Kconfig [new file with mode: 0755]
drivers/gud/gud-exynos9610/Makefile [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/Makefile [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/admin.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/admin.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/arm.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/build_tag.h [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/client.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/client.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/clientlib.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/clock.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/clock.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/logging.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/logging.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/main.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/main.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/gptci.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcifc.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mciiwp.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcimcp.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcinq.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcitime.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcloadformat.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/nq.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/nq.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/platform.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api_imp.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_error.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_types.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_admin.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_linux_api.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_user.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/public/mobicore_driver_api.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/session.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/session.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/teeclientapi.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/user.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/user.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.h [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.h [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.c [new file with mode: 0644]
drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.h [new file with mode: 0644]
drivers/gud/gud-exynos9610/TlcTui/Makefile [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/build_tag.h [new file with mode: 0644]
drivers/gud/gud-exynos9610/TlcTui/inc/dciTui.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/inc/t-base-tui.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/main.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/public/tui_ioctl.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/tlcTui.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/tlcTui.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/trustedui.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/tui-hal.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/TlcTui/tui-hal.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-booster/Kconfig [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-booster/Makefile [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-booster/sec_os_booster.c [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-booster/secos_booster.h [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-ctrl/Kconfig [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-ctrl/Makefile [new file with mode: 0755]
drivers/gud/gud-exynos9610/sec-os-ctrl/sec_os_ctrl.c [new file with mode: 0755]

diff --git a/drivers/gud/Kconfig b/drivers/gud/Kconfig
new file mode 100644 (file)
index 0000000..772ab4b
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Mobicore Configuration
+#
+# The sub-Kconfig is only pulled in when building for the Exynos9610 SoC
+if SOC_EXYNOS9610
+source "drivers/gud/gud-exynos9610/Kconfig"
+endif
diff --git a/drivers/gud/Makefile b/drivers/gud/Makefile
new file mode 100644 (file)
index 0000000..a6888be
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_SOC_EXYNOS9610) += gud-exynos9610/
diff --git a/drivers/gud/gud-exynos9610/Kconfig b/drivers/gud/gud-exynos9610/Kconfig
new file mode 100755 (executable)
index 0000000..7827a98
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Trustonic drivers configuration
+#
+config TRUSTONIC_TEE
+    tristate "Trustonic TEE Driver"
+    depends on ARM || ARM64
+    default y
+    ---help---
+      Enable Trustonic TEE support
+
+config TRUSTONIC_TEE_LPAE
+    bool "Trustonic TEE uses LPAE"
+    depends on TRUSTONIC_TEE
+    default y if ARM64
+    default n if ARM
+    ---help---
+      Enable Trustonic TEE 64-bit physical addresses support
+
+config TRUSTONIC_TEE_DEBUG
+    bool "Trustonic TEE driver debug mode"
+    depends on TRUSTONIC_TEE
+    default n
+    ---help---
+      Enable the debug mode in the Trustonic TEE Driver.
+
+config TRUSTONIC_TRUSTED_UI
+    tristate "Trustonic Trusted UI"
+    depends on TRUSTONIC_TEE
+    ---help---
+      Enable Trustonic Trusted User Interface
+
+config TRUSTONIC_TRUSTED_UI_FB_BLANK
+    bool "Trustonic Trusted UI with fb_blank"
+    depends on TRUSTONIC_TRUSTED_UI
+    ---help---
+      Blank the framebuffer before starting a TUI session
+
+source "drivers/gud/gud-exynos9610/sec-os-ctrl/Kconfig"
+source "drivers/gud/gud-exynos9610/sec-os-booster/Kconfig"
diff --git a/drivers/gud/gud-exynos9610/Makefile b/drivers/gud/gud-exynos9610/Makefile
new file mode 100755 (executable)
index 0000000..a4c6669
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi core and trusted UI drivers
+#
+
+# Use '+=' (not ':='): when CONFIG_TRUSTONIC_TEE=y this variable is the
+# shared obj-y list, and ':=' would discard entries accumulated before it
+obj-$(CONFIG_TRUSTONIC_TEE) += MobiCoreDriver/
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui/
+
+obj-$(CONFIG_SECURE_OS_CONTROL) += sec-os-ctrl/
+obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec-os-booster/
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/Makefile b/drivers/gud/gud-exynos9610/MobiCoreDriver/Makefile
new file mode 100755 (executable)
index 0000000..712fafe
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi core driver
+#
+
+GUD_ROOT_FOLDER := drivers/gud/gud-exynos9610
+
+# add our modules to kernel.
+# Built-in or module depending on CONFIG_TRUSTONIC_TEE (y/m)
+obj-$(CONFIG_TRUSTONIC_TEE) += mcDrvModule.o
+
+# Object files composing the mcDrvModule binary
+mcDrvModule-y := \
+       admin.o \
+       client.o \
+       clientlib.o \
+       clock.o \
+       fastcall.o \
+       iwp.o \
+       logging.o \
+       main.o \
+       mcp.o \
+       mmu.o \
+       nq.o \
+       session.o \
+       teeclientapi.o \
+       user.o \
+       xen_be.o \
+       xen_common.o \
+       xen_fe.o
+
+# Release mode by default (NDEBUG disables assert-style checks)
+ccflags-y += -DNDEBUG
+ccflags-y += -Wno-declaration-after-statement
+
+# Debug build opt-in via Kconfig
+ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
+
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/admin.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/admin.c
new file mode 100755 (executable)
index 0000000..022ea27
--- /dev/null
@@ -0,0 +1,1146 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>        /* signal_pending */
+#endif
+#include <linux/freezer.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "main.h"
+#include "mmu.h"       /* For load_check and load_token */
+#include "mcp.h"
+#include "nq.h"
+#include "client.h"
+#include "admin.h"
+
+static struct {
+       struct mutex admin_tgid_mutex;  /* Lock for admin_tgid below */
+       pid_t admin_tgid;
+       int (*tee_start_cb)(void);
+       void (*tee_stop_cb)(void);
+       int last_tee_ret;
+       struct notifier_block tee_stop_notifier;
+} l_ctx;
+
+static struct mc_admin_driver_request {
+       /* Global */
+       struct mutex mutex;             /* Protects access to this struct */
+       struct mutex states_mutex;      /* Protect access to the states */
+       enum client_state {
+               IDLE,
+               REQUEST_SENT,
+               BUFFERS_READY,
+       } client_state;
+       enum server_state {
+               NOT_CONNECTED,          /* Device not open */
+               READY,                  /* Waiting for requests */
+               REQUEST_RECEIVED,       /* Got a request, is working */
+               RESPONSE_SENT,          /* Has sent a response header */
+               DATA_SENT,              /* Blocked until data is consumed */
+       } server_state;
+       /* Request */
+       u32 request_id;
+       struct mc_admin_request request;
+       struct completion client_complete;
+       /* Response */
+       struct mc_admin_response response;
+       struct completion server_complete;
+       void *buffer;                   /* Reception buffer (pre-allocated) */
+       size_t size;                    /* Size of the reception buffer */
+       bool lock_channel_during_freeze;/* Is freezing ongoing ? */
+} g_request;
+
+/* The mutex around the channel communication has to be wrapped in order
+ * to handle this use case :
+ * client 1 calls request_send()
+ *         wait on wait_for_completion_interruptible (with channel mutex taken)
+ * client 2 calls request_send()
+ *         waits on mutex_lock(channel mutex)
+ * kernel starts freezing tasks (suspend or reboot ongoing)
+ * if we do nothing, then the freezing will be aborted because client 1
+ * and 2 have to enter the refrigerator by themselves.
+ * Note : mutex cannot be held during freezing, so client 1 has release it
+ * => step 1 : client 1 sets a bool that says that the channel is still in use
+ * => step 2 : client 1 release the lock and enter the refrigerator
+ * => now any client trying to use the channel will face the bool preventing
+ * to use the channel. They also have to enter the refrigerator.
+ *
+ * These 3 functions handle this
+ */
+/*
+ * Let this task enter the refrigerator while a request is in flight.
+ * Sets lock_channel_during_freeze so other channel users back off,
+ * drops g_request.mutex around try_to_freeze() (mutexes cannot be held
+ * while frozen), then re-takes it. Called with g_request.mutex held;
+ * returns with it held again.
+ */
+static void check_freezing_ongoing(void)
+{
+       /* We don't want to let the channel be used. Let everyone know
+        * that we're using it
+        */
+       g_request.lock_channel_during_freeze = 1;
+       /* Now we can safely release the lock */
+       mutex_unlock(&g_request.mutex);
+       /* Let's try to freeze */
+       try_to_freeze();
+       /* Either freezing happened or was canceled.
+        * In both cases, reclaim the lock
+        */
+       mutex_lock(&g_request.mutex);
+       g_request.lock_channel_during_freeze = 0;
+}
+
+/*
+ * Take the admin channel mutex, cooperating with the freezer: if a
+ * freeze is flagged (see check_freezing_ongoing()), release the mutex,
+ * try to freeze, then retry until the channel is actually free.
+ */
+static void channel_lock(void)
+{
+       while (1) {
+               mutex_lock(&g_request.mutex);
+               /* We took the lock, but is there any freezing ongoing? */
+               if (g_request.lock_channel_during_freeze == 0)
+                       break;
+
+               /* yes, so let's freeze */
+               mutex_unlock(&g_request.mutex);
+               try_to_freeze();
+               /* Either freezing succeeded or was canceled.
+                * In both case, try again to get the lock.
+                * Give some CPU time to let the contender
+                * finish his channel operation
+                */
+               msleep(500);
+       }
+}
+
+/* Release the admin channel mutex taken by channel_lock() */
+static void channel_unlock(void)
+{
+       mutex_unlock(&g_request.mutex);
+}
+
+/*
+ * Compatibility shim: reinit_completion() replaced INIT_COMPLETION()
+ * in kernel 3.13, so pick whichever this kernel provides.
+ */
+#if KERNEL_VERSION(3, 13, 0) <= LINUX_VERSION_CODE
+static inline void reinit_completion_local(struct completion *x)
+{
+       reinit_completion(x);
+}
+#else
+static inline void reinit_completion_local(struct completion *x)
+{
+       INIT_COMPLETION(*x);
+}
+#endif
+
+/*
+ * Allocate a zeroed tee_object able to hold @length payload bytes.
+ * For SP trustlets, extra room is reserved up front for a
+ * mc_blob_len_info header plus up to three containers (root, SP,
+ * trustlet); obj->header_length != 0 then marks the object as an SP
+ * trustlet. Returns NULL on overflow or allocation failure; release
+ * with tee_object_free().
+ */
+static struct tee_object *tee_object_alloc(bool is_sp_trustlet, size_t length)
+{
+       struct tee_object *obj;
+       size_t size = sizeof(*obj) + length;
+       size_t header_length = 0;
+
+       /* Determine required size */
+       if (is_sp_trustlet) {
+               /* Need space for lengths info and containers */
+               header_length = sizeof(struct mc_blob_len_info);
+               size += header_length + 3 * MAX_SO_CONT_SIZE;
+       }
+
+       /* Check size for overflow (a wrapped sum ends up below length) */
+       if (size < length) {
+               mc_dev_err(-ENOMEM, "cannot allocate object of size %zu",
+                          length);
+               return NULL;
+       }
+
+       /* Allocate memory */
+       obj = vzalloc(size);
+       if (!obj)
+               return NULL;
+
+       /* A non-zero header_length indicates that we have a SP trustlet */
+       obj->header_length = (u32)header_length;
+       obj->length = (u32)length;
+       return obj;
+}
+
+/* Free an object obtained from tee_object_alloc() and its wrappers */
+void tee_object_free(struct tee_object *obj)
+{
+       vfree(obj);
+}
+
+/* Set the client side of the channel state machine (states_mutex held) */
+static inline void client_state_change(enum client_state state)
+{
+       mutex_lock(&g_request.states_mutex);
+       mc_dev_devel("client state changes from %d to %d",
+                    g_request.client_state, state);
+       g_request.client_state = state;
+       mutex_unlock(&g_request.states_mutex);
+}
+
+/* Atomically test the client state (snapshot; may change after return) */
+static inline bool client_state_is(enum client_state state)
+{
+       bool is;
+
+       mutex_lock(&g_request.states_mutex);
+       is = g_request.client_state == state;
+       mutex_unlock(&g_request.states_mutex);
+       return is;
+}
+
+/* Set the server (daemon) side of the channel state machine */
+static inline void server_state_change(enum server_state state)
+{
+       mutex_lock(&g_request.states_mutex);
+       mc_dev_devel("server state changes from %d to %d",
+                    g_request.server_state, state);
+       g_request.server_state = state;
+       mutex_unlock(&g_request.states_mutex);
+}
+
+/* Atomically test the server state (snapshot; may change after return) */
+static inline bool server_state_is(enum server_state state)
+{
+       bool is;
+
+       mutex_lock(&g_request.states_mutex);
+       is = g_request.server_state == state;
+       mutex_unlock(&g_request.states_mutex);
+       return is;
+}
+
+static void request_cancel(void);
+
+/*
+ * Send an admin request to the daemon and wait for the response header.
+ * Blocks (1s polls) until the daemon connects, publishes the request in
+ * g_request, wakes the daemon reader via client_complete, then waits
+ * (freezer-aware) on server_complete. Must be called with the channel
+ * locked (channel_lock()). Returns 0 when response data is pending
+ * (server in RESPONSE_SENT/DATA_SENT), negative errno otherwise; any
+ * failure cancels the request before returning.
+ */
+static int request_send(u32 command, const struct mc_uuid_t *uuid, bool is_gp,
+                       u32 spid)
+{
+       int counter = 0;
+       int wait_tens = 0;
+       int ret = 0;
+
+       /* Prepare request */
+       mutex_lock(&g_request.states_mutex);
+       /* Wait a little for daemon to connect */
+       while (g_request.server_state == NOT_CONNECTED) {
+               mutex_unlock(&g_request.states_mutex);
+               if (signal_pending(current))
+                       return -ERESTARTSYS;
+
+               if (counter++ == 10) {
+                       wait_tens++;
+                       mc_dev_info("daemon not connected after %d0s, waiting",
+                                   wait_tens);
+                       counter = 0;
+               }
+
+               ssleep(1);
+               mutex_lock(&g_request.states_mutex);
+       }
+
+       WARN_ON(g_request.client_state != IDLE);
+       if (g_request.server_state != READY) {
+               mutex_unlock(&g_request.states_mutex);
+               /* NOTE(review): server_state is re-read here without
+                * states_mutex; only the choice of error code depends on
+                * it - confirm this race is acceptable
+                */
+               if (g_request.server_state != NOT_CONNECTED) {
+                       ret = -EPROTO;
+                       mc_dev_err(ret, "invalid daemon state %d",
+                                  g_request.server_state);
+                       goto end;
+               } else {
+                       ret = -EHOSTUNREACH;
+                       mc_dev_err(ret, "daemon not connected");
+                       goto end;
+               }
+       }
+
+       memset(&g_request.request, 0, sizeof(g_request.request));
+       memset(&g_request.response, 0, sizeof(g_request.response));
+       /*
+        * Do not update the request ID until it is dealt with, in case the
+        * daemon arrives later.
+        */
+       g_request.request.request_id = g_request.request_id;
+       g_request.request.command = command;
+       if (uuid)
+               memcpy(&g_request.request.uuid, uuid, sizeof(*uuid));
+       else
+               memset(&g_request.request.uuid, 0, sizeof(*uuid));
+
+       g_request.request.is_gp = is_gp;
+       g_request.request.spid = spid;
+       g_request.client_state = REQUEST_SENT;
+       mutex_unlock(&g_request.states_mutex);
+
+       /* Send request */
+       complete(&g_request.client_complete);
+       mc_dev_devel("request sent");
+
+       /* Wait for header */
+       do {
+               ret = wait_for_completion_interruptible(
+                                               &g_request.server_complete);
+               if (!ret)
+                       break;
+               /* we may have to freeze now */
+               check_freezing_ongoing();
+               /* freezing happened or was canceled,
+                * let's sleep and try again
+                */
+               msleep(500);
+       } while (1);
+       mc_dev_devel("response received");
+
+       /* Server should be waiting with some data for us */
+       mutex_lock(&g_request.states_mutex);
+       switch (g_request.server_state) {
+       case NOT_CONNECTED:
+               /* Daemon gone */
+               ret = -EPIPE;
+               mc_dev_devel("daemon disconnected");
+               break;
+       case READY:
+               /* No data to come, likely an error */
+               ret = -g_request.response.error_no;
+               mc_dev_devel("daemon ret=%d", ret);
+               break;
+       case RESPONSE_SENT:
+       case DATA_SENT:
+               /* Normal case, data to come */
+               ret = 0;
+               break;
+       case REQUEST_RECEIVED:
+               /* Should not happen as complete means the state changed */
+               ret = -EPIPE;
+               mc_dev_err(ret, "daemon is in a bad state: %d",
+                          g_request.server_state);
+               break;
+       }
+
+       mutex_unlock(&g_request.states_mutex);
+
+end:
+       if (ret)
+               request_cancel();
+
+       mc_dev_devel("ret=%d", ret);
+       return ret;
+}
+
+/*
+ * Receive the payload announced by a successful request_send() into
+ * @address (@size bytes). Publishes the reception buffer, wakes the
+ * daemon writer, then waits (freezer-aware) until the daemon signals
+ * completion. Returns 0 once the wait completes, -EPIPE (after
+ * cancelling the request) if the daemon is not in a sending state.
+ */
+static int request_receive(void *address, u32 size)
+{
+       /*
+        * At this point we have received the header and prepared some buffers
+        * to receive data that we know are coming from the server.
+        */
+
+       /* Check server state */
+       bool server_ok;
+
+       mutex_lock(&g_request.states_mutex);
+       server_ok = (g_request.server_state == RESPONSE_SENT) ||
+                   (g_request.server_state == DATA_SENT);
+       mutex_unlock(&g_request.states_mutex);
+       if (!server_ok) {
+               int ret = -EPIPE;
+
+               /* NOTE(review): server_state logged below is read outside
+                * states_mutex - value may be stale, log only
+                */
+               mc_dev_err(ret, "expected server state %d or %d, not %d",
+                          RESPONSE_SENT, DATA_SENT, g_request.server_state);
+               request_cancel();
+               return ret;
+       }
+
+       /* Setup reception buffer */
+       g_request.buffer = address;
+       g_request.size = size;
+       client_state_change(BUFFERS_READY);
+
+       /* Unlock write of data */
+       complete(&g_request.client_complete);
+
+       /* Wait for data */
+       do {
+               int ret = 0;
+
+               ret = wait_for_completion_interruptible(
+                                            &g_request.server_complete);
+               if (!ret)
+                       break;
+               /* We may have to freeze now */
+               check_freezing_ongoing();
+               /* freezing happened or was canceled,
+                * let's sleep and try again
+                */
+               msleep(500);
+       } while (1);
+
+       /* Reset reception buffer */
+       g_request.buffer = NULL;
+       g_request.size = 0;
+
+       /* Return to idle state */
+       client_state_change(IDLE);
+       return 0;
+}
+
+/* Must be called instead of request_receive() to cancel a pending request */
+static void request_cancel(void)
+{
+       /* Unlock write of data */
+       mutex_lock(&g_request.states_mutex);
+       if (g_request.server_state == DATA_SENT)
+               complete(&g_request.client_complete);
+
+       /* Return to idle state */
+       g_request.client_state = IDLE;
+       mutex_unlock(&g_request.states_mutex);
+}
+
+/*
+ * Fetch the root container from the daemon into @address.
+ * Returns the container length on success, negative errno on error.
+ */
+static int admin_get_root_container(void *address)
+{
+       int ret = 0;
+
+       /* Lock communication channel */
+       channel_lock();
+
+       /* Send request and wait for header */
+       ret = request_send(MC_DRV_GET_ROOT_CONTAINER, NULL, 0, 0);
+       if (ret)
+               goto end;
+
+       /* Check length against max */
+       if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+               request_cancel();
+               /* Negative errno: callers check 'ret < 0', and a positive
+                * value would be mistaken for a container length
+                */
+               ret = -EREMOTEIO;
+               mc_dev_err(ret, "response length exceeds maximum");
+               goto end;
+       }
+
+       /* Get data */
+       ret = request_receive(address, g_request.response.length);
+       if (!ret)
+               ret = g_request.response.length;
+
+end:
+       channel_unlock();
+       return ret;
+}
+
+/*
+ * Fetch the service-provider container for @spid into @address.
+ * Returns the container length on success, negative errno on error.
+ */
+static int admin_get_sp_container(void *address, u32 spid)
+{
+       int ret = 0;
+
+       /* Lock communication channel */
+       channel_lock();
+
+       /* Send request and wait for header */
+       ret = request_send(MC_DRV_GET_SP_CONTAINER, NULL, 0, spid);
+       if (ret)
+               goto end;
+
+       /* Check length against max */
+       if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+               request_cancel();
+               /* Negative errno: callers check 'ret < 0', and a positive
+                * value would be mistaken for a container length
+                */
+               ret = -EREMOTEIO;
+               mc_dev_err(ret, "response length exceeds maximum");
+               goto end;
+       }
+
+       /* Get data */
+       ret = request_receive(address, g_request.response.length);
+       if (!ret)
+               ret = g_request.response.length;
+
+end:
+       channel_unlock();
+       return ret;
+}
+
+/*
+ * Fetch the trustlet container for @uuid/@spid into @address.
+ * Returns the container length on success, negative errno on error.
+ */
+static int admin_get_trustlet_container(void *address,
+                                       const struct mc_uuid_t *uuid, u32 spid)
+{
+       int ret = 0;
+
+       /* Lock communication channel */
+       channel_lock();
+
+       /* Send request and wait for header */
+       ret = request_send(MC_DRV_GET_TRUSTLET_CONTAINER, uuid, 0, spid);
+       if (ret)
+               goto end;
+
+       /* Check length against max */
+       if (g_request.response.length >= MAX_SO_CONT_SIZE) {
+               request_cancel();
+               /* Negative errno: callers check 'ret < 0', and a positive
+                * value would be mistaken for a container length
+                */
+               ret = -EREMOTEIO;
+               mc_dev_err(ret, "response length exceeds maximum");
+               goto end;
+       }
+
+       /* Get data */
+       ret = request_receive(address, g_request.response.length);
+       if (!ret)
+               ret = g_request.response.length;
+
+end:
+       channel_unlock();
+       return ret;
+}
+
+/*
+ * Ask the daemon for the trustlet binary identified by @uuid and receive
+ * it into a freshly allocated tee_object (SP-trustlet layout if the
+ * daemon says so). On success *spid is set from the daemon's response.
+ * Returns the object or ERR_PTR(-errno).
+ */
+static struct tee_object *admin_get_trustlet(const struct mc_uuid_t *uuid,
+                                            bool is_gp, u32 *spid)
+{
+       struct tee_object *obj = NULL;
+       bool is_sp_tl;
+       int ret = 0;
+
+       /* Lock communication channel */
+       channel_lock();
+
+       /* Send request and wait for header */
+       ret = request_send(MC_DRV_GET_TRUSTLET, uuid, is_gp, 0);
+       if (ret)
+               goto end;
+
+       /* Allocate memory */
+       is_sp_tl = g_request.response.service_type == SERVICE_TYPE_SP_TRUSTLET;
+       obj = tee_object_alloc(is_sp_tl, g_request.response.length);
+       if (!obj) {
+               request_cancel();
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       /* Get data (payload goes after the reserved header area) */
+       ret = request_receive(&obj->data[obj->header_length], obj->length);
+       *spid = g_request.response.spid;
+
+end:
+       channel_unlock();
+       if (ret)
+               return ERR_PTR(ret);
+
+       return obj;
+}
+
+/* Tell the daemon the TEE crashed; no payload is expected back */
+static void mc_admin_sendcrashdump(void)
+{
+       /* Serialize access to the admin channel */
+       channel_lock();
+
+       /* Header-only exchange: on success there is no data to receive,
+        * so the pending request must be cancelled explicitly
+        */
+       if (!request_send(MC_DRV_SIGNAL_CRASH, NULL, false, 0))
+               request_cancel();
+
+       channel_unlock();
+}
+
+/*
+ * Notifier invoked when the TEE stops: forward a crash-dump request to
+ * the daemon and record -EHOSTUNREACH so later start attempts fail.
+ */
+static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
+                               void *data)
+{
+       mc_admin_sendcrashdump();
+       l_ctx.last_tee_ret = -EHOSTUNREACH;
+       return 0;       /* NOTIFY_DONE */
+}
+
+/*
+ * Complete an SP trustlet object: append the root, SP and trustlet
+ * containers (fetched from the daemon) right after the payload, then
+ * fill in the mc_blob_len_info header at the start of obj->data and
+ * grow obj->length accordingly. @obj must come from tee_object_alloc()
+ * with is_sp_trustlet=true (room pre-reserved). Returns 0 or -errno.
+ */
+static int tee_object_make(u32 spid, struct tee_object *obj)
+{
+       struct mc_blob_len_info *l_info = (struct mc_blob_len_info *)obj->data;
+       u8 *address = &obj->data[obj->header_length + obj->length];
+       struct mclf_header_v2 *thdr;
+       int ret;
+
+       /* Get root container (admin_* return the length written) */
+       ret = admin_get_root_container(address);
+       if (ret < 0)
+               goto err;
+
+       l_info->root_size = ret;
+       address += ret;
+
+       /* Get SP container */
+       ret = admin_get_sp_container(address, spid);
+       if (ret < 0)
+               goto err;
+
+       l_info->sp_size = ret;
+       address += ret;
+
+       /* Get trustlet container (UUID comes from the MCLF header) */
+       thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
+       ret = admin_get_trustlet_container(address, &thdr->uuid, spid);
+       if (ret < 0)
+               goto err;
+
+       l_info->ta_size = ret;
+       address += ret;
+
+       /* Setup lengths information */
+       l_info->magic = MC_TLBLOBLEN_MAGIC;
+       obj->length += sizeof(*l_info);
+       obj->length += l_info->root_size + l_info->sp_size + l_info->ta_size;
+       ret = 0;
+
+err:
+       return ret;
+}
+
+/*
+ * Duplicate @length bytes at kernel address @address into a new,
+ * non-SP tee_object. Returns the object or ERR_PTR(-ENOMEM).
+ */
+struct tee_object *tee_object_copy(uintptr_t address, size_t length)
+{
+       struct tee_object *new_obj = tee_object_alloc(false, length);
+
+       if (!new_obj)
+               return ERR_PTR(-ENOMEM);
+
+       /* Copy the trustlet blob into the fresh object */
+       memcpy(new_obj->data, (void *)address, length);
+       return new_obj;
+}
+
+/*
+ * Build a tee_object from a user-space trustlet blob at @address
+ * (@length bytes). The MCLF header is read first to decide whether this
+ * is an SP trustlet; SP trustlets are then completed with their
+ * containers via tee_object_make(). Returns the object or
+ * ERR_PTR(-errno). Free with tee_object_free().
+ */
+struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length)
+{
+       char __user *addr = (char __user *)address;
+       struct tee_object *obj;
+       u8 *data;
+       struct mclf_header_v2 thdr;
+       int ret;
+
+       /* Check length */
+       if (length < sizeof(thdr)) {
+               ret = -EFAULT;
+               mc_dev_err(ret, "buffer shorter than header size");
+               return ERR_PTR(ret);
+       }
+
+       /* Read header */
+       if (copy_from_user(&thdr, addr, sizeof(thdr))) {
+               ret = -EFAULT;
+               mc_dev_err(ret, "header: copy_from_user failed");
+               return ERR_PTR(ret);
+       }
+
+       /* Allocate memory */
+       obj = tee_object_alloc(thdr.service_type == SERVICE_TYPE_SP_TRUSTLET,
+                              length);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       /* Copy header */
+       data = &obj->data[obj->header_length];
+       memcpy(data, &thdr, sizeof(thdr));
+       /* Copy the rest of the data */
+       data += sizeof(thdr);
+       if (copy_from_user(data, &addr[sizeof(thdr)], length - sizeof(thdr))) {
+               ret = -EFAULT;
+               mc_dev_err(ret, "data: copy_from_user failed");
+               /* Use the object's own destructor, not raw vfree() */
+               tee_object_free(obj);
+               return ERR_PTR(ret);
+       }
+
+       if (obj->header_length) {
+               ret = tee_object_make(spid, obj);
+               if (ret) {
+                       tee_object_free(obj);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return obj;
+}
+
+/* Build a minimal object containing only an MCLF header with @uuid set */
+struct tee_object *tee_object_select(const struct mc_uuid_t *uuid)
+{
+       struct mclf_header_v2 *header;
+       struct tee_object *obj = tee_object_alloc(false, sizeof(*header));
+
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       header = (struct mclf_header_v2 *)&obj->data[obj->header_length];
+       memcpy(&header->uuid, uuid, sizeof(header->uuid));
+       return obj;
+}
+
+/*
+ * Fetch the trustlet @uuid from the daemon and, for SP trustlets, turn
+ * it into a full secure object including all containers. Returns the
+ * object or ERR_PTR(-errno). Free with tee_object_free().
+ */
+struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp)
+{
+       struct tee_object *obj;
+       u32 spid = 0;
+
+       /* admin_get_trustlet creates the right object based on service type */
+       obj = admin_get_trustlet(uuid, is_gp, &spid);
+       if (IS_ERR(obj))
+               return obj;
+
+       /* SP trustlet: create full secure object with all containers */
+       if (obj->header_length) {
+               int ret;
+
+               /* Do not return EINVAL in this case as SPID was not found */
+               if (!spid) {
+                       /* Use the object's own destructor, not raw vfree() */
+                       tee_object_free(obj);
+                       return ERR_PTR(-ENOENT);
+               }
+
+               ret = tee_object_make(spid, obj);
+               if (ret) {
+                       tee_object_free(obj);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       return obj;
+}
+
+/*
+ * Load a secure driver on behalf of the daemon: allocate a DCI buffer,
+ * open a driver session (by blob if an address is given, by UUID
+ * otherwise) and release our reference on the buffer.
+ */
+static inline int load_driver(struct tee_client *client,
+                             struct mc_admin_load_info *load_info)
+{
+       u32 sid = 0;
+       int ret;
+       struct mcp_open_info info = {
+               .spid = load_info->spid,
+               .va = load_info->address,
+               .len = load_info->length,
+               .uuid = &load_info->uuid,
+               .tci_len = PAGE_SIZE,
+               /* ExySp : Kinibi410 */
+               .user = 1,
+       };
+
+       /* A user address means we load a blob, otherwise look up by UUID */
+       info.type = info.va ? TEE_MC_DRIVER : TEE_MC_DRIVER_UUID;
+
+       /* Create DCI in case it's needed */
+       ret = client_cbuf_create(client, info.tci_len, &info.tci_va, NULL);
+       if (ret)
+               return ret;
+
+       /* Open session */
+       ret = client_mc_open_common(client, &info, &sid);
+       if (!ret)
+               mc_dev_devel("driver loaded with session id %x", sid);
+
+       /*
+        * Always 'free' the buffer (will remain as long as used), never freed
+        * otherwise
+        */
+       client_cbuf_free(client, info.tci_va);
+
+       return ret;
+}
+
+/*
+ * Map the daemon-provided token buffer for the SWd and issue the
+ * MCP load-token command. The MMU wrapper is released before returning.
+ */
+static inline int load_token(struct mc_admin_load_info *token)
+{
+       struct mc_ioctl_buffer buf = {
+               .va = (uintptr_t)token->address,
+               .len = token->length,
+               .flags = MC_IO_MAP_INPUT,
+       };
+       struct mcp_buffer_map map;
+       struct tee_mmu *mmu;
+       int ret;
+
+       /* Build an MMU table over the daemon's buffer */
+       mmu = tee_mmu_create(current->mm, &buf);
+       if (IS_ERR(mmu))
+               return PTR_ERR(mmu);
+
+       tee_mmu_buffer(mmu, &map);
+       ret = mcp_load_token(token->address, &map);
+       tee_mmu_put(mmu);
+       return ret;
+}
+
+/*
+ * Read a trustlet binary from user space and ask the SWd to check it.
+ *
+ * Fix: the object returned by tee_object_read() was leaked on both the
+ * tee_mmu_create() error path and the success path (it is vmalloc'd, see
+ * the vfree() calls in tee_object_read/tee_object_get above). Free it on
+ * every exit once the MCP command has completed.
+ */
+static inline int load_check(struct mc_admin_load_info *info)
+{
+       struct tee_object *obj;
+       struct tee_mmu *mmu;
+       struct mcp_buffer_map map;
+       struct mc_ioctl_buffer buf;
+       int ret;
+
+       obj = tee_object_read(info->spid, info->address, info->length);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       buf.va = (uintptr_t)obj->data;
+       buf.len = obj->length;
+       buf.flags = MC_IO_MAP_INPUT;
+       mmu = tee_mmu_create(NULL, &buf);
+       if (IS_ERR(mmu)) {
+               ret = PTR_ERR(mmu);
+               goto err_obj;
+       }
+
+       tee_mmu_buffer(mmu, &map);
+       ret = mcp_load_check(obj, &map);
+       tee_mmu_put(mmu);
+err_obj:
+       vfree(obj);
+       return ret;
+}
+
+/*
+ * Daemon's write() side of the driver-to-daemon request protocol.
+ *
+ * Two-phase, driven by the current server state:
+ * - REQUEST_RECEIVED: the daemon sends a response header; if it announces
+ *   a payload (response.length != 0) move to RESPONSE_SENT, else READY.
+ * - RESPONSE_SENT: the daemon sends the payload into g_request.buffer,
+ *   once the client has published its buffers (BUFFERS_READY).
+ * Any protocol violation resets the server state to READY and reports
+ * EPIPE to the waiting client via g_request.response.error_no.
+ */
+static ssize_t admin_write(struct file *file, const char __user *user,
+                          size_t len, loff_t *off)
+{
+       int ret;
+
+       /* No offset allowed */
+       if (*off) {
+               ret = -ECOMM;
+               mc_dev_err(ret, "offset not supported");
+               g_request.response.error_no = EPIPE;
+               goto err;
+       }
+
+       if (server_state_is(REQUEST_RECEIVED)) {
+               /* Check client state */
+               if (!client_state_is(REQUEST_SENT)) {
+                       ret = -EPIPE;
+                       mc_dev_err(ret, "expected client state %d, not %d",
+                                  REQUEST_SENT, g_request.client_state);
+                       g_request.response.error_no = EPIPE;
+                       goto err;
+               }
+
+               /* Receive response header */
+               if (copy_from_user(&g_request.response, user,
+                                  sizeof(g_request.response))) {
+                       ret = -ECOMM;
+                       mc_dev_err(ret, "failed to get response from daemon");
+                       g_request.response.error_no = EPIPE;
+                       goto err;
+               }
+
+               /* Check request ID */
+               if (g_request.request.request_id !=
+                                               g_request.response.request_id) {
+                       ret = -EBADE;
+                       mc_dev_err(ret, "expected id %d, not %d",
+                                  g_request.request.request_id,
+                                  g_request.response.request_id);
+                       g_request.response.error_no = EPIPE;
+                       goto err;
+               }
+
+               /* Response header is acceptable */
+               ret = sizeof(g_request.response);
+               if (g_request.response.length)
+                       server_state_change(RESPONSE_SENT);
+               else
+                       server_state_change(READY);
+
+               goto end;
+       } else if (server_state_is(RESPONSE_SENT)) {
+               /* Server is waiting */
+               server_state_change(DATA_SENT);
+
+               /* Get data */
+               ret = wait_for_completion_interruptible(
+                                               &g_request.client_complete);
+
+               /* Server received a signal, let see if it tries again */
+               if (ret) {
+                       server_state_change(RESPONSE_SENT);
+                       return ret;
+               }
+
+               /* Check client state */
+               if (!client_state_is(BUFFERS_READY)) {
+                       ret = -EPIPE;
+                       mc_dev_err(ret, "expected client state %d, not %d",
+                                  BUFFERS_READY, g_request.client_state);
+                       g_request.response.error_no = EPIPE;
+                       goto err;
+               }
+
+               /* We do not deal with several writes */
+               /* NOTE(review): the daemon's length is silently clamped to
+                * the size the client asked for — confirm this is intended */
+               if (len != g_request.size)
+                       len = g_request.size;
+
+               ret = copy_from_user(g_request.buffer, user, len);
+               if (ret) {
+                       ret = -ECOMM;
+                       mc_dev_err(ret, "failed to get data from daemon");
+                       g_request.response.error_no = EPIPE;
+                       goto err;
+               }
+
+               ret = len;
+               server_state_change(READY);
+               goto end;
+       } else {
+               /* write() in any other server state is a protocol error */
+               ret = -ECOMM;
+               goto err;
+       }
+
+err:
+       server_state_change(READY);
+end:
+       /* Wake the client thread waiting in the request path, success or not */
+       complete(&g_request.server_complete);
+       return ret;
+}
+
+/* Reads only carry "stop" messages to the daemon; seeking is not allowed */
+static ssize_t admin_read(struct file *file, char __user *user, size_t len,
+                         loff_t *off)
+{
+       int ret = -ECOMM;
+
+       /* No offset allowed */
+       if (*off) {
+               mc_dev_err(ret, "offset not supported");
+               return ret;
+       }
+
+       return nq_get_stop_message(user, len);
+}
+
+/*
+ * Admin device ioctl handler, used only by the daemon:
+ * - GET_DRIVER_REQUEST: block until the driver has a request to forward
+ * - GET_INFO: report driver version and initial request ID
+ * - LOAD_DRIVER / LOAD_TOKEN / LOAD_CHECK: daemon-initiated SWd loads
+ *
+ * Fix: GET_INFO returned the raw copy_to_user() result (number of bytes
+ * NOT copied) to user space; it now returns -EFAULT on a failed copy,
+ * per kernel convention.
+ */
+static long admin_ioctl(struct file *file, unsigned int cmd,
+                       unsigned long arg)
+{
+       void __user *uarg = (void __user *)arg;
+       int ret = -EINVAL;
+
+       mc_dev_devel("%u from %s", _IOC_NR(cmd), current->comm);
+
+       switch (cmd) {
+       case MC_ADMIN_IO_GET_DRIVER_REQUEST: {
+               /* Update TGID as it may change (when becoming a daemon) */
+               if (l_ctx.admin_tgid != current->tgid) {
+                       l_ctx.admin_tgid = current->tgid;
+                       mc_dev_info("daemon PID changed to %d",
+                                   l_ctx.admin_tgid);
+               }
+
+               /* Block until a request is available */
+               server_state_change(READY);
+               ret = wait_for_completion_interruptible(
+                                               &g_request.client_complete);
+               if (ret)
+                       /* Interrupted by signal */
+                       break;
+
+               /* Check client state */
+               if (!client_state_is(REQUEST_SENT)) {
+                       ret = -EPIPE;
+                       mc_dev_err(ret, "expected client state %d, not %d",
+                                  REQUEST_SENT, g_request.client_state);
+                       g_request.response.error_no = EPIPE;
+                       complete(&g_request.server_complete);
+                       break;
+               }
+
+               /* Send request (the driver request mutex is held) */
+               if (copy_to_user(uarg, &g_request.request,
+                                sizeof(g_request.request))) {
+                       server_state_change(READY);
+                       complete(&g_request.server_complete);
+                       ret = -EPROTO;
+                       break;
+               }
+
+               ret = 0;
+               /* Now that the daemon got it, update the request ID */
+               g_request.request_id++;
+
+               server_state_change(REQUEST_RECEIVED);
+               break;
+       }
+       case MC_ADMIN_IO_GET_INFO: {
+               struct mc_admin_driver_info info;
+
+               info.drv_version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+                                             MCDRVMODULEAPI_VERSION_MINOR);
+               info.initial_cmd_id = g_request.request_id;
+               /* -EFAULT on failed copy, not the leftover byte count */
+               ret = copy_to_user(uarg, &info, sizeof(info)) ? -EFAULT : 0;
+               break;
+       }
+       case MC_ADMIN_IO_LOAD_DRIVER: {
+               struct tee_client *client = file->private_data;
+               struct mc_admin_load_info info;
+
+               if (copy_from_user(&info, uarg, sizeof(info))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               /* Make sure we have a local client */
+               if (!client) {
+                       client = client_create(true);
+                       /* Store client for future use/close */
+                       file->private_data = client;
+               }
+
+               if (!client) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               ret = load_driver(client, &info);
+               break;
+       }
+       case MC_ADMIN_IO_LOAD_TOKEN: {
+               struct mc_admin_load_info info;
+
+               if (copy_from_user(&info, uarg, sizeof(info))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = load_token(&info);
+               break;
+       }
+       case MC_ADMIN_IO_LOAD_CHECK: {
+               struct mc_admin_load_info info;
+
+               if (copy_from_user(&info, uarg, sizeof(info))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = load_check(&info);
+               break;
+       }
+       default:
+               ret = -ENOIOCTLCMD;
+       }
+
+       return ret;
+}
+
+/*
+ * admin_release() - called when the daemon closes the admin device node.
+ * Closes the optional local client, marks the server as NOT_CONNECTED and
+ * wakes any client thread stuck waiting on the daemon (with ESHUTDOWN).
+ *
+ * @inode
+ * @file
+ *
+ * Returns 0
+ */
+static int admin_release(struct inode *inode, struct file *file)
+{
+       /* Close client if any */
+       if (file->private_data)
+               client_close((struct tee_client *)file->private_data);
+
+       /* Requests from driver to daemon */
+       mutex_lock(&g_request.states_mutex);
+       mc_dev_devel("server state changes from %d to %d",
+                    g_request.server_state, NOT_CONNECTED);
+       g_request.server_state = NOT_CONNECTED;
+       /* A non-zero command indicates that a thread is waiting */
+       if (g_request.client_state != IDLE) {
+               g_request.response.error_no = ESHUTDOWN;
+               complete(&g_request.server_complete);
+       }
+       mutex_unlock(&g_request.states_mutex);
+       mc_dev_info("daemon connection closed, TGID %d", l_ctx.admin_tgid);
+       l_ctx.admin_tgid = 0;
+
+       /*
+        * ret is quite irrelevant here as most apps don't care about the
+        * return value from close() and it's quite difficult to recover
+        */
+       return 0;
+}
+
+/*
+ * Accept the (single) daemon connection. On the first successful connect,
+ * trigger the secure world start callback; a start failure is remembered
+ * in l_ctx.last_tee_ret and returned on every subsequent open.
+ */
+static int admin_open(struct inode *inode, struct file *file)
+{
+       int ret = 0;
+
+       /* Only one connection allowed to admin interface */
+       mutex_lock(&l_ctx.admin_tgid_mutex);
+       if (l_ctx.admin_tgid) {
+               ret = -EBUSY;
+               mc_dev_err(ret, "daemon connection already open, PID %d",
+                          l_ctx.admin_tgid);
+       } else {
+               l_ctx.admin_tgid = current->tgid;
+       }
+       mutex_unlock(&l_ctx.admin_tgid_mutex);
+       if (ret)
+               return ret;
+
+       /* Setup the usual variables */
+       mc_dev_devel("accept %s as daemon", current->comm);
+
+       /*
+        * daemon is connected so now we can safely suppose
+        * the secure world is loaded too
+        */
+       if (l_ctx.last_tee_ret == TEE_START_NOT_TRIGGERED)
+               l_ctx.last_tee_ret = l_ctx.tee_start_cb();
+
+       /* Failed to start the TEE, either now or before */
+       if (l_ctx.last_tee_ret) {
+               /* Release the slot so another daemon may try again */
+               mutex_lock(&l_ctx.admin_tgid_mutex);
+               l_ctx.admin_tgid = 0;
+               mutex_unlock(&l_ctx.admin_tgid_mutex);
+               return l_ctx.last_tee_ret;
+       }
+
+       reinit_completion_local(&g_request.client_complete);
+       reinit_completion_local(&g_request.server_complete);
+       /* Requests from driver to daemon */
+       mc_dev_info("daemon connection open, TGID %d", l_ctx.admin_tgid);
+       return 0;
+}
+
+/* function table structure of this device driver. */
+static const struct file_operations mc_admin_fops = {
+       .owner = THIS_MODULE,
+       .open = admin_open,
+       .release = admin_release,
+       .unlocked_ioctl = admin_ioctl,
+#ifdef CONFIG_COMPAT
+       /* NOTE(review): same handler for 32-bit callers; assumes all ioctl
+        * payload layouts are 32/64-bit compatible — confirm */
+       .compat_ioctl = admin_ioctl,
+#endif
+       .write = admin_write,
+       .read = admin_read,
+};
+
+/*
+ * Initialize the admin interface: locks, request state, TEE stop notifier
+ * and the admin char device. tee_start_cb is deferred until the daemon
+ * connects (see admin_open); tee_stop_cb runs at exit if the TEE started.
+ * Always returns 0.
+ */
+int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
+                 void (*tee_stop_cb)(void))
+{
+       mutex_init(&l_ctx.admin_tgid_mutex);
+       /* Requests from driver to daemon */
+       mutex_init(&g_request.mutex);
+       mutex_init(&g_request.states_mutex);
+       /* Arbitrary initial request ID, reported via MC_ADMIN_IO_GET_INFO */
+       g_request.request_id = 42;
+       init_completion(&g_request.client_complete);
+       init_completion(&g_request.server_complete);
+       l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+       nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+       /* Create char device */
+       cdev_init(cdev, &mc_admin_fops);
+       /* Register the call back for starting the secure world */
+       l_ctx.tee_start_cb = tee_start_cb;
+       l_ctx.tee_stop_cb = tee_stop_cb;
+       l_ctx.last_tee_ret = TEE_START_NOT_TRIGGERED;
+       return 0;
+}
+
+/* Tear down the admin interface; stop the TEE only if it actually started */
+void mc_admin_exit(void)
+{
+       nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+       if (l_ctx.last_tee_ret == 0)
+               l_ctx.tee_stop_cb();
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/admin.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/admin.h
new file mode 100755 (executable)
index 0000000..8f8667e
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_ADMIN_H_
+#define _MC_ADMIN_H_
+
+struct cdev;
+struct mc_uuid_t;
+struct tee_object;
+
+/* Register the admin char device; callbacks start/stop the secure world */
+int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
+                 void (*tee_stop_cb)(void));
+void mc_admin_exit(void);
+
+/* Create a UUID-only object, resolved later by the daemon */
+struct tee_object *tee_object_select(const struct mc_uuid_t *uuid);
+/* Fetch a trustlet binary from the daemon by UUID */
+struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp);
+struct tee_object *tee_object_copy(uintptr_t address, size_t length);
+/* Build an object from a user space buffer */
+struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length);
+/* Release an object returned by the functions above */
+void tee_object_free(struct tee_object *object);
+
+#endif /* _MC_ADMIN_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/arm.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/arm.h
new file mode 100755 (executable)
index 0000000..02dd0c5
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_ARM_H_
+#define _MC_ARM_H_
+
+#include "main.h"
+
+#ifdef CONFIG_ARM64
+/* Security extensions are taken for granted on ARM64 builds */
+static inline bool has_security_extensions(void)
+{
+       return true;
+}
+
+/* The kernel is assumed to run non-secure on ARM64 */
+static inline bool is_secure_mode(void)
+{
+       return false;
+}
+#else
+/*
+ * ARM Trustzone specific masks and modes
+ * Vanilla Linux is unaware of TrustZone extension.
+ * I.e. arch/arm/include/asm/ptrace.h does not define monitor mode.
+ * Also TZ bits in cpuid are not defined, ARM port uses magic numbers,
+ * see arch/arm/kernel/setup.c
+ */
+#define ARM_MONITOR_MODE               (0x16) /*(0b10110)*/
+#define ARM_SECURITY_EXTENSION_MASK    (0x30)
+
+/* check if CPU supports the ARM TrustZone Security Extensions */
+static inline bool has_security_extensions(void)
+{
+       u32 fea = 0;
+
+       /* Read the CP15 CPUID feature register (c0, c1, 0 = ID_PFR1) */
+       asm volatile(
+               "mrc p15, 0, %[fea], cr0, cr1, 0" :
+               [fea]"=r" (fea));
+
+       mc_dev_devel("CPU Features: 0x%X", fea);
+
+       /*
+        * If the CPU features ID has 0 for security features then the CPU
+        * doesn't support TrustZone at all!
+        */
+       if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
+               return false;
+
+       return true;
+}
+
+/*
+ * check if running in secure mode
+ * Fix: the debug message named the register "CPRS"; it is CPSR.
+ */
+static inline bool is_secure_mode(void)
+{
+       u32 cpsr = 0;
+       u32 nsacr = 0;
+
+       /* Read NSACR (cp15 c1, c1, 2) and the current program status */
+       asm volatile(
+               "mrc    p15, 0, %[nsacr], cr1, cr1, 2\n"
+               "mrs %[cpsr], cpsr\n" :
+               [nsacr]"=r" (nsacr),
+               [cpsr]"=r"(cpsr));
+
+       mc_dev_devel("CPSR.M = set to 0x%X", cpsr & MODE_MASK);
+       mc_dev_devel("SCR.NS = set to 0x%X", nsacr);
+
+       /*
+        * If the NSACR contains the reset value(=0) then most likely we are
+        * running in Secure MODE.
+        * If the cpsr mode is set to monitor mode then we cannot load!
+        */
+       if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
+               return true;
+
+       return false;
+}
+#endif
+
+#endif /* _MC_ARM_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/build_tag.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/build_tag.h
new file mode 100644 (file)
index 0000000..d457b35
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MOBICORE_COMPONENT_BUILD_TAG
+#define MOBICORE_COMPONENT_BUILD_TAG \
+       "t-base-Exynos-Android-410a-v001-20180329_190510_46723_76543"
+#endif
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/client.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/client.c
new file mode 100755 (executable)
index 0000000..e94a54d
--- /dev/null
@@ -0,0 +1,1454 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>       /* struct task_struct */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/mm.h>    /* get_task_mm */
+#include <linux/sched/task.h>  /* put_task_struct */
+#endif
+#include <net/sock.h>          /* sockfd_lookup */
+#include <linux/file.h>                /* fput */
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "main.h"
+#include "mmu.h"
+#include "session.h"
+#include "client.h"
+
+/* Client/context */
+struct tee_client {
+       /* PID of task that opened the device, 0 if kernel */
+       pid_t                   pid;
+       /* Command name of the task (copied from current->comm) */
+       char                    comm[TASK_COMM_LEN];
+       /* Number of references kept to this object */
+       struct kref             kref;
+       /* List of contiguous buffers allocated by mcMallocWsm for the client */
+       struct list_head        cbufs;
+       struct mutex            cbufs_lock;     /* lock for the cbufs list */
+       /* List of TA sessions opened by this client */
+       struct list_head        sessions;
+       struct list_head        closing_sessions;
+       struct mutex            sessions_lock;  /* sessions list + closing */
+       /* Client lock for quick WSMs and operations changes */
+       struct mutex            quick_lock;
+       /* List of WSMs for a client */
+       struct list_head        cwsms;
+       /* List of GP operation for a client */
+       struct list_head        operations;
+       /* The list entry to attach to "ctx.clients" list */
+       struct list_head        list;
+       /* task_struct for the client application, if going through a proxy */
+       struct task_struct      *task;
+};
+
+/* Context: global bookkeeping of live and closing clients */
+static struct client_ctx {
+       /* Clients list */
+       struct mutex            clients_lock;
+       struct list_head        clients;
+       /* Clients waiting for their last cbuf to be released */
+       struct mutex            closing_clients_lock;
+       struct list_head        closing_clients;
+} client_ctx;
+
+/* Buffer shared with SWd at client level */
+struct cwsm {
+       /* Client this cbuf belongs to */
+       struct tee_client       *client;
+       /* Buffer info */
+       struct gp_shared_memory memref;
+       /* MMU L2 table */
+       struct tee_mmu          *mmu;
+       /* Buffer SWd addr */
+       u32                     sva;
+       /* Number of references kept to this object */
+       struct kref             kref;
+       /* The list entry for the client to list its WSMs */
+       struct list_head        list;
+};
+
+/*
+ * Contiguous buffer allocated to TLCs.
+ * These buffers are used as world shared memory (wsm) to share with
+ * secure world.
+ */
+struct cbuf {
+       /* Client this cbuf belongs to */
+       struct tee_client       *client;
+       /* List element for client's list of cbuf's */
+       struct list_head        list;
+       /* Number of references kept to this buffer */
+       struct kref             kref;
+       /* virtual Kernel start address */
+       uintptr_t               addr;
+       /* virtual Userspace start address */
+       uintptr_t               uaddr;
+       /* physical start address */
+       phys_addr_t             phys;
+       /* 2^order = number of pages allocated */
+       unsigned int            order;
+       /* Length of memory mapped to user */
+       u32                     len;
+       /* Has been freed via the API */
+       bool                    api_freed;
+};
+
+/* Take an extra reference on a cbuf (caller must already hold one) */
+static inline void cbuf_get(struct cbuf *cbuf)
+{
+       kref_get(&cbuf->kref);
+}
+
+/* Must only be called by tee_cbuf_put (runs with cbufs_lock held) */
+static void cbuf_release(struct kref *kref)
+{
+       struct cbuf *cbuf = container_of(kref, struct cbuf, kref);
+       struct tee_client *client = cbuf->client;
+
+       /* Unlist from client */
+       list_del_init(&cbuf->list);
+       /* Release client token */
+       client_put(client);
+       /* Free */
+       free_pages(cbuf->addr, cbuf->order);
+       mc_dev_devel("freed cbuf %p: client %p addr %lx uaddr %lx len %u",
+                    cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
+       kfree(cbuf);
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_cbufs);
+}
+
+/* Drop a reference; last put frees the cbuf under the client's cbufs_lock */
+void tee_cbuf_put(struct cbuf *cbuf)
+{
+       struct tee_client *client = cbuf->client;
+
+       mutex_lock(&client->cbufs_lock);
+       kref_put(&cbuf->kref, cbuf_release);
+       mutex_unlock(&client->cbufs_lock);
+}
+
+/*
+ * Map a kernel contiguous buffer to user space
+ */
+static int cbuf_map(struct vm_area_struct *vmarea, uintptr_t addr, u32 len,
+                   uintptr_t *uaddr)
+{
+       int ret;
+
+       /* All three inputs are mandatory */
+       if (!uaddr || !vmarea || !addr)
+               return -EINVAL;
+
+       /* The buffer must exactly fill the VMA requested by user space */
+       if (len != (u32)(vmarea->vm_end - vmarea->vm_start)) {
+               ret = -EINVAL;
+               mc_dev_err(ret, "cbuf incompatible with vma");
+               return ret;
+       }
+
+       vmarea->vm_flags |= VM_IO;
+       ret = remap_pfn_range(vmarea, vmarea->vm_start,
+                             page_to_pfn(virt_to_page(addr)),
+                             vmarea->vm_end - vmarea->vm_start,
+                             vmarea->vm_page_prot);
+       if (ret) {
+               *uaddr = 0;
+               mc_dev_err(ret, "User mapping failed");
+               return ret;
+       }
+
+       *uaddr = vmarea->vm_start;
+       return 0;
+}
+
+/*
+ * Returns true if client is a kernel object (pid is 0 only when
+ * client_create() was called with is_from_kernel set).
+ */
+static inline bool client_is_kernel(struct tee_client *client)
+{
+       return !client->pid;
+}
+
+/*
+ * Create a client-level shared memory object and register it with the SWd.
+ * If @mmu is given its refcount is bumped and reused, otherwise a new MMU
+ * table is built over @memref (kernel clients map NULL mm, user clients
+ * the current task's mm). On success the cwsm holds a reference on the
+ * client and is linked into client->cwsms. Returns the cwsm or an ERR_PTR
+ * (GP status also set in @gp_ret via iwp_set_ret).
+ */
+static struct cwsm *cwsm_create(struct tee_client *client,
+                               struct tee_mmu *mmu,
+                               const struct gp_shared_memory *memref,
+                               struct gp_return *gp_ret)
+{
+       struct cwsm *cwsm;
+       u32 sva;
+       int ret;
+
+       cwsm = kzalloc(sizeof(*cwsm), GFP_KERNEL);
+       if (!cwsm)
+               return ERR_PTR(iwp_set_ret(-ENOMEM, gp_ret));
+
+       if (mmu) {
+               /* Caller-provided MMU: just take a reference */
+               cwsm->mmu = mmu;
+               tee_mmu_get(cwsm->mmu);
+       } else {
+               struct mc_ioctl_buffer buf = {
+                       .va = (uintptr_t)memref->buffer,
+                       .len = memref->size,
+                       .flags = memref->flags,
+               };
+
+               if (client_is_kernel(client)) {
+                       cwsm->mmu = tee_mmu_create(NULL, &buf);
+               } else {
+                       struct mm_struct *mm = get_task_mm(current);
+
+                       if (!mm) {
+                               ret = -EPERM;
+                               mc_dev_err(ret, "can't get mm");
+                               goto err_cwsm;
+                       }
+
+                       /* Build MMU table for buffer */
+                       cwsm->mmu = tee_mmu_create(mm, &buf);
+                       mmput(mm);
+                       }
+
+               if (IS_ERR(cwsm->mmu)) {
+                       ret = iwp_set_ret(PTR_ERR(cwsm->mmu), gp_ret);
+                       goto err_cwsm;
+               }
+       }
+
+       ret = iwp_register_shared_mem(cwsm->mmu, &sva, gp_ret);
+       if (ret)
+               goto err_mmu;
+
+       /* Get a token on the client */
+       client_get(client);
+       cwsm->client = client;
+       memcpy(&cwsm->memref, memref, sizeof(cwsm->memref));
+       cwsm->sva = sva;
+       kref_init(&cwsm->kref);
+       INIT_LIST_HEAD(&cwsm->list);
+       /* Add buffer to list */
+       mutex_lock(&client->quick_lock);
+       list_add_tail(&cwsm->list, &client->cwsms);
+       mutex_unlock(&client->quick_lock);
+       mc_dev_devel("created cwsm %p: client %p sva %x", cwsm, client, sva);
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_cwsms);
+       return cwsm;
+
+err_mmu:
+       tee_mmu_put(cwsm->mmu);
+err_cwsm:
+       kfree(cwsm);
+       return ERR_PTR(ret);
+}
+
+/* Take an extra reference on a cwsm (caller must already hold one) */
+static inline void cwsm_get(struct cwsm *cwsm)
+{
+       kref_get(&cwsm->kref);
+}
+
+/* Must only be called by cwsm_put (runs with the client's quick_lock held) */
+static void cwsm_release(struct kref *kref)
+{
+       struct cwsm *cwsm = container_of(kref, struct cwsm, kref);
+       struct tee_client *client = cwsm->client;
+       struct mcp_buffer_map map;
+
+       /* Unlist from client */
+       list_del_init(&cwsm->list);
+       /* Unmap buffer from SWd (errors ignored) */
+       tee_mmu_buffer(cwsm->mmu, &map);
+       map.secure_va = cwsm->sva;
+       iwp_release_shared_mem(&map);
+       /* Release MMU */
+       tee_mmu_put(cwsm->mmu);
+       /* Release client token */
+       client_put(client);
+       /* Free */
+       mc_dev_devel("freed cwsm %p: client %p", cwsm, client);
+       kfree(cwsm);
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_cwsms);
+}
+
+/* Drop a reference; last put unregisters from SWd and frees the cwsm */
+static inline void cwsm_put(struct cwsm *cwsm)
+{
+       struct tee_client *client = cwsm->client;
+
+       mutex_lock(&client->quick_lock);
+       kref_put(&cwsm->kref, cwsm_release);
+       mutex_unlock(&client->quick_lock);
+}
+
+/*
+ * Look up a registered shared mem by (buffer, size, flags) triplet.
+ * On a hit a reference is taken; the caller must balance it with cwsm_put.
+ */
+static inline struct cwsm *cwsm_find(struct tee_client *client,
+                                    const struct gp_shared_memory *memref)
+{
+       struct cwsm *match = NULL, *cursor;
+
+       mc_dev_devel("find shared mem for buf %llx size %llu flags %x",
+                    memref->buffer, memref->size, memref->flags);
+       mutex_lock(&client->quick_lock);
+       list_for_each_entry(cursor, &client->cwsms, list) {
+               mc_dev_devel("candidate buf %llx size %llu flags %x",
+                            cursor->memref.buffer, cursor->memref.size,
+                            cursor->memref.flags);
+               if (cursor->memref.buffer != memref->buffer ||
+                   cursor->memref.size != memref->size ||
+                   cursor->memref.flags != memref->flags)
+                       continue;
+
+               match = cursor;
+               cwsm_get(match);
+               mc_dev_devel("match");
+               break;
+       }
+       mutex_unlock(&client->quick_lock);
+       return match;
+}
+
+/*
+ * Look up a registered shared mem by its secure virtual address.
+ * On a hit a reference is taken; the caller must balance it with cwsm_put.
+ */
+static inline struct cwsm *cwsm_find_by_sva(struct tee_client *client, u32 sva)
+{
+       struct cwsm *match = NULL, *cursor;
+
+       mutex_lock(&client->quick_lock);
+       list_for_each_entry(cursor, &client->cwsms, list) {
+               if (cursor->sva != sva)
+                       continue;
+
+               match = cursor;
+               cwsm_get(match);
+               break;
+       }
+       mutex_unlock(&client->quick_lock);
+       return match;
+}
+
+/*
+ * Returns the secure virtual address from a registered mem, or 0 if the
+ * memref is unknown. On success the reference taken by cwsm_find is kept;
+ * client_put_cwsm_sva releases it (hence its double cwsm_put).
+ */
+u32 client_get_cwsm_sva(struct tee_client *client,
+                       const struct gp_shared_memory *memref)
+{
+       struct cwsm *cwsm = cwsm_find(client, memref);
+
+       if (!cwsm)
+               return 0;
+
+       mc_dev_devel("found sva %x", cwsm->sva);
+       return cwsm->sva;
+}
+
+/* Take a reference on the client; paired with client_put() */
+void client_get(struct tee_client *client)
+{
+       kref_get(&client->kref);
+}
+
+/*
+ * Release the cwsm identified by sva: one put for the reference taken by
+ * cwsm_find_by_sva() here, one for the reference kept by the matching
+ * client_get_cwsm_sva() call.
+ */
+void client_put_cwsm_sva(struct tee_client *client, u32 sva)
+{
+       struct cwsm *cwsm = cwsm_find_by_sva(client, sva);
+
+       if (!cwsm)
+               return;
+
+       /* Release reference taken by cwsm_find */
+       cwsm_put(cwsm);
+       cwsm_put(cwsm);
+}
+
+/*
+ * Allocate and initialize a client object
+ * @param is_from_kernel  true for kernel API clients (pid recorded as 0)
+ * @return the new client, already linked into the global clients list,
+ *         or NULL on allocation failure
+ */
+struct tee_client *client_create(bool is_from_kernel)
+{
+       struct tee_client *client;
+
+       /* Allocate client structure */
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (!client)
+               return NULL;
+
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_clients);
+       /* initialize members */
+       client->pid = is_from_kernel ? 0 : current->pid;
+       memcpy(client->comm, current->comm, sizeof(client->comm));
+       kref_init(&client->kref);
+       INIT_LIST_HEAD(&client->cbufs);
+       mutex_init(&client->cbufs_lock);
+       INIT_LIST_HEAD(&client->sessions);
+       INIT_LIST_HEAD(&client->closing_sessions);
+       mutex_init(&client->sessions_lock);
+       INIT_LIST_HEAD(&client->list);
+       mutex_init(&client->quick_lock);
+       INIT_LIST_HEAD(&client->cwsms);
+       INIT_LIST_HEAD(&client->operations);
+       /* Add client to list of clients */
+       mutex_lock(&client_ctx.clients_lock);
+       list_add_tail(&client->list, &client_ctx.clients);
+       mutex_unlock(&client_ctx.clients_lock);
+       mc_dev_devel("created client %p", client);
+       return client;
+}
+
+/*
+ * Must only be called by client_put: the kref release callback runs with
+ * client_ctx.closing_clients_lock held, which protects the list_del below.
+ */
+static void client_release(struct kref *kref)
+{
+       struct tee_client *client;
+
+       client = container_of(kref, struct tee_client, kref);
+       /* Client is closed, remove from closing list */
+       list_del(&client->list);
+       mc_dev_devel("freed client %p", client);
+       if (client->task)
+               put_task_struct(client->task);
+
+       kfree(client);
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_clients);
+}
+
+/*
+ * Drop a reference on the client; frees it (client_release) when the last
+ * reference goes. The closing_clients_lock serializes the potential
+ * list_del in client_release against list walkers.
+ * @return non-zero if the client was freed
+ */
+int client_put(struct tee_client *client)
+{
+       int ret;
+
+       mutex_lock(&client_ctx.closing_clients_lock);
+       ret = kref_put(&client->kref, client_release);
+       mutex_unlock(&client_ctx.closing_clients_lock);
+       return ret;
+}
+
+/*
+ * Check whether the client still has open sessions attached.
+ * Return: true if the sessions list is not empty.
+ */
+bool client_has_sessions(struct tee_client *client)
+{
+       bool ret;
+
+       /* Check for sessions */
+       mutex_lock(&client->sessions_lock);
+       ret = !list_empty(&client->sessions);
+       mutex_unlock(&client->sessions_lock);
+       mc_dev_devel("client %p, exit with %d", client, ret);
+       return ret;
+}
+
+/*
+ * Unlink a session from the client (callers have moved it to the closing
+ * list first) and drop the reference taken at session creation.
+ */
+static inline void client_put_session(struct tee_client *client,
+                                     struct tee_session *session)
+{
+       /* Remove session from client's closing list */
+       mutex_lock(&client->sessions_lock);
+       list_del(&session->list);
+       mutex_unlock(&client->sessions_lock);
+       /* Release the ref we took on creation */
+       session_put(session);
+}
+
+/*
+ * At this point, nobody has access to the client anymore, so no new sessions
+ * are being created.
+ * The sessions_lock is dropped around session_close() because closing talks
+ * to the SWd and may sleep; moving each session to the closing list first
+ * keeps it findable for debug while off the main list.
+ */
+static void client_close_sessions(struct tee_client *client)
+{
+       struct tee_session *session;
+
+       mutex_lock(&client->sessions_lock);
+       while (!list_empty(&client->sessions)) {
+               session = list_first_entry(&client->sessions,
+                                          struct tee_session, list);
+
+               /* Move session to closing sessions list */
+               list_move(&session->list, &client->closing_sessions);
+               /* Call session_close without lock */
+               mutex_unlock(&client->sessions_lock);
+               if (!session_close(session))
+                       client_put_session(client, session);
+               mutex_lock(&client->sessions_lock);
+       }
+
+       mutex_unlock(&client->sessions_lock);
+}
+
+/*
+ * At this point, nobody has access to the client anymore, so no new contiguous
+ * buffers are being created.
+ * Only applies to kernel API clients: user-space cbufs are released through
+ * the VMA close callback instead.
+ */
+static void client_close_kernel_cbufs(struct tee_client *client)
+{
+       /* Put buffers allocated and not freed via the kernel API */
+       if (!client_is_kernel(client))
+               return;
+
+       /* Look for cbufs that the client has not freed and put them */
+       while (true) {
+               struct cbuf *cbuf = NULL, *candidate;
+
+               /* Pick one unfreed cbuf per pass; marking api_freed under the
+                * lock makes each cbuf processed exactly once.
+                */
+               mutex_lock(&client->cbufs_lock);
+               list_for_each_entry(candidate, &client->cbufs, list) {
+                       if (!candidate->api_freed) {
+                               candidate->api_freed = true;
+                               cbuf = candidate;
+                               break;
+                       }
+               }
+               mutex_unlock(&client->cbufs_lock);
+
+               if (!cbuf)
+                       break;
+
+               tee_cbuf_put(cbuf);
+       }
+}
+
+/* Client is closing: make sure all CWSMs are gone */
+static void client_release_cwsms(struct tee_client *client)
+{
+       /* Look for cwsms that the client has not released and put them */
+       /* NOTE(review): loop termination relies on cwsm_put() unlinking the
+        * entry from client->cwsms when the last reference drops — confirm
+        * against cwsm_release().
+        */
+       while (!list_empty(&client->cwsms)) {
+               struct cwsm *cwsm;
+
+               cwsm = list_first_entry(&client->cwsms, struct cwsm, list);
+               cwsm_put(cwsm);
+       }
+}
+
+/* Client is closing: make sure all cancelled operations are gone */
+static void client_release_gp_operations(struct tee_client *client)
+{
+       struct client_gp_operation *op, *nop;
+
+       mutex_lock(&client->quick_lock);
+       list_for_each_entry_safe(op, nop, &client->operations, list) {
+               /* Only cancelled operations are kzalloc'd (by
+                * client_gp_request_cancellation); other entries are owned by
+                * their callers and must not be freed here.
+                */
+               mc_dev_devel("flush cancelled operation %p for started %llu",
+                            op, op->started);
+               if (op->cancelled)
+                       kfree(op);
+       }
+       mutex_unlock(&client->quick_lock);
+}
+
+/*
+ * Close a client: release its kernel cbufs, close all remaining sessions,
+ * release registered shared memories and pending GP operations, then drop
+ * the creation reference. The client is moved to the closing list so it
+ * remains visible for debug until actually freed.
+ */
+void client_close(struct tee_client *client)
+{
+       /* Move client from active clients to closing clients for debug */
+       mutex_lock(&client_ctx.clients_lock);
+       mutex_lock(&client_ctx.closing_clients_lock);
+       list_move(&client->list, &client_ctx.closing_clients);
+       mutex_unlock(&client_ctx.closing_clients_lock);
+       mutex_unlock(&client_ctx.clients_lock);
+       client_close_kernel_cbufs(client);
+       /* Close all remaining sessions */
+       client_close_sessions(client);
+       /* Release all cwsms, no need to lock as sessions are closed */
+       client_release_cwsms(client);
+       client_release_gp_operations(client);
+       client_put(client);
+       mc_dev_devel("client %p closed", client);
+}
+
+/*
+ * Clean all structures shared with the SWd (note: incomplete but unused)
+ * Walks every active client and forcibly tears down its MC sessions.
+ */
+void client_cleanup(void)
+{
+       struct tee_client *client;
+
+       mutex_lock(&client_ctx.clients_lock);
+       list_for_each_entry(client, &client_ctx.clients, list) {
+               mutex_lock(&client->sessions_lock);
+               while (!list_empty(&client->sessions)) {
+                       struct tee_session *session;
+
+                       session = list_first_entry(&client->sessions,
+                                                  struct tee_session, list);
+                       list_del(&session->list);
+                       session_mc_cleanup_session(session);
+               }
+               mutex_unlock(&client->sessions_lock);
+       }
+       mutex_unlock(&client_ctx.clients_lock);
+}
+
+/*
+ * Open TA for given client. TA binary is provided by the daemon.
+ * @param uuid        UUID of the TA to load
+ * @param tci_va      address of the TCI buffer shared with the TA
+ * @param tci_len     length of the TCI buffer
+ * @param session_id  out: session ID returned by the SWd
+ * @return driver error code
+ */
+int client_mc_open_session(struct tee_client *client,
+                          const struct mc_uuid_t *uuid,
+                          uintptr_t tci_va, size_t tci_len, u32 *session_id)
+{
+       struct mcp_open_info info = {
+               .type = TEE_MC_UUID,
+               .uuid = uuid,
+               .tci_va = tci_va,
+               .tci_len = tci_len,
+               .user = !client_is_kernel(client),
+       };
+       int ret;
+
+       ret = client_mc_open_common(client, &info, session_id);
+       mc_dev_devel("session %x, exit with %d", *session_id, ret);
+       return ret;
+}
+
+/*
+ * Open TA for given client. TA binary is provided by the client.
+ * @param spid        service provider ID of the TA
+ * @param ta_va/len   address and length of the TA binary
+ * @param tci_va/len  address and length of the TCI buffer
+ * @param session_id  out: session ID returned by the SWd
+ * @return driver error code
+ */
+int client_mc_open_trustlet(struct tee_client *client,
+                           u32 spid, uintptr_t ta_va, size_t ta_len,
+                           uintptr_t tci_va, size_t tci_len, u32 *session_id)
+{
+       struct mcp_open_info info = {
+               .type = TEE_MC_TA,
+               .spid = spid,
+               .va = ta_va,
+               .len = ta_len,
+               .tci_va = tci_va,
+               .tci_len = tci_len,
+               .user = !client_is_kernel(client),
+       };
+       int ret;
+
+       ret = client_mc_open_common(client, &info, session_id);
+       mc_dev_devel("session %x, exit with %d", *session_id, ret);
+       return ret;
+}
+
+/*
+ * Opens a TA and add corresponding session object to given client
+ * return: driver error code
+ */
+int client_mc_open_common(struct tee_client *client, struct mcp_open_info *info,
+                         u32 *session_id)
+{
+       struct tee_session *session = NULL;
+       int ret = 0;
+
+       /*
+        * Create session object with temp sid=0 BEFORE session is started,
+        * otherwise if a GP TA is started and NWd session object allocation
+        * fails, we cannot handle the potentially delayed GP closing.
+        * Adding session to list must be done AFTER it is started (once we have
+        * sid), therefore it cannot be done within session_create().
+        */
+       session = session_create(client, NULL);
+       if (IS_ERR(session))
+               return PTR_ERR(session);
+
+       ret = session_mc_open_session(session, info);
+       if (ret)
+               goto err;
+
+       mutex_lock(&client->sessions_lock);
+       /* Add session to client */
+       list_add_tail(&session->list, &client->sessions);
+       /* Set session ID returned by SWd */
+       *session_id = session->mcp_session.sid;
+       mutex_unlock(&client->sessions_lock);
+
+       /* The success path falls through err with ret == 0: both cleanup
+        * branches below are then skipped.
+        */
+err:
+       /* Close or free session on error */
+       if (ret == -ENODEV) {
+               /* The session must enter the closing process... */
+               list_add_tail(&session->list, &client->closing_sessions);
+               if (!session_close(session))
+                       client_put_session(client, session);
+       } else if (ret) {
+               session_put(session);
+       }
+
+       return ret;
+}
+
+/*
+ * Remove a session object from client and close corresponding TA
+ * Return: 0 if session was found and closed, -ENXIO if not found,
+ *         or the session_close() error code
+ */
+int client_remove_session(struct tee_client *client, u32 session_id)
+{
+       struct tee_session *session = NULL, *candidate;
+       int ret;
+
+       /* Move session from main list to closing list */
+       mutex_lock(&client->sessions_lock);
+       list_for_each_entry(candidate, &client->sessions, list) {
+               if (candidate->mcp_session.sid == session_id) {
+                       session = candidate;
+                       list_move(&session->list, &client->closing_sessions);
+                       break;
+               }
+       }
+
+       mutex_unlock(&client->sessions_lock);
+       if (!session)
+               return -ENXIO;
+
+       /* Close session */
+       ret = session_close(session);
+       if (!ret)
+               client_put_session(client, session);
+
+       return ret;
+}
+
+/*
+ * Find a session object and increment its reference counter.
+ * Object cannot be freed until its counter reaches 0.
+ * Matches on the MC session id (mcp_session.sid).
+ * return: pointer to the object, NULL if not found.
+ */
+static struct tee_session *client_get_session(struct tee_client *client,
+                                             u32 session_id)
+{
+       struct tee_session *session = NULL, *candidate;
+
+       mutex_lock(&client->sessions_lock);
+       list_for_each_entry(candidate, &client->sessions, list) {
+               if (candidate->mcp_session.sid == session_id) {
+                       session = candidate;
+                       session_get(session);
+                       break;
+               }
+       }
+
+       mutex_unlock(&client->sessions_lock);
+       if (!session)
+               mc_dev_err(-ENXIO, "session %x not found", session_id);
+
+       return session;
+}
+
+/*
+ * Send a notification to TA
+ * @return driver error code (-ENXIO if the session does not exist)
+ */
+int client_notify_session(struct tee_client *client, u32 session_id)
+{
+       struct tee_session *session;
+       int ret;
+
+       /* Find/get session */
+       session = client_get_session(client, session_id);
+       if (!session)
+               return -ENXIO;
+
+       /* Send command to SWd */
+       ret = session_mc_notify(session);
+       /* Put session */
+       session_put(session);
+       mc_dev_devel("session %x, exit with %d", session_id, ret);
+       return ret;
+}
+
+/*
+ * Wait for a notification from TA
+ * @param timeout        wait timeout forwarded to session_mc_wait()
+ * @param silent_expiry  suppress logging when the wait times out
+ * @return driver error code (-ENXIO if the session does not exist)
+ */
+int client_waitnotif_session(struct tee_client *client, u32 session_id,
+                            s32 timeout, bool silent_expiry)
+{
+       struct tee_session *session;
+       int ret;
+
+       /* Find/get session */
+       session = client_get_session(client, session_id);
+       if (!session)
+               return -ENXIO;
+
+       ret = session_mc_wait(session, timeout, silent_expiry);
+       /* Put session */
+       session_put(session);
+       mc_dev_devel("session %x, exit with %d", session_id, ret);
+       return ret;
+}
+
+/*
+ * Read session exit/termination code
+ * @param err  out: exit code reported by the session
+ * @return driver error code (-ENXIO if the session does not exist)
+ */
+int client_get_session_exitcode(struct tee_client *client, u32 session_id,
+                               s32 *err)
+{
+       struct tee_session *session;
+       int ret;
+
+       /* Find/get session */
+       session = client_get_session(client, session_id);
+       if (!session)
+               return -ENXIO;
+
+       /* Retrieve error */
+       ret = session_mc_get_err(session, err);
+       /* Put session */
+       session_put(session);
+       mc_dev_devel("session %x, exit code %d", session_id, *err);
+       return ret;
+}
+
+/* Share a buffer with given TA in SWd
+ * @param mmu  MMU table describing the buffer pages
+ * @param buf  in/out buffer descriptor (SWd address filled on success)
+ * @return driver error code (-ENXIO if the session does not exist)
+ */
+int client_mc_map(struct tee_client *client, u32 session_id,
+                 struct tee_mmu *mmu, struct mc_ioctl_buffer *buf)
+{
+       struct tee_session *session;
+       int ret;
+
+       /* Find/get session */
+       session = client_get_session(client, session_id);
+       if (!session)
+               return -ENXIO;
+
+       /* Add buffer to the session */
+       ret = session_mc_map(session, mmu, buf);
+       /* Put session */
+       session_put(session);
+       mc_dev_devel("session %x, exit with %d", session_id, ret);
+       return ret;
+}
+
+/* Stop sharing a buffer with SWd
+ * @return driver error code (-ENXIO if the session does not exist)
+ */
+int client_mc_unmap(struct tee_client *client, u32 session_id,
+                   const struct mc_ioctl_buffer *buf)
+{
+       struct tee_session *session;
+       int ret;
+
+       /* Find/get session */
+       session = client_get_session(client, session_id);
+       if (!session)
+               return -ENXIO;
+
+       /* Remove buffer from session */
+       ret = session_mc_unmap(session, buf);
+       /* Put session */
+       session_put(session);
+       mc_dev_devel("session %x, exit with %d", session_id, ret);
+       return ret;
+}
+
+/*
+ * GP TEEC_InitializeContext support: the context is implicit in the driver,
+ * so there is nothing to set up — just report success through gp_ret.
+ */
+int client_gp_initialize_context(struct tee_client *client,
+                                struct gp_return *gp_ret)
+{
+       return iwp_set_ret(0, gp_ret);
+}
+
+/*
+ * Register a GP shared memory with the SWd, or re-use an existing
+ * registration when mmu is NULL (the reference taken by cwsm_find, or the
+ * initial one from cwsm_create, is kept until
+ * client_gp_release_shared_mem()).
+ * @param sva  out (optional): secure virtual address of the registration
+ * @return driver error code; gp_ret is filled by the callee on error
+ */
+int client_gp_register_shared_mem(struct tee_client *client,
+                                 struct tee_mmu *mmu, u32 *sva,
+                                 const struct gp_shared_memory *memref,
+                                 struct gp_return *gp_ret)
+{
+       struct cwsm *cwsm = NULL;
+
+       if (!mmu)
+               /* cwsm_find automatically takes a reference */
+               cwsm = cwsm_find(client, memref);
+
+       if (!cwsm)
+               cwsm = cwsm_create(client, mmu, memref, gp_ret);
+
+       /* gp_ret set by callee */
+       if (IS_ERR(cwsm))
+               return PTR_ERR(cwsm);
+
+       if (sva)
+               *sva = cwsm->sva;
+
+       return iwp_set_ret(0, gp_ret);
+}
+
+/*
+ * Release a GP shared memory registration: one put for the reference taken
+ * by cwsm_find() here, one for the reference kept since registration.
+ * @return 0 on success, -ENOENT if the memref is not registered
+ */
+int client_gp_release_shared_mem(struct tee_client *client,
+                                const struct gp_shared_memory *memref)
+{
+       struct cwsm *cwsm = cwsm_find(client, memref);
+
+       if (!cwsm)
+               return -ENOENT;
+
+       /* Release reference taken by cwsm_find */
+       cwsm_put(cwsm);
+       cwsm_put(cwsm);
+       return 0;
+}
+
+/*
+ * Opens a TA and add corresponding session object to given client
+ * *session_id is only written on success.
+ * return: driver error code (gp_ret carries the GP-level status)
+ */
+int client_gp_open_session(struct tee_client *client,
+                          const struct mc_uuid_t *uuid,
+                          struct gp_operation *operation,
+                          const struct mc_identity *identity,
+                          struct gp_return *gp_ret,
+                          u32 *session_id)
+{
+       struct tee_session *session = NULL;
+       int ret = 0;
+
+       /*
+        * Create session object with temp sid=0 BEFORE session is started,
+        * otherwise if a GP TA is started and NWd session object allocation
+        * fails, we cannot handle the potentially delayed GP closing.
+        * Adding session to list must be done AFTER it is started (once we have
+        * sid), therefore it cannot be done within session_create().
+        */
+       session = session_create(client, identity);
+       if (IS_ERR(session))
+               return iwp_set_ret(PTR_ERR(session), gp_ret);
+
+       /* Open session */
+       ret = session_gp_open_session(session, uuid, operation, gp_ret);
+       if (ret)
+               goto end;
+
+       mutex_lock(&client->sessions_lock);
+       /* Add session to client */
+       list_add_tail(&session->list, &client->sessions);
+       mutex_unlock(&client->sessions_lock);
+       /* Set sid returned by SWd */
+       *session_id = session->iwp_session.sid;
+
+end:
+       if (ret)
+               session_put(session);
+
+       mc_dev_devel("gp session %x, exit with %d", *session_id, ret);
+       return ret;
+}
+
+/*
+ * Open a GP session on behalf of a DomU (virtualized) client.
+ * @param started  caller-side operation timestamp
+ * @param iws      interworld session descriptor
+ * @param mmus     MMU tables for the operation buffers
+ * @return driver error code (gp_ret carries the GP-level status)
+ */
+int client_gp_open_session_domu(struct tee_client *client,
+                               const struct mc_uuid_t *uuid, u64 started,
+                               struct interworld_session *iws,
+                               struct tee_mmu **mmus,
+                               struct gp_return *gp_ret)
+{
+       struct tee_session *session = NULL;
+       u32 sid;
+       int ret = 0;
+
+       /* Don't pass NULL for identity as it would make a MC session */
+       session = session_create(client, ERR_PTR(-ENOENT));
+       if (IS_ERR(session))
+               return iwp_set_ret(PTR_ERR(session), gp_ret);
+
+       /* Open session */
+       ret = session_gp_open_session_domu(session, uuid, started, iws,
+                                          mmus, gp_ret);
+       if (ret)
+               goto end;
+
+       mutex_lock(&client->sessions_lock);
+       /* Add session to client */
+       list_add_tail(&session->list, &client->sessions);
+       mutex_unlock(&client->sessions_lock);
+
+end:
+       /*
+        * Cache the sid BEFORE the put: on error session_put() may release
+        * the last reference and free the session, so dereferencing it in
+        * the log below would be a use-after-free.
+        */
+       sid = session->iwp_session.sid;
+       if (ret)
+               session_put(session);
+
+       mc_dev_devel("gp session %x, exit with %d", sid, ret);
+       return ret;
+}
+
+/*
+ * Close a GP session (matched on iwp_session.sid) and drop its creation
+ * reference.
+ * @return 0 on success, -ENXIO if not found, or session_close() error
+ */
+int client_gp_close_session(struct tee_client *client, u32 session_id)
+{
+       struct tee_session *session = NULL, *candidate;
+       int ret = 0;
+
+       /* Move session from main list to closing list */
+       mutex_lock(&client->sessions_lock);
+       list_for_each_entry(candidate, &client->sessions, list) {
+               if (candidate->iwp_session.sid == session_id) {
+                       session = candidate;
+                       list_move(&session->list, &client->closing_sessions);
+                       break;
+               }
+       }
+
+       mutex_unlock(&client->sessions_lock);
+       if (!session)
+               return -ENXIO;
+
+       ret = session_close(session);
+       if (!ret)
+               client_put_session(client, session);
+
+       return ret;
+}
+
+/*
+ * Send a command to the TA
+ * Note: looks the session up by MC sid via client_get_session().
+ * @return driver error code (gp_ret carries the GP-level status)
+ */
+int client_gp_invoke_command(struct tee_client *client, u32 session_id,
+                            u32 command_id,
+                            struct gp_operation *operation,
+                            struct gp_return *gp_ret)
+{
+       struct tee_session *session;
+       int ret = 0;
+
+       session = client_get_session(client, session_id);
+       if (!session)
+               return iwp_set_ret(-ENXIO, gp_ret);
+
+       ret = session_gp_invoke_command(session, command_id, operation, gp_ret);
+
+       /* Put session */
+       session_put(session);
+       return ret;
+}
+
+/*
+ * Send a command to the TA on behalf of a DomU (virtualized) client.
+ * @return driver error code (gp_ret carries the GP-level status)
+ */
+int client_gp_invoke_command_domu(struct tee_client *client, u32 session_id,
+                                 u64 started, struct interworld_session *iws,
+                                 struct tee_mmu **mmus,
+                                 struct gp_return *gp_ret)
+{
+       struct tee_session *session;
+       int ret = 0;
+
+       session = client_get_session(client, session_id);
+       if (!session)
+               return iwp_set_ret(-ENXIO, gp_ret);
+
+       ret = session_gp_invoke_command_domu(session, started, iws, mmus,
+                                            gp_ret);
+
+       /* Put session */
+       session_put(session);
+       return ret;
+}
+
+/*
+ * Request cancellation of a pending GP operation identified by its
+ * "started" stamp. If the operation is not queued yet, it is assumed to be
+ * coming: a pre-cancelled placeholder is stored so that
+ * client_gp_operation_add() can consume it when the operation arrives.
+ */
+void client_gp_request_cancellation(struct tee_client *client, u64 started)
+{
+       struct client_gp_operation *op;
+       u64 slot;
+       bool found = false;
+
+       /* Look for operation */
+       mutex_lock(&client->quick_lock);
+       list_for_each_entry(op, &client->operations, list)
+               if (op->started == started) {
+                       slot = op->slot;
+                       found = true;
+                       /* Message fixed: this branch means we DID find it */
+                       mc_dev_devel(
+                               "found operation to cancel for started %llu",
+                               started);
+                       break;
+               }
+
+       /* Operation not found: assume it is coming */
+       if (!found) {
+               op = kzalloc(sizeof(*op), GFP_KERNEL);
+               if (op) {
+                       op->started = started;
+                       op->cancelled = true;
+                       list_add_tail(&op->list, &client->operations);
+                       mc_dev_devel(
+                               "add cancelled operation %p for started %llu",
+                               op, op->started);
+               }
+       }
+       mutex_unlock(&client->quick_lock);
+
+       if (found)
+               session_gp_request_cancellation(slot);
+}
+
+/*
+ * This callback is called on remap: take a cbuf reference for the new VMA
+ */
+static void cbuf_vm_open(struct vm_area_struct *vmarea)
+{
+       struct cbuf *cbuf = vmarea->vm_private_data;
+
+       cbuf_get(cbuf);
+}
+
+/*
+ * This callback is called on unmap: drop the VMA's cbuf reference
+ */
+static void cbuf_vm_close(struct vm_area_struct *vmarea)
+{
+       struct cbuf *cbuf = vmarea->vm_private_data;
+
+       tee_cbuf_put(cbuf);
+}
+
+/* VMA callbacks refcounting the cbuf across fork/remap and unmap */
+static const struct vm_operations_struct cbuf_vm_ops = {
+       .open = cbuf_vm_open,
+       .close = cbuf_vm_close,
+};
+
+/*
+ * Create a cbuf object and add it to client
+ * @param client  owner of the buffer (a reference is taken on it)
+ * @param len     requested length in bytes (allocated as whole pages)
+ * @param addr    out (optional): kernel address, for the kernel API
+ * @param vmarea  user-space mapping target, NULL for kernel clients
+ * @return 0 on success, negative driver error code otherwise
+ */
+int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
+                      struct vm_area_struct *vmarea)
+{
+       struct cbuf *cbuf = NULL;
+       unsigned int order;
+       int ret = 0;
+
+       if (!client)
+               return -EINVAL;
+
+       if (!len || len > BUFFER_LENGTH_MAX)
+               return -EINVAL;
+
+       order = get_order(len);
+       /* The page allocator rejects (and warns on) order >= MAX_ORDER,
+        * so refuse such requests here instead of '>' (off-by-one).
+        */
+       if (order >= MAX_ORDER) {
+               ret = -ENOMEM;
+               mc_dev_err(ret, "Buffer size too large");
+               return ret;
+       }
+
+       /* Allocate buffer descriptor structure */
+       cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
+       if (!cbuf)
+               return -ENOMEM;
+
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_cbufs);
+       /* Allocate buffer */
+       cbuf->addr = __get_free_pages(GFP_USER | __GFP_ZERO, order);
+       if (!cbuf->addr) {
+               ret = -ENOMEM;
+               goto err_free_cbuf;
+       }
+
+       /* Map to user space if applicable */
+       if (!client_is_kernel(client)) {
+               ret = cbuf_map(vmarea, cbuf->addr, len, &cbuf->uaddr);
+               if (ret)
+                       goto err_free_pages;
+       }
+
+       /* Init descriptor members */
+       cbuf->client = client;
+       cbuf->phys = virt_to_phys((void *)cbuf->addr);
+       cbuf->len = len;
+       cbuf->order = order;
+       kref_init(&cbuf->kref);
+       INIT_LIST_HEAD(&cbuf->list);
+
+       /* Keep cbuf in VMA private data for refcounting (user-space clients) */
+       if (vmarea) {
+               vmarea->vm_private_data = cbuf;
+               vmarea->vm_ops = &cbuf_vm_ops;
+       }
+
+       /* Fill return parameter for k-api */
+       if (addr)
+               *addr = cbuf->addr;
+
+       /* Get a token on the client */
+       client_get(client);
+
+       /* Add buffer to list */
+       mutex_lock(&client->cbufs_lock);
+       list_add_tail(&cbuf->list, &client->cbufs);
+       mutex_unlock(&client->cbufs_lock);
+       mc_dev_devel("created cbuf %p: client %p addr %lx uaddr %lx len %u",
+                    cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
+       return 0;
+
+err_free_pages:
+       free_pages(cbuf->addr, order);
+err_free_cbuf:
+       kfree(cbuf);
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_cbufs);
+       return ret;
+}
+
+/*
+ * Find a contiguous buffer (cbuf) in the cbuf list of given client that
+ * contains given address and take a reference on it.
+ * Return pointer to the object, or NULL if not found.
+ */
+static struct cbuf *cbuf_get_by_addr(struct tee_client *client, uintptr_t addr)
+{
+       struct cbuf *cbuf = NULL, *candidate;
+       bool is_kernel = client_is_kernel(client);
+
+       mutex_lock(&client->cbufs_lock);
+       list_for_each_entry(candidate, &client->cbufs, list) {
+               /* Compare to kernel VA or user VA depending on client type */
+               uintptr_t start = is_kernel ?
+                       candidate->addr : candidate->uaddr;
+               uintptr_t end = start + candidate->len;
+
+               /* Check that (user) cbuf has not been unmapped */
+               /* NOTE(review): this break aborts the whole search as soon as
+                * one unmapped candidate is seen, skipping later entries —
+                * confirm a 'continue' was not intended.
+                */
+               if (!start)
+                       break;
+
+               if (addr >= start && addr < end) {
+                       cbuf = candidate;
+                       break;
+               }
+       }
+
+       if (cbuf)
+               cbuf_get(cbuf);
+
+       mutex_unlock(&client->cbufs_lock);
+       return cbuf;
+}
+
+/*
+ * Remove a cbuf object from client, and mark it for freeing.
+ * Freeing will happen once all current references are released.
+ * @return 0 on success, -EINVAL if no cbuf contains the given address
+ */
+int client_cbuf_free(struct tee_client *client, uintptr_t addr)
+{
+       struct cbuf *cbuf = cbuf_get_by_addr(client, addr);
+
+       if (!cbuf) {
+               /* Addresses are logged in hex everywhere else in this file */
+               mc_dev_err(-EINVAL, "cbuf %lx not found", addr);
+               return -EINVAL;
+       }
+
+       /* Two references to put: the caller's and the one we just took */
+       tee_cbuf_put(cbuf);
+       mutex_lock(&client->cbufs_lock);
+       cbuf->api_freed = true;
+       mutex_unlock(&client->cbufs_lock);
+       tee_cbuf_put(cbuf);
+       return 0;
+}
+
+/*
+ * Register a pending GP operation with the client.
+ * If a pre-cancelled placeholder for the same "started" stamp exists (see
+ * client_gp_request_cancellation), it is consumed and freed instead.
+ * @return true if the operation was added, false if it was already cancelled
+ */
+bool client_gp_operation_add(struct tee_client *client,
+                            struct client_gp_operation *operation)
+{
+       struct client_gp_operation *op;
+       bool found = false;
+
+       mutex_lock(&client->quick_lock);
+       list_for_each_entry(op, &client->operations, list)
+               if (op->started == operation->started && op->cancelled) {
+                       found = true;
+                       break;
+               }
+
+       if (found) {
+               list_del(&op->list);
+               mc_dev_devel("found cancelled operation %p for started %llu",
+                            op, op->started);
+               kfree(op);
+       } else {
+               list_add_tail(&operation->list, &client->operations);
+               mc_dev_devel("add operation for started %llu",
+                            operation->started);
+       }
+       mutex_unlock(&client->quick_lock);
+       return !found;
+}
+
+/* Unregister a GP operation from the client (entry is owned by caller) */
+void client_gp_operation_remove(struct tee_client *client,
+                               struct client_gp_operation *operation)
+{
+       mutex_lock(&client->quick_lock);
+       list_del(&operation->list);
+       mutex_unlock(&client->quick_lock);
+}
+
+/*
+ * Build an MMU table for the buffer described by buf_in.
+ * If the buffer lies within one of the client's cbufs, the cbuf reference
+ * taken here is returned through cbuf_p (caller must tee_cbuf_put it);
+ * otherwise *cbuf_p is NULL and, for user clients, the pages come from the
+ * current task's mm.
+ * @return the new MMU table, or an ERR_PTR on failure
+ */
+struct tee_mmu *client_mmu_create(struct tee_client *client,
+                                 const struct mc_ioctl_buffer *buf_in,
+                                 struct cbuf **cbuf_p)
+{
+       /* Check if buffer is contained in a cbuf */
+       struct mc_ioctl_buffer buf = *buf_in;
+       struct cbuf *cbuf = cbuf_get_by_addr(client, buf.va);
+       struct mm_struct *mm = NULL;
+       struct tee_mmu *mmu;
+
+       *cbuf_p = cbuf;
+       if (cbuf) {
+               uintptr_t offset;
+
+               if (client_is_kernel(client)) {
+                       offset = buf.va - cbuf->addr;
+               } else {
+                       offset = buf.va - cbuf->uaddr;
+                       /* Update va to point to kernel address */
+                       buf.va = cbuf->addr + offset;
+               }
+
+               if ((offset + buf.len) > cbuf->len) {
+                       mc_dev_err(-EINVAL, "crosses cbuf boundary");
+                       tee_cbuf_put(cbuf);
+                       return ERR_PTR(-EINVAL);
+               }
+       } else if (!client_is_kernel(client)) {
+               mm = get_task_mm(current);
+               if (!mm) {
+                       mc_dev_err(-EPERM, "can't get mm");
+                       return ERR_PTR(-EPERM);
+               }
+       }
+
+       /* Build MMU table for buffer */
+       mmu = tee_mmu_create(mm, &buf);
+       if (mm)
+               mmput(mm);
+
+       /* On failure the cbuf reference must not leak */
+       if (IS_ERR_OR_NULL(mmu) && cbuf)
+               tee_cbuf_put(cbuf);
+
+       return mmu;
+}
+
+/* One-time init of the global client context (lists and their locks) */
+void client_init(void)
+{
+       INIT_LIST_HEAD(&client_ctx.clients);
+       mutex_init(&client_ctx.clients_lock);
+
+       INIT_LIST_HEAD(&client_ctx.closing_clients);
+       mutex_init(&client_ctx.closing_clients_lock);
+}
+
+/* Dump one cbuf (refcount, addresses, length) into the debug buffer */
+static inline int cbuf_debug_structs(struct kasnprintf_buf *buf,
+                                    struct cbuf *cbuf)
+{
+       return kasnprintf(buf,
+                         "\tcbuf %pK [%d]: addr %pK uaddr %pK len %u\n",
+                         cbuf, kref_read(&cbuf->kref), (void *)cbuf->addr,
+                         (void *)cbuf->uaddr, cbuf->len);
+}
+
+/* Dump one cwsm (refcount, memref) into the debug buffer */
+static inline int cwsm_debug_structs(struct kasnprintf_buf *buf,
+                                    struct cwsm *cwsm)
+{
+       return kasnprintf(buf,
+                         "\tcwsm %pK [%d]: buf %pK len %llu flags 0x%x\n",
+                         cwsm, kref_read(&cwsm->kref),
+                         (void *)(uintptr_t)cwsm->memref.buffer,
+                         cwsm->memref.size, cwsm->memref.flags);
+}
+
+/*
+ * Dump one client and all its objects (cbufs, cwsms, sessions) into the
+ * debug buffer. Each sub-list is walked under its own lock.
+ * @param is_closing  annotate the client as being torn down
+ * @return 0 on success, or the first negative kasnprintf error
+ */
+static int client_debug_structs(struct kasnprintf_buf *buf,
+                               struct tee_client *client, bool is_closing)
+{
+       struct cbuf *cbuf;
+       struct cwsm *cwsm;
+       struct tee_session *session;
+       int ret;
+
+       if (client->pid)
+               ret = kasnprintf(buf, "client %pK [%d]: %s (%d)%s\n",
+                                client, kref_read(&client->kref),
+                                client->comm, client->pid,
+                                is_closing ? " <closing>" : "");
+       else
+               ret = kasnprintf(buf, "client %pK [%d]: [kernel]%s\n",
+                                client, kref_read(&client->kref),
+                                is_closing ? " <closing>" : "");
+
+       if (ret < 0)
+               return ret;
+
+       /* Buffers */
+       mutex_lock(&client->cbufs_lock);
+       if (list_empty(&client->cbufs))
+               goto done_cbufs;
+
+       list_for_each_entry(cbuf, &client->cbufs, list) {
+               ret = cbuf_debug_structs(buf, cbuf);
+               if (ret < 0)
+                       goto done_cbufs;
+       }
+
+done_cbufs:
+       mutex_unlock(&client->cbufs_lock);
+       if (ret < 0)
+               return ret;
+
+       /* WMSs */
+       mutex_lock(&client->quick_lock);
+       if (list_empty(&client->cwsms))
+               goto done_cwsms;
+
+       list_for_each_entry(cwsm, &client->cwsms, list) {
+               ret = cwsm_debug_structs(buf, cwsm);
+               if (ret < 0)
+                       goto done_cwsms;
+       }
+
+done_cwsms:
+       mutex_unlock(&client->quick_lock);
+       if (ret < 0)
+               return ret;
+
+       /* Sessions */
+       mutex_lock(&client->sessions_lock);
+       list_for_each_entry(session, &client->sessions, list) {
+               ret = session_debug_structs(buf, session, false);
+               if (ret < 0)
+                       goto done_sessions;
+       }
+
+       list_for_each_entry(session, &client->closing_sessions, list) {
+               ret = session_debug_structs(buf, session, true);
+               if (ret < 0)
+                       goto done_sessions;
+       }
+
+done_sessions:
+       mutex_unlock(&client->sessions_lock);
+
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Dump all active then closing clients into the debug buffer.
+ * @return 0 on success, or the first negative kasnprintf error
+ */
+int clients_debug_structs(struct kasnprintf_buf *buf)
+{
+       struct tee_client *client;
+       /* int, not ssize_t: both the callee and this function return int */
+       int ret = 0;
+
+       mutex_lock(&client_ctx.clients_lock);
+       list_for_each_entry(client, &client_ctx.clients, list) {
+               ret = client_debug_structs(buf, client, false);
+               if (ret < 0)
+                       break;
+       }
+       mutex_unlock(&client_ctx.clients_lock);
+
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&client_ctx.closing_clients_lock);
+       list_for_each_entry(client, &client_ctx.closing_clients, list) {
+               ret = client_debug_structs(buf, client, true);
+               if (ret < 0)
+                       break;
+       }
+       mutex_unlock(&client_ctx.closing_clients_lock);
+
+       return ret;
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/client.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/client.h
new file mode 100755 (executable)
index 0000000..54c12d2
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLIENT_H_
+#define _CLIENT_H_
+
+#include <linux/list.h>
+#include <linux/sched.h>       /* TASK_COMM_LEN */
+
+#include "public/mc_user.h"    /* many types */
+
+struct tee_client;
+struct mcp_open_info;
+struct tee_mmu;
+struct interworld_session;
+
+/*
+ * Client lifecycle.  A tee_client represents one driver user, either an
+ * in-kernel API user or a user-space one (see client_create()); clients
+ * are reference-counted via client_get()/client_put().
+ */
+/* Client */
+struct tee_client *client_create(bool is_from_kernel);
+void client_get(struct tee_client *client);
+/* Drop a reference; a non-zero return means the last reference was dropped */
+int client_put(struct tee_client *client);
+bool client_has_sessions(struct tee_client *client);
+void client_close(struct tee_client *client);
+void client_cleanup(void);
+
+/* MC (MobiCore legacy API) session and buffer operations */
+/* MC */
+int client_mc_open_session(struct tee_client *client,
+                          const struct mc_uuid_t *uuid,
+                          uintptr_t tci_va, size_t tci_len, u32 *session_id);
+int client_mc_open_trustlet(struct tee_client *client,
+                           u32 spid, uintptr_t ta_va, size_t ta_len,
+                           uintptr_t tci_va, size_t tci_len, u32 *session_id);
+int client_mc_open_common(struct tee_client *client, struct mcp_open_info *info,
+                         u32 *session_id);
+int client_remove_session(struct tee_client *client, u32 session_id);
+int client_notify_session(struct tee_client *client, u32 session_id);
+int client_waitnotif_session(struct tee_client *client, u32 session_id,
+                            s32 timeout, bool silent_expiry);
+int client_get_session_exitcode(struct tee_client *client, u32 session_id,
+                               s32 *exit_code);
+int client_mc_map(struct tee_client *client, u32 session_id,
+                 struct tee_mmu *mmu, struct mc_ioctl_buffer *buf);
+int client_mc_unmap(struct tee_client *client, u32 session_id,
+                   const struct mc_ioctl_buffer *buf);
+
+/* GP (GlobalPlatform TEE Client API) operations */
+/* GP */
+int client_gp_initialize_context(struct tee_client *client,
+                                struct gp_return *gp_ret);
+int client_gp_register_shared_mem(struct tee_client *client,
+                                 struct tee_mmu *mmu, u32 *sva,
+                                 const struct gp_shared_memory *memref,
+                                 struct gp_return *gp_ret);
+int client_gp_release_shared_mem(struct tee_client *client,
+                                const struct gp_shared_memory *memref);
+int client_gp_open_session(struct tee_client *client,
+                          const struct mc_uuid_t *uuid,
+                          struct gp_operation *operation,
+                          const struct mc_identity *identity,
+                          struct gp_return *gp_ret,
+                          u32 *session_id);
+int client_gp_open_session_domu(struct tee_client *client,
+                               const struct mc_uuid_t *uuid, u64 started,
+                               struct interworld_session *iws,
+                               struct tee_mmu **mmus,
+                               struct gp_return *gp_ret);
+int client_gp_close_session(struct tee_client *client, u32 session_id);
+int client_gp_invoke_command(struct tee_client *client, u32 session_id,
+                            u32 command_id,
+                            struct gp_operation *operation,
+                            struct gp_return *gp_ret);
+int client_gp_invoke_command_domu(struct tee_client *client, u32 session_id,
+                                 u64 started, struct interworld_session *iws,
+                                 struct tee_mmu **mmus,
+                                 struct gp_return *gp_ret);
+void client_gp_request_cancellation(struct tee_client *client, u64 started);
+
+/* Contiguous buffer */
+int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
+                      struct vm_area_struct *vmarea);
+int client_cbuf_free(struct tee_client *client, uintptr_t addr);
+
+/* GP internal */
+struct client_gp_operation {
+       u64                     started;
+       u64                     slot;
+       bool                    cancelled;
+       struct list_head        list;
+};
+
+/* Called from session when a new operation starts/ends */
+bool client_gp_operation_add(struct tee_client *client,
+                            struct client_gp_operation *operation);
+void client_gp_operation_remove(struct tee_client *client,
+                               struct client_gp_operation *operation);
+
+/* MMU */
+struct cbuf;
+
+struct tee_mmu *client_mmu_create(struct tee_client *client,
+                                 const struct mc_ioctl_buffer *buf_in,
+                                 struct cbuf **cbuf_p);
+void tee_cbuf_put(struct cbuf *cbuf);
+
+/* Buffer shared with SWd at client level */
+u32 client_get_cwsm_sva(struct tee_client *client,
+                       const struct gp_shared_memory *memref);
+void client_put_cwsm_sva(struct tee_client *client, u32 sva);
+
+/* Global */
+void client_init(void);
+
+/* Debug */
+int clients_debug_structs(struct kasnprintf_buf *buf);
+
+#endif /* _CLIENT_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/clientlib.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/clientlib.c
new file mode 100755 (executable)
index 0000000..6ce62ba
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+#include "public/mobicore_driver_api.h"
+
+#include "main.h"
+#include "client.h"
+
+/*
+ * Map a negative errno value returned by the core client API to the
+ * public enum mc_result error space.
+ *
+ * The switch is on -err, so the (positive) errno constants below match
+ * negative return codes; 0 maps to MC_DRV_OK and any unrecognized value
+ * to MC_DRV_ERR_UNKNOWN.
+ */
+static enum mc_result convert(int err)
+{
+       switch (-err) {
+       case 0:
+               return MC_DRV_OK;
+       case ENOMSG:
+               return MC_DRV_NO_NOTIFICATION;
+       case EBADMSG:
+               return MC_DRV_ERR_NOTIFICATION;
+       case EAGAIN:
+               return MC_DRV_ERR_OUT_OF_RESOURCES;
+       case EHOSTDOWN:
+               return MC_DRV_ERR_INIT;
+       case ENODEV:
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+       case ENXIO:
+               return MC_DRV_ERR_UNKNOWN_SESSION;
+       case EPERM:
+               return MC_DRV_ERR_INVALID_OPERATION;
+       case EBADE:
+               return MC_DRV_ERR_INVALID_RESPONSE;
+       case ETIME:
+               return MC_DRV_ERR_TIMEOUT;
+       case ENOMEM:
+               return MC_DRV_ERR_NO_FREE_MEMORY;
+       case EUCLEAN:
+               return MC_DRV_ERR_FREE_MEMORY_FAILED;
+       case ENOTEMPTY:
+               return MC_DRV_ERR_SESSION_PENDING;
+       case EHOSTUNREACH:
+               return MC_DRV_ERR_DAEMON_UNREACHABLE;
+       case ENOENT:
+               return MC_DRV_ERR_INVALID_DEVICE_FILE;
+       case EINVAL:
+               return MC_DRV_ERR_INVALID_PARAMETER;
+       case EPROTO:
+               return MC_DRV_ERR_KERNEL_MODULE;
+       case ECOMM:
+               return MC_DRV_INFO_NOTIFICATION;
+       case EUNATCH:
+               return MC_DRV_ERR_NQ_FAILED;
+       case ERESTARTSYS:
+               return MC_DRV_ERR_INTERRUPTED_BY_SIGNAL;
+       default:
+               /* Log the raw errno so unexpected codes can be tracked down */
+               mc_dev_devel("error is %d", err);
+               return MC_DRV_ERR_UNKNOWN;
+       }
+}
+
+/* Only the default device ID is supported by this driver */
+static inline bool is_valid_device(u32 device_id)
+{
+	if (device_id != MC_DEVICE_ID_DEFAULT)
+		return false;
+
+	return true;
+}
+
+static struct tee_client *client;
+static int open_count;
+static DEFINE_MUTEX(dev_mutex);        /* Lock for the device */
+
+/*
+ * Take a reference on the global kernel client, if the device is open.
+ *
+ * Return: true with a reference held on success; false when no client
+ * exists (mc_open_device() not called, or the device was fully closed).
+ */
+static bool clientlib_client_get(void)
+{
+	/* Was declared 'int' although the function returns bool */
+	bool ret = true;
+
+	mutex_lock(&dev_mutex);
+	if (!client)
+		ret = false;
+	else
+		client_get(client);
+
+	mutex_unlock(&dev_mutex);
+	return ret;
+}
+
+/*
+ * Drop the reference taken by clientlib_client_get().  When client_put()
+ * reports that the last reference was dropped, the global client pointer
+ * is reset under dev_mutex so later clientlib_client_get() calls fail.
+ */
+static void clientlib_client_put(void)
+{
+       mutex_lock(&dev_mutex);
+       if (client_put(client))
+               client = NULL;
+
+       mutex_unlock(&dev_mutex);
+}
+
+/*
+ * Open the TEE device for in-kernel users.
+ *
+ * The first successful open creates the single global kernel client;
+ * subsequent opens only increment the open count.  Fails if the TEE did
+ * not start (now or in the past) or if the client cannot be created.
+ */
+enum mc_result mc_open_device(u32 device_id)
+{
+       enum mc_result mc_result = MC_DRV_OK;
+       int ret;
+
+       /* Check parameters */
+       if (!is_valid_device(device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       mutex_lock(&dev_mutex);
+       /* Make sure TEE was started */
+       ret = mc_wait_tee_start();
+       if (ret) {
+               mc_dev_err(ret, "TEE failed to start, now or in the past");
+               mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+               goto end;
+       }
+
+       /* First open creates the client; later opens reuse it */
+       if (!open_count)
+               client = client_create(true);
+
+       if (client) {
+               open_count++;
+               mc_dev_devel("successfully opened the device");
+       } else {
+               mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+               mc_dev_err(-ENOMEM, "could not open device");
+       }
+
+end:
+       mutex_unlock(&dev_mutex);
+       return mc_result;
+}
+EXPORT_SYMBOL(mc_open_device);
+
+/*
+ * Close the TEE device.
+ *
+ * Decrements the open count; the last close refuses to proceed while
+ * sessions are still open (MC_DRV_ERR_SESSION_PENDING), otherwise it
+ * freezes the client via client_close() and resets the open count.
+ */
+enum mc_result mc_close_device(u32 device_id)
+{
+       enum mc_result mc_result = MC_DRV_OK;
+
+       /* Check parameters */
+       if (!is_valid_device(device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       mutex_lock(&dev_mutex);
+       /* Not the last close: just drop one open count */
+       if (open_count > 1) {
+               open_count--;
+               goto end;
+       }
+
+       /* Check sessions and freeze client */
+       if (client_has_sessions(client)) {
+               mc_result = MC_DRV_ERR_SESSION_PENDING;
+               goto end;
+       }
+
+       /* Close the device */
+       client_close(client);
+       open_count = 0;
+
+end:
+       mutex_unlock(&dev_mutex);
+       clientlib_client_put();
+       return mc_result;
+}
+EXPORT_SYMBOL(mc_close_device);
+
+/*
+ * Open an MC session to the TA identified by @uuid, with the given TCI
+ * buffer.  On success the session id is stored in @session->session_id.
+ */
+enum mc_result mc_open_session(struct mc_session_handle *session,
+                              const struct mc_uuid_t *uuid,
+                              u8 *tci_va, u32 tci_len)
+{
+	enum mc_result mc_ret;
+	int err;
+
+	/* Parameter sanity */
+	if (!session || !uuid)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!is_valid_device(session->device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Delegate to the core client API, then map errno to mc_result */
+	err = client_mc_open_session(client, uuid, (uintptr_t)tci_va,
+				     tci_len, &session->session_id);
+	mc_ret = convert(err);
+	clientlib_client_put();
+	return mc_ret;
+}
+EXPORT_SYMBOL(mc_open_session);
+
+/*
+ * Open a session to a trustlet whose binary is provided directly by the
+ * caller (@ta_va/@ta_len), rather than loaded by the daemon.
+ *
+ * @ta_va and @ta_len are mandatory; the TCI buffer is passed through to
+ * the core API as-is.
+ */
+enum mc_result mc_open_trustlet(struct mc_session_handle *session, u32 spid,
+                               u8 *ta_va, u32 ta_len, u8 *tci_va, u32 tci_len)
+{
+       enum mc_result ret;
+
+       /* Check parameters */
+       if (!session || !ta_va || !ta_len)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(
+               client_mc_open_trustlet(client, spid, (uintptr_t)ta_va, ta_len,
+                                       (uintptr_t)tci_va, tci_len,
+                                       &session->session_id));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_open_trustlet);
+
+/* Close the session identified by @session->session_id via the core API */
+enum mc_result mc_close_session(struct mc_session_handle *session)
+{
+       enum mc_result ret;
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(client_remove_session(client, session->session_id));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_close_session);
+
+/* Send a notification to the secure-world side of the given session */
+enum mc_result mc_notify(struct mc_session_handle *session)
+{
+       enum mc_result ret;
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(client_notify_session(client, session->session_id));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_notify);
+
+/*
+ * Wait for a notification from the session's secure-world side.
+ *
+ * @timeout: timeout, or MC_INFINITE_TIMEOUT to wait forever.
+ *
+ * With an infinite timeout the wait is transparently restarted when it
+ * is interrupted by a signal; with a finite timeout the interruption is
+ * reported to the caller as MC_DRV_ERR_INTERRUPTED_BY_SIGNAL.
+ */
+enum mc_result mc_wait_notification(struct mc_session_handle *session,
+                                   s32 timeout)
+{
+       enum mc_result ret;
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api; restart on signal only for infinite waits */
+       do {
+               ret = convert(client_waitnotif_session(client,
+                                                      session->session_id,
+                                                      timeout, false));
+       } while ((timeout == MC_INFINITE_TIMEOUT) &&
+                (ret == MC_DRV_ERR_INTERRUPTED_BY_SIGNAL));
+
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_wait_notification);
+
+/*
+ * Allocate a world-shared memory buffer of @len bytes and return its
+ * kernel virtual address in *@wsm.  @align and @wsm_flags are accepted
+ * for API compatibility but not used by this implementation.
+ */
+enum mc_result mc_malloc_wsm(u32 device_id, u32 align, u32 len, u8 **wsm,
+                            u32 wsm_flags)
+{
+	enum mc_result mc_ret;
+	uintptr_t va = 0;
+
+	/* Parameter sanity */
+	if (!is_valid_device(device_id))
+		return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+	if (!len || !wsm)
+		return MC_DRV_ERR_INVALID_PARAMETER;
+
+	if (!clientlib_client_get())
+		return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+	/* Delegate the allocation to the core client API */
+	mc_ret = convert(client_cbuf_create(client, len, &va, NULL));
+	if (mc_ret == MC_DRV_OK)
+		*wsm = (u8 *)va;
+
+	clientlib_client_put();
+	return mc_ret;
+}
+EXPORT_SYMBOL(mc_malloc_wsm);
+
+/* Free a world-shared memory buffer allocated with mc_malloc_wsm() */
+enum mc_result mc_free_wsm(u32 device_id, u8 *wsm)
+{
+       enum mc_result ret;
+       uintptr_t va = (uintptr_t)wsm;
+
+       /* Check parameters */
+       if (!is_valid_device(device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(client_cbuf_free(client, va));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_free_wsm);
+
+/*
+ * Map a caller buffer into the secure world for the given session.
+ *
+ * On success, the secure virtual address and mapped length chosen by the
+ * core API are returned through @map_info.  The buffer is mapped
+ * read/write (MC_IO_MAP_INPUT_OUTPUT).
+ */
+enum mc_result mc_map(struct mc_session_handle *session, void *address,
+                     u32 length, struct mc_bulk_map *map_info)
+{
+       enum mc_result ret;
+       struct mc_ioctl_buffer buf = {
+               .va = (uintptr_t)address,
+               .len = length,
+               .flags = MC_IO_MAP_INPUT_OUTPUT,
+       };
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!map_info)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(client_mc_map(client, session->session_id, NULL, &buf));
+       if (ret == MC_DRV_OK) {
+               /* Report the secure-side mapping back to the caller */
+               map_info->secure_virt_addr = buf.sva;
+               map_info->secure_virt_len = buf.len;
+       }
+
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_map);
+
+/*
+ * Unmap a buffer previously mapped with mc_map().
+ *
+ * The secure virtual address and length recorded in @map_info by mc_map()
+ * are handed back to the core API to identify the mapping.
+ */
+enum mc_result mc_unmap(struct mc_session_handle *session, void *address,
+                       struct mc_bulk_map *map_info)
+{
+       enum mc_result ret;
+       struct mc_ioctl_buffer buf = {
+               .va = (uintptr_t)address,
+               .flags = MC_IO_MAP_INPUT_OUTPUT,
+       };
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!map_info)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       buf.len = map_info->secure_virt_len;
+       buf.sva = map_info->secure_virt_addr;
+
+       ret = convert(client_mc_unmap(client, session->session_id, &buf));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_unmap);
+
+/* Fetch the session's exit code as reported by the core API into @exit_code */
+enum mc_result mc_get_session_error_code(struct mc_session_handle *session,
+                                        s32 *exit_code)
+{
+       enum mc_result ret;
+
+       /* Check parameters */
+       if (!session)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!is_valid_device(session->device_id))
+               return MC_DRV_ERR_UNKNOWN_DEVICE;
+
+       if (!exit_code)
+               return MC_DRV_ERR_INVALID_PARAMETER;
+
+       if (!clientlib_client_get())
+               return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
+
+       /* Call core api */
+       ret = convert(client_get_session_exitcode(client, session->session_id,
+                                                 exit_code));
+       clientlib_client_put();
+       return ret;
+}
+EXPORT_SYMBOL(mc_get_session_error_code);
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/clock.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/clock.c
new file mode 100755 (executable)
index 0000000..cd3d032
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "platform.h"
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+#include "main.h"
+#include "clock.h"
+
+/* All crypto-engine clocks managed by this file; NULL when not acquired */
+static struct clk_context {
+       struct clk              *mc_ce_iface_clk;       /* "iface_clk" */
+       struct clk              *mc_ce_core_clk;        /* "core_clk" */
+       struct clk              *mc_ce_bus_clk;         /* "bus_clk" (AXI) */
+       struct clk              *mc_ce_core_src_clk;    /* "core_clk_src" */
+       /* Clocks are managed by Linux Kernel. No need to do anything */
+       bool                    no_clock_support;
+} clk_ctx;
+
+/*
+ * Acquire (and optionally rate-configure) the secure crypto clocks.
+ *
+ * If the device tree advertises TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE, the
+ * kernel manages the clocks itself and this function does nothing more.
+ * On any error, all clock pointers are reset to NULL so that
+ * mc_clock_exit()/mc_clock_enable() stay safe to call.
+ *
+ * NOTE(review): if MC_CLOCK_CORESRC_DEFAULTRATE is defined while
+ * MC_CRYPTO_CLOCK_CORESRC_PROPNAME is not, clk_set_rate() below runs
+ * with mc_ce_core_src_clk still NULL (never clk_get'ed) — harmless only
+ * because clk_set_rate(NULL, ...) is a no-op; confirm this is intended.
+ * Also note the two consecutive #ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+ * blocks could be merged.
+ */
+int mc_clock_init(void)
+{
+       int ret;
+#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
+       int core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+       u32 of_core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+#endif
+#endif
+#ifdef TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE
+       struct device_node *np;
+
+       np = of_find_node_by_name(NULL, TT_CLOCK_DEVICE_NAME);
+       if (!np) {
+               ret = -ENOENT;
+               mc_dev_err(ret, "cannot get clock device from DT");
+               goto error;
+       }
+
+       /* DT says the kernel handles the clocks: nothing to acquire */
+       clk_ctx.no_clock_support =
+               of_property_read_bool(np, TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE);
+       if (clk_ctx.no_clock_support)
+               return 0;
+#endif /* TT_CRYPTO_NO_CLOCK_SUPPORT_FEATURE */
+
+#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+       /* Get core clk src */
+       clk_ctx.mc_ce_core_src_clk = clk_get(g_ctx.mcd, "core_clk_src");
+       if (IS_ERR(clk_ctx.mc_ce_core_src_clk)) {
+               ret = PTR_ERR(clk_ctx.mc_ce_core_src_clk);
+               mc_dev_err(ret, "cannot get core src clock");
+               goto error;
+       }
+#endif
+
+#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
+       /* DT may override the default core source clock rate */
+       ret = of_property_read_u32(g_ctx.mcd->of_node,
+                                  MC_CRYPTO_CLOCK_CORESRC_PROPNAME,
+                                  &of_core_src_rate);
+       if (ret) {
+               core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
+               mc_dev_info("cannot get clock frequency from DT, use %d",
+                           core_src_rate);
+       } else {
+               core_src_rate = of_core_src_rate;
+       }
+
+#endif /* MC_CRYPTO_CLOCK_CORESRC_PROPNAME */
+
+       ret = clk_set_rate(clk_ctx.mc_ce_core_src_clk, core_src_rate);
+       if (ret) {
+               clk_put(clk_ctx.mc_ce_core_src_clk);
+               clk_ctx.mc_ce_core_src_clk = NULL;
+               mc_dev_err(ret, "cannot set core clock src rate");
+               ret = -EIO;
+               goto error;
+       }
+#endif  /* MC_CLOCK_CORESRC_DEFAULTRATE */
+
+       /* Get core clk */
+       clk_ctx.mc_ce_core_clk = clk_get(g_ctx.mcd, "core_clk");
+       if (IS_ERR(clk_ctx.mc_ce_core_clk)) {
+               ret = PTR_ERR(clk_ctx.mc_ce_core_clk);
+               mc_dev_err(ret, "cannot get core clock");
+               goto error;
+       }
+
+       /* Get Interface clk */
+       clk_ctx.mc_ce_iface_clk = clk_get(g_ctx.mcd, "iface_clk");
+       if (IS_ERR(clk_ctx.mc_ce_iface_clk)) {
+               clk_put(clk_ctx.mc_ce_core_clk);
+               ret = PTR_ERR(clk_ctx.mc_ce_iface_clk);
+               mc_dev_err(ret, "cannot get iface clock");
+               goto error;
+       }
+
+       /* Get AXI clk */
+       clk_ctx.mc_ce_bus_clk = clk_get(g_ctx.mcd, "bus_clk");
+       if (IS_ERR(clk_ctx.mc_ce_bus_clk)) {
+               clk_put(clk_ctx.mc_ce_iface_clk);
+               clk_put(clk_ctx.mc_ce_core_clk);
+               ret = PTR_ERR(clk_ctx.mc_ce_bus_clk);
+               mc_dev_err(ret, "cannot get AXI bus clock");
+               goto error;
+       }
+
+       return 0;
+
+error:
+       /* Leave a clean state: no dangling ERR_PTR values */
+       clk_ctx.mc_ce_core_clk = NULL;
+       clk_ctx.mc_ce_iface_clk = NULL;
+       clk_ctx.mc_ce_bus_clk = NULL;
+       clk_ctx.mc_ce_core_src_clk = NULL;
+       return ret;
+}
+
+/*
+ * Release all clocks acquired by mc_clock_init().  Each pointer is
+ * either a valid clock or NULL (the init error path clears them), so the
+ * NULL checks below are sufficient.
+ */
+void mc_clock_exit(void)
+{
+       if (clk_ctx.no_clock_support)
+               return;
+
+       if (clk_ctx.mc_ce_iface_clk)
+               clk_put(clk_ctx.mc_ce_iface_clk);
+
+       if (clk_ctx.mc_ce_core_clk)
+               clk_put(clk_ctx.mc_ce_core_clk);
+
+       if (clk_ctx.mc_ce_bus_clk)
+               clk_put(clk_ctx.mc_ce_bus_clk);
+
+       if (clk_ctx.mc_ce_core_src_clk)
+               clk_put(clk_ctx.mc_ce_core_src_clk);
+}
+
+/*
+ * Enable the crypto clocks (core, then interface, then bus).
+ *
+ * On failure the clocks already enabled are unwound in reverse order via
+ * the goto labels, so the function either enables all three or none.
+ */
+int mc_clock_enable(void)
+{
+       int ret;
+
+       if (clk_ctx.no_clock_support)
+               return 0;
+
+       ret = clk_prepare_enable(clk_ctx.mc_ce_core_clk);
+       if (ret) {
+               mc_dev_err(ret, "cannot enable core clock");
+               goto err_core;
+       }
+
+       ret = clk_prepare_enable(clk_ctx.mc_ce_iface_clk);
+       if (ret) {
+               mc_dev_err(ret, "cannot enable interface clock");
+               goto err_iface;
+       }
+
+       ret = clk_prepare_enable(clk_ctx.mc_ce_bus_clk);
+       if (ret) {
+               mc_dev_err(ret, "cannot enable bus clock");
+               goto err_bus;
+       }
+
+       return 0;
+
+err_bus:
+       clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
+err_iface:
+       clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
+err_core:
+       return ret;
+}
+
+/* Disable the crypto clocks enabled by mc_clock_enable() */
+void mc_clock_disable(void)
+{
+	struct clk *clocks[] = {
+		/* Same order as the original hand-rolled sequence */
+		clk_ctx.mc_ce_iface_clk,
+		clk_ctx.mc_ce_core_clk,
+		clk_ctx.mc_ce_bus_clk,
+	};
+	size_t i;
+
+	if (clk_ctx.no_clock_support)
+		return;
+
+	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
+		if (clocks[i])
+			clk_disable_unprepare(clocks[i]);
+}
+
+#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/clock.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/clock.h
new file mode 100755 (executable)
index 0000000..c802fc0
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_CLOCK_H_
+#define _MC_CLOCK_H_
+
+#include "platform.h"  /* MC_CRYPTO_CLOCK_MANAGEMENT */
+
+/*
+ * Secure crypto clock management.  When the platform does not define
+ * MC_CRYPTO_CLOCK_MANAGEMENT, all functions compile to no-op stubs.
+ */
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+
+/* Initialize secure crypto clocks */
+int mc_clock_init(void);
+/* Free secure crypto clocks */
+void mc_clock_exit(void);
+/* Enable secure crypto clocks */
+int mc_clock_enable(void);
+/* Disable secure crypto clocks */
+void mc_clock_disable(void);
+
+#else /* MC_CRYPTO_CLOCK_MANAGEMENT */
+
+/* No clock management on this platform: stubs that always succeed */
+static inline int mc_clock_init(void)
+{
+       return 0;
+}
+
+static inline void mc_clock_exit(void)
+{
+}
+
+static inline int mc_clock_enable(void)
+{
+       return 0;
+}
+
+static inline void mc_clock_disable(void)
+{
+}
+
+#endif /* !MC_CRYPTO_CLOCK_MANAGEMENT */
+
+#endif /* _MC_CLOCK_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.c
new file mode 100644 (file)
index 0000000..ef2645a
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>       /* local_clock */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/clock.h> /* local_clock */
+#endif
+
+#include "mci/mcifc.h"
+
+#include "platform.h"  /* MC_SMC_FASTCALL */
+#include "main.h"
+#include "fastcall.h"
+
+/* Use the arch_extension sec pseudo op before switching to secure world */
+#if defined(__GNUC__) && \
+       defined(__GNUC_MINOR__) && \
+       defined(__GNUC_PATCHLEVEL__) && \
+       ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
+       >= 40502
+#ifndef CONFIG_ARM64
+#define MC_ARCH_EXTENSION_SEC
+#endif
+#endif
+
+/* Base for all fastcalls, do not use outside of other structs */
+union fc_common {
+       struct {
+               u32 cmd;        /* fastcall command, e.g. MC_FC_INIT */
+               u32 param[3];   /* command-specific input parameters */
+       } in;
+
+       struct {
+               u32 resp;       /* response code from secure world */
+               u32 ret;        /* status, mapped by convert_fc_ret() */
+               u32 param[2];   /* command-specific output values */
+       } out;
+};
+
+/* MC_FC_INIT: hand the MCI buffer base and NQ/MCP layout to the TEE
+ * (see fc_init() below) */
+union fc_init {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 base;
+               u32 nq_info;
+               u32 mcp_info;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+               u32 flags;
+               u32 rfu;
+       } out;
+};
+
+/* Query TEE state / extended info for a given info id -- presumably
+ * MC_FC_INFO; confirm against mci/mcifc.h */
+union fc_info {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 ext_info_id;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+               u32 state;
+               u32 ext_info;
+       } out;
+};
+
+/* Register a trace (log) buffer with the TEE: 64-bit physical address
+ * split into low/high words plus its size */
+union fc_trace {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 buffer_low;
+               u32 buffer_high;
+               u32 size;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+       } out;
+};
+
+/* Ask the TEE to migrate to another CPU core */
+union fc_switch_core {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 core_id;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+               u32 state;
+               u32 ext_info;
+       } out;
+};
+
+/* Non-secure interrupt notification to the TEE; the debug_* fields carry
+ * diagnostic context only */
+union fc_nsiq {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 debug_ret;
+               u32 debug_session_id;
+               u32 debug_payload;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+       } out;
+};
+
+/* Donate the current timeslice to the TEE */
+union fc_yield {
+       union fc_common common;
+
+       struct {
+               u32 cmd;
+               u32 debug_ret;
+               u32 debug_timeslice;
+       } in;
+
+       struct {
+               u32 resp;
+               u32 ret;
+       } out;
+};
+
+/* Structure to log SMC calls */
+struct smc_log_entry {
+       u64 cpu_clk;            /* local_clock() timestamp of the call */
+       union fc_common fc;     /* input registers of the fastcall */
+};
+
+/* Fixed-size ring buffer of the most recent SMC calls (for debugging);
+ * smc_log_index wraps back to 0 at SMC_LOG_SIZE */
+#define SMC_LOG_SIZE 1024
+static struct smc_log_entry smc_log[SMC_LOG_SIZE];
+static int smc_log_index;
+
+/*
+ * Map a secure-world fastcall status code to a Linux errno value
+ * (0 on success, negative errno otherwise).
+ */
+static int convert_fc_ret(u32 ret)
+{
+	if (ret == MC_FC_RET_OK)
+		return 0;
+
+	if (ret == MC_FC_RET_ERR_INVALID)
+		return -EINVAL;
+
+	if (ret == MC_FC_RET_ERR_ALREADY_INITIALIZED)
+		return -EBUSY;
+
+	/* Any other status is reported as a generic fault */
+	return -EFAULT;
+}
+
+/*
+ * __smc() - fast call to MobiCore
+ *
+ * @fc:   pointer to fast call data (in: command, out: response)
+ * @func: name of the calling function, used in error messages
+ *
+ * The call is first recorded in the circular SMC debug log, then issued
+ * either through the platform's smc_fastcall() hook or via a direct
+ * SMC instruction.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static inline int __smc(union fc_common *fc, const char *func)
+{
+       int ret = 0;
+
+       /* Log SMC call */
+       smc_log[smc_log_index].cpu_clk = local_clock();
+       smc_log[smc_log_index].fc = *fc;
+       if (++smc_log_index >= SMC_LOG_SIZE)
+               smc_log_index = 0;
+
+#ifdef MC_SMC_FASTCALL
+       ret = smc_fastcall(fc, sizeof(*fc));
+#else /* MC_SMC_FASTCALL */
+       {
+#ifdef CONFIG_ARM64
+               /* SMC expect values in x0-x3 */
+               register u64 reg0 __asm__("x0") = fc->in.cmd;
+               register u64 reg1 __asm__("x1") = fc->in.param[0];
+               register u64 reg2 __asm__("x2") = fc->in.param[1];
+               register u64 reg3 __asm__("x3") = fc->in.param[2];
+
+               /*
+                * According to AARCH64 SMC Calling Convention (ARM DEN 0028A),
+                * section 3.1: registers x4-x17 are unpredictable/scratch
+                * registers.  So we have to make sure that the compiler does
+                * not allocate any of those registers by letting him know that
+                * the asm code might clobber them.
+                */
+               __asm__ volatile (
+                       "smc #0\n"
+                       : "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+                       :
+                       : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
+                         "x12", "x13", "x14", "x15", "x16", "x17"
+               );
+#else /* CONFIG_ARM64 */
+               /* SMC expect values in r0-r3 */
+               register u32 reg0 __asm__("r0") = fc->in.cmd;
+               register u32 reg1 __asm__("r1") = fc->in.param[0];
+               register u32 reg2 __asm__("r2") = fc->in.param[1];
+               register u32 reg3 __asm__("r3") = fc->in.param[2];
+
+               __asm__ volatile (
+#ifdef MC_ARCH_EXTENSION_SEC
+                       /*
+                        * This pseudo op is supported and required from
+                        * binutils 2.21 on
+                        */
+                       ".arch_extension sec\n"
+#endif /* MC_ARCH_EXTENSION_SEC */
+                       "smc #0\n"
+                       : "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+               );
+
+#endif /* !CONFIG_ARM64 */
+
+               /* set response (registers updated in place by the SMC) */
+               fc->out.resp     = reg0;
+               fc->out.ret      = reg1;
+               fc->out.param[0] = reg2;
+               fc->out.param[1] = reg3;
+       }
+#endif /* !MC_SMC_FASTCALL */
+
+       /* Transport error first, then the SWd status word */
+       if (ret) {
+               mc_dev_err(ret, "failed for %s", func);
+       } else {
+               ret = convert_fc_ret(fc->out.ret);
+               if (ret)
+                       mc_dev_err(ret, "%s failed (%x)", func, fc->out.ret);
+       }
+
+       return ret;
+}
+
+/*
+ * smc() - issue a fast call, passing the caller's name for diagnostics.
+ * The argument is parenthesized so any pointer expression expands safely;
+ * the original '__fc__.common' form only compiled because callers happen
+ * to pass an unparenthesized '&fc'.
+ */
+#define smc(__fc__) __smc(&(__fc__)->common, __func__)
+
+/*
+ * fc_init() - MC_FC_INIT fast call: hand the MCI buffer to the secure world
+ * @addr:      base address of the MCI buffer, PAGE_SIZE aligned
+ * @off:       offset of the MCP buffer inside the MCI buffer
+ * @q_len:     length of the notification queue area
+ * @buf_len:   length of the MCP buffer
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_init(uintptr_t addr, ptrdiff_t off, size_t q_len, size_t buf_len)
+{
+       union fc_init fc;
+#ifdef CONFIG_ARM64
+       u32 addr_high = (u32)(addr >> 32);
+#else
+       u32 addr_high = 0;
+#endif
+
+       /* Call the INIT fastcall to setup MobiCore initialization */
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_FC_INIT;
+       /* base address of mci buffer PAGE_SIZE (default is 4KB) aligned */
+       fc.in.base = (u32)addr;
+       /* [16:16] = [high 16 bits of addr : notification queue length] */
+       fc.in.nq_info = (u32)(((addr_high & 0xFFFF) << 16) | (q_len & 0xFFFF));
+       /* mcp buffer start/length [16:16] [start, length]
+        * NOTE(review): 'off' is not masked to 16 bits before shifting —
+        * presumably guaranteed to fit; confirm against MCI layout.
+        */
+       fc.in.mcp_info = (u32)((off << 16) | (buf_len & 0xFFFF));
+       mc_dev_devel("cmd=%d, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
+                    fc.in.cmd, fc.in.base, fc.in.nq_info,
+                    fc.in.mcp_info);
+       return smc(&fc);
+}
+
+/*
+ * fc_info() - MC_FC_INFO fast call: read TEE state and extended info
+ * @ext_info_id:       index of the extended info item to query
+ * @state:             optional out: TEE state
+ *                     (MC_STATUS_NOT_INITIALIZED on failure)
+ * @ext_info:          optional out: extended info value (0 on failure)
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_info(u32 ext_info_id, u32 *state, u32 *ext_info)
+{
+       union fc_info fc;
+       int ret;
+
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_FC_INFO;
+       fc.in.ext_info_id = ext_info_id;
+       ret = smc(&fc);
+       if (!ret) {
+               /* Success: report the values returned by the secure world */
+               if (state)
+                       *state = fc.out.state;
+
+               if (ext_info)
+                       *ext_info = fc.out.ext_info;
+
+               return 0;
+       }
+
+       /* Failure: report well-defined fallback values */
+       if (state)
+               *state = MC_STATUS_NOT_INITIALIZED;
+
+       if (ext_info)
+               *ext_info = 0;
+
+       mc_dev_err(ret, "failed for index %d", ext_info_id);
+       return ret;
+}
+
+/*
+ * fc_trace_init() - register the secure-world trace buffer
+ * @buffer:    physical address of the trace buffer (0 to disable)
+ * @size:      buffer size in bytes
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_trace_init(phys_addr_t buffer, u32 size)
+{
+       union fc_trace fc;
+
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_FC_MEM_TRACE;
+       fc.in.buffer_low = (u32)buffer;
+#ifdef CONFIG_ARM64
+       /* High half of the address only exists on 64-bit platforms */
+       fc.in.buffer_high = (u32)(buffer >> 32);
+#endif
+       fc.in.size = size;
+       return smc(&fc);
+}
+
+/* Disable secure-world tracing by registering a null, zero-sized buffer */
+int fc_trace_deinit(void)
+{
+       return fc_trace_init(0, 0);
+}
+
+/*
+ * fc_nsiq() - send an N-SIQ fast call to the secure world
+ * @session_id:        session ID, only used for debug purpose
+ * @payload:   payload, only used for debug purpose
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_nsiq(u32 session_id, u32 payload)
+{
+       union fc_nsiq fc;
+
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_SMC_N_SIQ;
+       fc.in.debug_session_id = session_id;
+       fc.in.debug_payload = payload;
+       return smc(&fc);
+}
+
+/*
+ * fc_yield() - yield the CPU to the secure world
+ * @timeslice: only used for debug purpose
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_yield(u32 timeslice)
+{
+       union fc_yield fc;
+
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_SMC_N_YIELD;
+       fc.in.debug_timeslice = timeslice;
+       return smc(&fc);
+}
+
+/*
+ * fc_switch_core() - ask the secure world to run on another CPU core
+ * @core_id:   target core
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int fc_switch_core(int core_id)
+{
+       union fc_switch_core fc;
+
+       memset(&fc, 0, sizeof(fc));
+       fc.in.cmd = MC_FC_SWAP_CPU;
+       fc.in.core_id = core_id;
+       return smc(&fc);
+}
+
+/* Print one SMC log entry (timestamp, command and parameters) into @buf.
+ * Returns the kasnprintf() result (negative on error).
+ */
+static int show_smc_log_entry(struct kasnprintf_buf *buf,
+                             struct smc_log_entry *entry)
+{
+       return kasnprintf(buf, "%20llu %10d 0x%08x 0x%08x 0x%08x\n",
+                         entry->cpu_clk, (s32)entry->fc.in.cmd,
+                         entry->fc.in.param[0], entry->fc.in.param[1],
+                         entry->fc.in.param[2]);
+}
+
+/*
+ * Dump the SMC log circular buffer into @buf, oldest command first. It is
+ * assumed nothing goes in any more at this point.
+ *
+ * Return: last kasnprintf() result, negative on error.
+ */
+int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf)
+{
+       int i, ret;
+
+       ret = kasnprintf(buf, "%20s %10s %-10s %-10s %-10s\n",
+                        "CPU clock", "command", "param1", "param2", "param3");
+       if (ret < 0)
+               return ret;
+
+       /* A used entry at the write index means the buffer has wrapped:
+        * entries from the index to the end are the oldest records.
+        */
+       if (smc_log[smc_log_index].cpu_clk) {
+               for (i = smc_log_index; i < SMC_LOG_SIZE; i++) {
+                       ret = show_smc_log_entry(buf, &smc_log[i]);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       /* Then the most recent records, in chronological order */
+       for (i = 0; i < smc_log_index; i++) {
+               ret = show_smc_log_entry(buf, &smc_log[i]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/fastcall.h
new file mode 100755 (executable)
index 0000000..c93e09a
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TBASE_FASTCALL_H_
+#define _TBASE_FASTCALL_H_
+
+/* Hand the MCI buffer over to the secure world (MC_FC_INIT) */
+int fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len);
+/* Query TEE state and an extended info item (MC_FC_INFO) */
+int fc_info(u32 ext_info_id, u32 *state, u32 *ext_info);
+/* Register the secure-world trace buffer; deinit passes a null buffer */
+int fc_trace_init(phys_addr_t buffer, u32 size);
+int fc_trace_deinit(void);
+/* Send an N-SIQ to the secure world; IDs are only used for debug */
+int fc_nsiq(u32 session_id, u32 payload);
+/* Yield to the secure world; timeslice is only used for debug */
+int fc_yield(u32 timeslice);
+/* Ask the secure world to run on the given core */
+int fc_switch_core(int core_id);
+
+/* Dump the fast call debug log into @buf, oldest entry first */
+int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf);
+
+#endif /* _TBASE_FASTCALL_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.c
new file mode 100755 (executable)
index 0000000..68c3417
--- /dev/null
@@ -0,0 +1,1207 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/freezer.h>
+#include <asm/barrier.h>
+#include <linux/irq.h>
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/clock.h> /* local_clock */
+#endif
+
+#include "public/GP/tee_client_api.h"  /* GP error codes/origins FIXME move */
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "mci/mcimcp.h"
+#include "mci/mcifc.h"
+#include "mci/mcinq.h"         /* SID_MCP */
+#include "mci/mcitime.h"       /* struct mcp_time */
+#include "mci/mciiwp.h"
+
+#include "main.h"
+#include "admin.h"              /* tee_object* for 'blob' */
+#include "mmu.h"                /* MMU for 'blob' */
+#include "nq.h"
+#include "xen_fe.h"
+#include "iwp.h"
+
+/* NOTE(review): IWP_RETRIES/IWP_TIMEOUT are not used in this part of the
+ * file — presumably used further down; confirm before removing.
+ */
+#define IWP_RETRIES            5
+#define IWP_TIMEOUT            10
+/* Marker for "no slot allocated"; tolerated by iws_slot_put() */
+#define INVALID_IWS_SLOT       0xFFFFFFFF
+
+/* Macros */
+/* Extract the 4-bit type of GP parameter @i from packed param_types @t */
+#define _TEEC_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
+
+/* Parameter number */
+#define _TEEC_PARAMETER_NUMBER 4
+
+/* List element tying a free/allocated list node to an IWS slot value */
+struct iws {
+       struct list_head list;
+       u64 slot;       /* slot value handed out by iws_slot_get() */
+};
+
+/* Module-local state of the interworld protocol (IWP) layer */
+static struct {
+       bool iwp_dead;          /* Set when the TEE stops; fail all commands */
+       struct interworld_session *iws;
+       /* Interworld lists lock */
+       struct mutex            iws_list_lock;
+       /* InterWorld lists */
+       struct iws              *iws_list_pool;
+       struct list_head        free_iws;
+       struct list_head        allocd_iws;
+       /* Sessions */
+       struct mutex            sessions_lock;
+       struct list_head        sessions;
+       /* TEE bad state detection */
+       struct notifier_block   tee_stop_notifier;
+       /* Log of last commands */
+#define LAST_CMDS_SIZE 256
+       struct mutex            last_cmds_mutex;        /* Log protection */
+       struct command_info {
+               u64                     cpu_clk;        /* Kernel time */
+               pid_t                   pid;            /* Caller PID */
+               u32                     id;             /* IWP command ID */
+               u32                     session_id;
+               char                    uuid_str[34];
+               enum state {
+                       UNUSED,         /* Unused slot */
+                       PENDING,        /* Previous command in progress */
+                       SENT,           /* Waiting for response */
+                       COMPLETE,       /* Got result */
+                       FAILED,         /* Something went wrong */
+               }                       state;  /* Command processing state */
+               struct gp_return        result; /* Command result */
+               int                     errno;  /* Return code */
+       }                               last_cmds[LAST_CMDS_SIZE];
+       int                             last_cmds_index;
+} l_ctx;
+
+/*
+ * iwp_notif_handler() - dispatch a secure world notification to a session
+ * @id:                notification ID (SID_CANCEL_OPERATION is special)
+ * @payload:   slot of the targeted session
+ *
+ * Looks the session up by slot in l_ctx.sessions, updates its state and
+ * completes the waiter blocked in iwp_cmd().
+ */
+static void iwp_notif_handler(u32 id, u32 payload)
+{
+       struct iwp_session *iwp_session = NULL, *candidate;
+
+       mutex_lock(&l_ctx.sessions_lock);
+       list_for_each_entry(candidate, &l_ctx.sessions, list) {
+               mc_dev_devel("candidate->slot [%08llx]", candidate->slot);
+               /* If id is SID_CANCEL_OPERATION, there is pseudo session */
+               if (candidate->slot == payload &&
+                   (id != SID_CANCEL_OPERATION || candidate->sid == id)) {
+                       iwp_session = candidate;
+                       break;
+               }
+       }
+       mutex_unlock(&l_ctx.sessions_lock);
+
+       if (!iwp_session) {
+               mc_dev_err(-ENXIO, "IWP no session found for id=0x%x slot=0x%x",
+                          id, payload);
+               return;
+       }
+
+       mc_dev_devel("IWP: iwp_session [%p] id [%08x] slot [%08x]",
+                    iwp_session, id, payload);
+       nq_session_state_update(&iwp_session->nq_session, NQ_NOTIF_RECEIVED);
+       complete(&iwp_session->completion);
+}
+
+/*
+ * iwp_session_init() - put an IWP session into its initial, running state
+ * @iwp_session:       session to initialize
+ * @identity:  optional GP client identity, copied into the session if set
+ *
+ * The session starts with no slot (INVALID_IWS_SLOT) and no SID.
+ */
+void iwp_session_init(struct iwp_session *iwp_session,
+                     const struct identity *identity)
+{
+       nq_session_init(&iwp_session->nq_session, true);
+       iwp_session->sid = SID_INVALID;
+       iwp_session->slot = INVALID_IWS_SLOT;
+       INIT_LIST_HEAD(&iwp_session->list);
+       mutex_init(&iwp_session->notif_wait_lock);
+       init_completion(&iwp_session->completion);
+       mutex_init(&iwp_session->iws_lock);
+       iwp_session->state = IWP_SESSION_RUNNING;
+       if (identity)
+               iwp_session->client_identity = *identity;
+}
+
+/*
+ * iws_slot_get() - allocate an interworld session slot
+ *
+ * On a Xen domU the "slot" is the address of a freshly allocated buffer;
+ * otherwise it comes from the pre-allocated free list.
+ *
+ * Return: the slot, or INVALID_IWS_SLOT when none is available. The
+ * original code returned 0 on kzalloc() failure, which callers do not
+ * recognize as invalid and would dereference via slot_to_iws().
+ */
+static u64 iws_slot_get(void)
+{
+       struct iws *iws;
+       u64 slot = INVALID_IWS_SLOT;
+
+       if (is_xen_domu()) {
+               iws = kzalloc(sizeof(*iws), GFP_KERNEL);
+               if (!iws)
+                       return INVALID_IWS_SLOT;
+
+               return (uintptr_t)iws;
+       }
+
+       mutex_lock(&l_ctx.iws_list_lock);
+       if (!list_empty(&l_ctx.free_iws)) {
+               iws = list_first_entry(&l_ctx.free_iws, struct iws, list);
+               slot = iws->slot;
+               list_move(&iws->list, &l_ctx.allocd_iws);
+               atomic_inc(&g_ctx.c_slots);
+               mc_dev_devel("got slot %llu", slot);
+       }
+       mutex_unlock(&l_ctx.iws_list_lock);
+       return slot;
+}
+
+/*
+ * iws_slot_put() - release a slot obtained from iws_slot_get()
+ * @slot: slot to release; passing INVALID_IWS_SLOT is supported
+ *
+ * Fix: on Xen domU the slot is a kzalloc()'d pointer, so the
+ * INVALID_IWS_SLOT marker (e.g. from a session that never opened) must
+ * not be passed to kfree().
+ */
+static void iws_slot_put(u64 slot)
+{
+       struct iws *iws;
+       bool found = false;
+
+       if (is_xen_domu()) {
+               if (slot != INVALID_IWS_SLOT)
+                       kfree((void *)(uintptr_t)slot);
+               return;
+       }
+
+       mutex_lock(&l_ctx.iws_list_lock);
+       list_for_each_entry(iws, &l_ctx.allocd_iws, list) {
+               if (slot == iws->slot) {
+                       list_move(&iws->list, &l_ctx.free_iws);
+                       atomic_dec(&g_ctx.c_slots);
+                       found = true;
+                       mc_dev_devel("put slot %llu", slot);
+                       break;
+               }
+       }
+       mutex_unlock(&l_ctx.iws_list_lock);
+
+       if (!found)
+               mc_dev_err(-EINVAL, "slot %llu not found", slot);
+}
+
+/* Translate a slot into its interworld session: on Xen domU the slot
+ * already is the session's address, otherwise it is an offset into the
+ * shared l_ctx.iws area.
+ */
+static inline struct interworld_session *slot_to_iws(u64 slot)
+{
+       uintptr_t address;
+
+       address = is_xen_domu() ? (uintptr_t)slot :
+                                 (uintptr_t)l_ctx.iws + (u32)slot;
+       return (struct interworld_session *)address;
+}
+
+/*
+ * IWP command functions
+ */
+/*
+ * iwp_cmd() - send one IWP command to the secure world and wait for it
+ * @iwp_session:       session the command operates on
+ * @id:                command ID (SID_OPEN_SESSION, SID_INVOKE_COMMAND, ...)
+ * @uuid:      TA UUID, only used (for logging) by 'open session' commands
+ * @killable:  if true, a fatal signal triggers a cancellation request
+ *
+ * The command is traced in the last_cmds debug log through its whole life
+ * cycle (PENDING -> SENT -> COMPLETE/FAILED).
+ *
+ * Return: 0 on success, -EHOSTUNREACH if the TEE died, other negative
+ * errno on notification failure.
+ */
+static int iwp_cmd(struct iwp_session *iwp_session, u32 id,
+                  struct teec_uuid *uuid, bool killable)
+{
+       struct command_info *cmd_info;
+       int ret;
+
+       /* Initialize MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+       memset(cmd_info, 0, sizeof(*cmd_info));
+       cmd_info->cpu_clk = local_clock();
+       cmd_info->pid = current->pid;
+       cmd_info->id = id;
+       if (id == SID_OPEN_SESSION || id == SID_OPEN_TA) {
+               /* Keep UUID because it's an 'open session' cmd */
+               const char *cuuid = (const char *)uuid;
+               size_t i;
+
+               cmd_info->uuid_str[0] = ' ';
+               for (i = 0; i < sizeof(*uuid); i++) {
+                       snprintf(&cmd_info->uuid_str[1 + i * 2], 3, "%02x",
+                                cuuid[i]);
+               }
+       } else if (id == SID_CANCEL_OPERATION) {
+               struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+
+               if (iws)
+                       cmd_info->session_id = iws->session_handle;
+               else
+                       cmd_info->session_id = 0;
+       } else {
+               cmd_info->session_id = iwp_session->sid;
+       }
+
+       cmd_info->state = PENDING;
+       iwp_set_ret(0, &cmd_info->result);
+       if (++l_ctx.last_cmds_index >= LAST_CMDS_SIZE)
+               l_ctx.last_cmds_index = 0;
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+
+       /* Do not even try to notify a dead TEE */
+       if (l_ctx.iwp_dead)
+               return -EHOSTUNREACH;
+
+       mc_dev_devel("psid [%08x], sid [%08x]", id, iwp_session->sid);
+       ret = nq_session_notify(&iwp_session->nq_session, id,
+                               iwp_session->slot);
+       if (ret) {
+               mc_dev_err(ret, "sid [%08x]: sending failed", iwp_session->sid);
+               mutex_lock(&l_ctx.last_cmds_mutex);
+               cmd_info->errno = ret;
+               cmd_info->state = FAILED;
+               mutex_unlock(&l_ctx.last_cmds_mutex);
+               return ret;
+       }
+
+       /* Update MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       cmd_info->state = SENT;
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+
+       /*
+        * NB: Wait cannot be interruptible as we need an answer from SWd. It's
+        * up to the user-space to request a cancellation (for open session and
+        * command invocation operations.)
+        *
+        * We do provide a way out to make applications killable in some cases
+        * though.
+        */
+       if (killable) {
+               ret = wait_for_completion_killable(&iwp_session->completion);
+               if (ret) {
+                       iwp_request_cancellation(iwp_session->slot);
+                       /* Make sure the SWd did not die in the meantime */
+                       if (l_ctx.iwp_dead)
+                               return -EHOSTUNREACH;
+
+                       /* Cancellation requested: wait for the real answer */
+                       wait_for_completion(&iwp_session->completion);
+               }
+       } else {
+               wait_for_completion(&iwp_session->completion);
+       }
+
+       if (l_ctx.iwp_dead)
+               return -EHOSTUNREACH;
+
+       /* Update MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       {
+               struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+
+               cmd_info->result.origin = iws->return_origin;
+               cmd_info->result.value = iws->status;
+               if (id == SID_OPEN_SESSION || id == SID_OPEN_TA)
+                       cmd_info->session_id = iws->session_handle;
+       }
+       cmd_info->state = COMPLETE;
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+       nq_session_state_update(&iwp_session->nq_session, NQ_NOTIF_CONSUMED);
+       return 0;
+}
+
+/*
+ * Convert errno into GP error and set origin to COMMS.
+ * Note: -ECHILD is used to tell the caller that we have a GP error in value, so
+ * we return 0 on success and -ECHILD on error. If -ECHILD is given, we assume
+ * that value is already correctly set.
+ */
+int iwp_set_ret(int ret, struct gp_return *gp_ret)
+{
+       if (ret == -ECHILD) {
+               /* Already set */
+               return ret;
+       }
+
+       /* Default origin for errors; overridden on success below */
+       gp_ret->origin = TEEC_ORIGIN_COMMS;
+       switch (ret) {
+       case 0:
+               gp_ret->origin = TEEC_ORIGIN_TRUSTED_APP;
+               gp_ret->value = TEEC_SUCCESS;
+               return 0;
+       case -EACCES:
+               gp_ret->value = TEEC_ERROR_ACCESS_DENIED;
+               break;
+       case -EBUSY:
+               gp_ret->value = TEEC_ERROR_BUSY;
+               break;
+       case -ECANCELED:
+               gp_ret->value = TEEC_ERROR_CANCEL;
+               break;
+       case -EINVAL:
+       case -EFAULT:
+               gp_ret->value = TEEC_ERROR_BAD_PARAMETERS;
+               break;
+       case -EKEYREJECTED:
+               gp_ret->value = TEEC_ERROR_SECURITY;
+               break;
+       case -ENOENT:
+               gp_ret->value = TEEC_ERROR_ITEM_NOT_FOUND;
+               break;
+       case -ENOMEM:
+               gp_ret->value = TEEC_ERROR_OUT_OF_MEMORY;
+               break;
+       case -EHOSTUNREACH:
+               /* Tee crashed */
+               gp_ret->value = TEEC_ERROR_TARGET_DEAD;
+               break;
+       case -ENXIO:
+               /* Session not found or not running */
+               gp_ret->value = TEEC_ERROR_BAD_STATE;
+               break;
+       default:
+               gp_ret->value = TEEC_ERROR_GENERIC;
+       }
+       return -ECHILD;
+}
+
+/*
+ * iwp_register_shared_mem() - register a shared memory buffer with the TEE
+ * @mmu:       MMU descriptor of the buffer
+ * @sva:       out: secure virtual address (handle) of the registration
+ * @gp_ret:    out: GP return code/origin
+ *
+ * Return: 0 on success, -ECHILD when gp_ret carries the GP error.
+ */
+int iwp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+                           struct gp_return *gp_ret)
+{
+       int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+       if (is_xen_domu())
+               return xen_gp_register_shared_mem(mmu, sva, gp_ret);
+#endif
+
+       ret = mcp_map(SID_MEMORY_REFERENCE, mmu, sva);
+       /* iwp_set_ret would override the origin if called after */
+       ret = iwp_set_ret(ret, gp_ret);
+       if (ret)
+               gp_ret->origin = TEEC_ORIGIN_TEE;
+
+       return ret;
+}
+
+/*
+ * iwp_release_shared_mem() - undo iwp_register_shared_mem()
+ * @map:       buffer map to release
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int iwp_release_shared_mem(struct mcp_buffer_map *map)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+       if (is_xen_domu())
+               return xen_gp_release_shared_mem(map);
+#endif
+
+       return mcp_unmap(SID_MEMORY_REFERENCE, map);
+}
+
+/*
+ * iwp_operation_to_iws() - convert a GP operation into interworld parameters
+ * @operation: GP operation coming from the client
+ * @iws:       destination interworld session
+ * @bufs:      out: per-parameter temporary buffers that still need mapping
+ *             (va == 0 means no mapping required for that parameter)
+ * @parents:   out: per-parameter registered (parent) shared memories
+ *
+ * Value parameters are copied directly; memory references are only
+ * described here and get their mapping/handle filled in later (see
+ * iwp_iws_set_refs()).
+ *
+ * Return: 0 on success, -EINVAL on inconsistent parameters.
+ */
+static int iwp_operation_to_iws(struct gp_operation *operation,
+                               struct interworld_session *iws,
+                               struct mc_ioctl_buffer *bufs,
+                               struct gp_shared_memory **parents)
+{
+       int param_type, i;
+
+       iws->param_types = 0;
+       for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+               /* Reset reference for temporary memory */
+               bufs[i].va = 0;
+               /* Reset reference for registered memory */
+               parents[i] = NULL;
+               param_type = _TEEC_GET_PARAM_TYPE(operation->param_types, i);
+
+               switch (param_type) {
+               case TEEC_NONE:
+               case TEEC_VALUE_OUTPUT:
+                       break;
+               case TEEC_VALUE_INPUT:
+               case TEEC_VALUE_INOUT:
+                       iws->params[i].value.a = operation->params[i].value.a;
+                       iws->params[i].value.b = operation->params[i].value.b;
+                       break;
+               case TEEC_MEMREF_TEMP_INPUT:
+               case TEEC_MEMREF_TEMP_OUTPUT:
+               case TEEC_MEMREF_TEMP_INOUT:
+                       if (operation->params[i].tmpref.buffer) {
+                               /* Prepare buffer to map */
+                               bufs[i].va = operation->params[i].tmpref.buffer;
+                               bufs[i].len = operation->params[i].tmpref.size;
+                               if (param_type == TEEC_MEMREF_TEMP_INPUT)
+                                       bufs[i].flags = MC_IO_MAP_INPUT;
+                               else if (param_type == TEEC_MEMREF_TEMP_OUTPUT)
+                                       bufs[i].flags = MC_IO_MAP_OUTPUT;
+                               else
+                                       bufs[i].flags = MC_IO_MAP_INPUT_OUTPUT;
+                       } else {
+                               /* A null buffer with a size is inconsistent */
+                               if (operation->params[i].tmpref.size)
+                                       return -EINVAL;
+
+                               /* Null buffer, won't get mapped */
+                               iws->params[i].tmpref.physical_address = 0;
+                               iws->params[i].tmpref.size = 0;
+                               iws->params[i].tmpref.offset = 0;
+                               iws->params[i].tmpref.wsm_type = WSM_INVALID;
+                       }
+                       break;
+               case TEEC_MEMREF_WHOLE:
+                       parents[i] = &operation->params[i].memref.parent;
+                       iws->params[i].memref.offset = 0;
+                       iws->params[i].memref.size =
+                               operation->params[i].memref.parent.size;
+                       break;
+               case TEEC_MEMREF_PARTIAL_INPUT:
+               case TEEC_MEMREF_PARTIAL_OUTPUT:
+               case TEEC_MEMREF_PARTIAL_INOUT:
+                       parents[i] = &operation->params[i].memref.parent;
+                       iws->params[i].memref.offset =
+                               operation->params[i].memref.offset;
+                       iws->params[i].memref.size =
+                               operation->params[i].memref.size;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               /* Repack the type into the IWS 4-bit-per-param layout */
+               iws->param_types |= (u32)(param_type << (i * 4));
+       }
+
+       return 0;
+}
+
+/* Describe a mapped temporary buffer in IWS parameter @i */
+static inline void iwp_iws_set_tmpref(struct interworld_session *iws, int i,
+                                     const struct mcp_buffer_map *map)
+{
+       iws->params[i].tmpref.physical_address = map->addr;
+       iws->params[i].tmpref.size = map->length;
+       iws->params[i].tmpref.offset = map->offset;
+       iws->params[i].tmpref.wsm_type = map->type;
+}
+
+/* Reference already-registered memory by its secure handle in parameter @i */
+static inline void iwp_iws_set_memref(struct interworld_session *iws, int i,
+                                     u32 sva)
+{
+       iws->params[i].memref.memref_handle = sva;
+}
+
+/* Fill the IWS parameters with the resolved memory references:
+ * registered memory gets its secure handle, mapped temporary buffers get
+ * their mapping description.
+ */
+static inline void iwp_iws_set_refs(struct interworld_session *iws,
+                                   const struct iwp_buffer_map *maps)
+{
+       int param;
+
+       for (param = 0; param < _TEEC_PARAMETER_NUMBER; param++) {
+               if (maps[param].sva)
+                       iwp_iws_set_memref(iws, param, maps[param].sva);
+               else if (maps[param].map.addr)
+                       iwp_iws_set_tmpref(iws, param, &maps[param].map);
+       }
+}
+
+/*
+ * iwp_iws_to_operation() - copy output values/sizes from the IWS back into
+ * the client's GP operation
+ * @iws:       interworld session holding the secure world's answer
+ * @operation: GP operation to update (only OUTPUT/INOUT parameters)
+ */
+static void iwp_iws_to_operation(const struct interworld_session *iws,
+                                struct gp_operation *operation)
+{
+       int i;
+
+       for (i = 0; i < _TEEC_PARAMETER_NUMBER; i++) {
+               switch (_TEEC_GET_PARAM_TYPE(operation->param_types, i)) {
+               case TEEC_VALUE_OUTPUT:
+               case TEEC_VALUE_INOUT:
+                       operation->params[i].value.a = iws->params[i].value.a;
+                       operation->params[i].value.b = iws->params[i].value.b;
+                       break;
+               case TEEC_MEMREF_TEMP_OUTPUT:
+               case TEEC_MEMREF_TEMP_INOUT:
+                       operation->params[i].tmpref.size =
+                               iws->params[i].tmpref.size;
+                       break;
+               case TEEC_MEMREF_WHOLE:
+                       /* Whole memrefs are writable unless input-only */
+                       if (operation->params[i].memref.parent.flags !=
+                           TEEC_MEM_INPUT)
+                               operation->params[i].memref.size =
+                                       iws->params[i].tmpref.size;
+                       break;
+               case TEEC_MEMREF_PARTIAL_OUTPUT:
+               case TEEC_MEMREF_PARTIAL_INOUT:
+                       operation->params[i].memref.size =
+                               iws->params[i].tmpref.size;
+                       break;
+               case TEEC_NONE:
+               case TEEC_VALUE_INPUT:
+               case TEEC_MEMREF_TEMP_INPUT:
+               case TEEC_MEMREF_PARTIAL_INPUT:
+                       break;
+               default:
+                       /* Error caught by iwp_operation_to_iws() */
+                       break;
+               }
+       }
+}
+
+/*
+ * mcuuid_to_tee_uuid() - convert a raw 16-byte UUID to the GP TEE layout
+ * @in:                source UUID (array of bytes, fields big-endian)
+ * @out:       destination GP TEE UUID
+ *
+ * Warning: this code works only on little-endian platforms.
+ *
+ * Bytes are widened to u32 before shifting: left-shifting a byte value
+ * >= 0x80 by 24 in (promoted) signed int arithmetic overflows, which is
+ * undefined behavior.
+ */
+static inline void mcuuid_to_tee_uuid(const struct mc_uuid_t *in,
+                                     struct teec_uuid *out)
+{
+       out->time_low = (u32)in->value[3] |
+               ((u32)in->value[2] << 8) |
+               ((u32)in->value[1] << 16) |
+               ((u32)in->value[0] << 24);
+       out->time_mid = in->value[5] +
+               (in->value[4] << 8);
+       out->time_hi_and_version = in->value[7] +
+               (in->value[6] << 8);
+       memcpy(out->clock_seq_and_node, in->value + 8, 8);
+}
+
+/* Human-readable name of a GP return origin, "UNKNOWN" if unrecognized */
+static const char *origin_to_string(u32 origin)
+{
+       if (origin == TEEC_ORIGIN_API)
+               return "API";
+
+       if (origin == TEEC_ORIGIN_COMMS)
+               return "COMMS";
+
+       if (origin == TEEC_ORIGIN_TEE)
+               return "TEE";
+
+       if (origin == TEEC_ORIGIN_TRUSTED_APP)
+               return "TRUSTED_APP";
+
+       return "UNKNOWN";
+}
+
+/* Human-readable name of a GP error code.
+ * Returns NULL when the value is unknown — presumably so the caller can
+ * print the raw value instead; verify at call sites.
+ */
+static const char *value_to_string(u32 value)
+{
+       switch (value) {
+       case TEEC_SUCCESS:
+               return "SUCCESS";
+       case TEEC_ERROR_GENERIC:
+               return "GENERIC";
+       case TEEC_ERROR_ACCESS_DENIED:
+               return "ACCESS_DENIED";
+       case TEEC_ERROR_CANCEL:
+               return "CANCEL";
+       case TEEC_ERROR_ACCESS_CONFLICT:
+               return "ACCESS_CONFLICT";
+       case TEEC_ERROR_EXCESS_DATA:
+               return "EXCESS_DATA";
+       case TEEC_ERROR_BAD_FORMAT:
+               return "BAD_FORMAT";
+       case TEEC_ERROR_BAD_PARAMETERS:
+               return "BAD_PARAMETERS";
+       case TEEC_ERROR_BAD_STATE:
+               return "BAD_STATE";
+       case TEEC_ERROR_ITEM_NOT_FOUND:
+               return "ITEM_NOT_FOUND";
+       case TEEC_ERROR_NOT_IMPLEMENTED:
+               return "NOT_IMPLEMENTED";
+       case TEEC_ERROR_NOT_SUPPORTED:
+               return "NOT_SUPPORTED";
+       case TEEC_ERROR_NO_DATA:
+               return "NO_DATA";
+       case TEEC_ERROR_OUT_OF_MEMORY:
+               return "OUT_OF_MEMORY";
+       case TEEC_ERROR_BUSY:
+               return "BUSY";
+       case TEEC_ERROR_COMMUNICATION:
+               return "COMMUNICATION";
+       case TEEC_ERROR_SECURITY:
+               return "SECURITY";
+       case TEEC_ERROR_SHORT_BUFFER:
+               return "SHORT_BUFFER";
+       case TEEC_ERROR_TARGET_DEAD:
+               return "TARGET_DEAD";
+       case TEEC_ERROR_STORAGE_NO_SPACE:
+               return "STORAGE_NO_SPACE";
+       }
+       return NULL;
+}
+
+/* Human-readable name of an IWP command ID, "unknown" if unrecognized */
+static const char *cmd_to_string(u32 id)
+{
+       if (id == SID_OPEN_SESSION)
+               return "open session";
+
+       if (id == SID_INVOKE_COMMAND)
+               return "invoke command";
+
+       if (id == SID_CLOSE_SESSION)
+               return "close session";
+
+       if (id == SID_CANCEL_OPERATION)
+               return "cancel operation";
+
+       if (id == SID_MEMORY_REFERENCE)
+               return "memory reference";
+
+       if (id == SID_OPEN_TA)
+               return "open TA";
+
+       if (id == SID_REQ_TA)
+               return "request TA";
+
+       return "unknown";
+}
+
+static const char *state_to_string(enum iwp_session_state state)
+{
+       switch (state) {
+       case IWP_SESSION_RUNNING:
+               return "running";
+       case IWP_SESSION_CLOSE_REQUESTED:
+               return "close requested";
+       case IWP_SESSION_CLOSED:
+               return "closed";
+       }
+       return "error";
+}
+
+/*
+ * iwp_open_session_prepare() - allocate the two slots needed to open a
+ * session and convert the client operation
+ * @iwp_session:       session being opened
+ * @operation: optional GP operation from the client
+ * @bufs:      out: temporary buffers to map (see iwp_operation_to_iws())
+ * @parents:   out: registered shared memories per parameter
+ * @gp_ret:    out: GP return code/origin
+ *
+ * On success the session's iws_lock is held; it is released by either
+ * the open completion path or iwp_open_session_abort().
+ *
+ * Return: 0 on success, -ECHILD when gp_ret carries the GP error.
+ */
+int iwp_open_session_prepare(
+       struct iwp_session *iwp_session,
+       struct gp_operation *operation,
+       struct mc_ioctl_buffer *bufs,
+       struct gp_shared_memory **parents,
+       struct gp_return *gp_ret)
+{
+       struct interworld_session *iws;
+       u64 slot, op_slot;
+       int ret = 0;
+
+       /* Get session final slot */
+       slot = iws_slot_get();
+       mc_dev_devel("slot [%08llx]", slot);
+       if (slot == INVALID_IWS_SLOT) {
+               ret = -ENOMEM;
+               mc_dev_err(ret, "can't get slot");
+               return iwp_set_ret(ret, gp_ret);
+       }
+
+       /* Get session temporary slot */
+       op_slot = iws_slot_get();
+       mc_dev_devel("op_slot [%08llx]", op_slot);
+       if (op_slot == INVALID_IWS_SLOT) {
+               ret = -ENOMEM;
+               mc_dev_err(ret, "can't get op_slot");
+               iws_slot_put(slot);
+               return iwp_set_ret(ret, gp_ret);
+       }
+
+       mutex_lock(&iwp_session->iws_lock);
+
+       /* Prepare final session: refer to temporary slot in final one */
+       iwp_session->slot = slot;
+       iws = slot_to_iws(slot);
+       memset(iws, 0, sizeof(*iws));
+
+       /* Prepare temporary session */
+       iwp_session->op_slot = op_slot;
+       iws = slot_to_iws(op_slot);
+       memset(iws, 0, sizeof(*iws));
+
+       if (operation) {
+               ret = iwp_operation_to_iws(operation, iws, bufs, parents);
+               if (ret)
+                       iwp_open_session_abort(iwp_session);
+       }
+
+       return iwp_set_ret(ret, gp_ret);
+}
+
+/* Undo iwp_open_session_prepare(): release both slots and the IWS lock */
+void iwp_open_session_abort(struct iwp_session *iwp_session)
+{
+       iws_slot_put(iwp_session->slot);
+       iws_slot_put(iwp_session->op_slot);
+       mutex_unlock(&iwp_session->iws_lock);
+}
+
+/*
+ * Like open session except we pass the TA blob from NWd to SWd
+ */
+int iwp_open_session(
+       struct iwp_session *iwp_session,
+       const struct mc_uuid_t *uuid,
+       struct gp_operation *operation,
+       const struct iwp_buffer_map *maps,
+       struct interworld_session *iws_in,
+       struct tee_mmu **mmus,
+       struct gp_return *gp_ret)
+{
+       struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+       struct interworld_session *op_iws = slot_to_iws(iwp_session->op_slot);
+       struct tee_object *obj = NULL;
+       struct tee_mmu *obj_mmu = NULL;
+       struct mcp_buffer_map obj_map;
+       int ret;
+
+       /* Operation is NULL when called from Xen BE */
+       if (operation) {
+               /* Login info */
+               op_iws->login = iwp_session->client_identity.login_type;
+               mc_dev_devel("iws->login [%08x]", op_iws->login);
+               memcpy(&op_iws->client_uuid,
+                      iwp_session->client_identity.login_data,
+                      sizeof(op_iws->client_uuid));
+
+               /* Put ingoing operation in temporary IWS */
+               iwp_iws_set_refs(op_iws, maps);
+       } else {
+               struct mcp_buffer_map map;
+               int i;
+
+               *op_iws = *iws_in;
+
+               /* Insert correct mapping in operation */
+               for (i = 0; i < 4; i++) {
+                       if (!mmus[i])
+                               continue;
+
+                       tee_mmu_buffer(mmus[i], &map);
+                       iwp_iws_set_tmpref(op_iws, i, &map);
+               }
+       }
+
+       /* For the SWd to find the TA slot from the main one */
+       iws->command_id = (u32)iwp_session->op_slot;
+
+       /* TA blob handling */
+       if (!is_xen_domu()) {
+               union mclf_header *header;
+
+               obj = tee_object_get(uuid, true);
+               if (IS_ERR(obj)) {
+                       /* Tell SWd to load TA from SFS as not in registry */
+                       if (PTR_ERR(obj) == -ENOENT)
+                               obj = tee_object_select(uuid);
+
+                       if (IS_ERR(obj))
+                               return PTR_ERR(obj);
+               }
+
+               /* Convert UUID */
+               header = (union mclf_header *)(&obj->data[obj->header_length]);
+               mcuuid_to_tee_uuid(&header->mclf_header_v2.uuid,
+                                  &op_iws->target_uuid);
+
+               /* Create mapping for blob (alloc'd by driver => task = NULL) */
+               {
+                       struct mc_ioctl_buffer buf = {
+                               .va = (uintptr_t)obj->data,
+                               .len = obj->length,
+                               .flags = MC_IO_MAP_INPUT,
+                       };
+
+                       obj_mmu = tee_mmu_create(NULL, &buf);
+                       if (IS_ERR(obj_mmu)) {
+                               ret = PTR_ERR(obj_mmu);
+                               goto err_mmu;
+                       }
+
+                       iws->param_types = TEEC_MEMREF_TEMP_INPUT;
+                       tee_mmu_buffer(obj_mmu, &obj_map);
+                       iwp_iws_set_tmpref(iws, 0, &obj_map);
+                       mc_dev_devel("wsm_type [%04x], offset [%04x]",
+                                    obj_map.type, obj_map.offset);
+                       mc_dev_devel("size [%08x], physical_address [%08llx]",
+                                    obj_map.length, obj_map.addr);
+               }
+       }
+
+       /* Add to local list of sessions so we can receive the notification */
+       mutex_lock(&l_ctx.sessions_lock);
+       list_add_tail(&iwp_session->list, &l_ctx.sessions);
+       mutex_unlock(&l_ctx.sessions_lock);
+
+       /* Send IWP open command */
+#ifdef TRUSTONIC_XEN_DOMU
+       if (is_xen_domu())
+               ret = xen_gp_open_session(iwp_session, uuid, maps, iws, op_iws,
+                                         gp_ret);
+       else
+#endif
+               ret = iwp_cmd(iwp_session, SID_OPEN_TA, &op_iws->target_uuid,
+                             true);
+
+       /* Temporary slot is not needed any more */
+       iws_slot_put(iwp_session->op_slot);
+       /* Treat remote errors as errors, just use a specific errno */
+       if (!ret && iws->status != TEEC_SUCCESS) {
+               gp_ret->origin = iws->return_origin;
+               gp_ret->value = iws->status;
+               ret = -ECHILD;
+       }
+
+       if (!ret) {
+               /* set unique identifier for list search */
+               iwp_session->sid = iws->session_handle;
+               /* Get outgoing operation from main IWS */
+               if (operation)
+                       iwp_iws_to_operation(iws, operation);
+               else
+                       *iws_in = *iws;
+
+       } else {
+               /* Remove from list of sessions */
+               mutex_lock(&l_ctx.sessions_lock);
+               list_del(&iwp_session->list);
+               mutex_unlock(&l_ctx.sessions_lock);
+               iws_slot_put(iwp_session->slot);
+               mc_dev_devel("failed: %s from %s, ret %d",
+                            value_to_string(gp_ret->value),
+                            origin_to_string(gp_ret->origin), ret);
+       }
+
+       mutex_unlock(&iwp_session->iws_lock);
+
+       /* Blob not needed as re-mapped by the SWd */
+       if (obj_mmu)
+               tee_mmu_put(obj_mmu);
+
+err_mmu:
+       /* Delete secure object */
+       if (obj)
+               tee_object_free(obj);
+
+       return iwp_set_ret(ret, gp_ret);
+}
+
+/* Mark the session closed, unlink it and release its IWS slot */
+static void iwp_session_release(
+	struct iwp_session *iwp_session)
+{
+	iwp_session->state = IWP_SESSION_CLOSED;
+
+	/* Remove from list of sessions */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_del(&iwp_session->list);
+	mutex_unlock(&l_ctx.sessions_lock);
+
+	nq_session_exit(&iwp_session->nq_session);
+	iws_slot_put(iwp_session->slot);
+}
+
+/*
+ * Legacy and GP TAs close differently:
+ * - GP TAs always send a notification with payload, whether on close or crash
+ * - GP TAs may take time to close
+ *
+ * Sends SID_CLOSE_SESSION (or the Xen front-end equivalent), then releases
+ * the session unconditionally, even if the command failed.
+ */
+int iwp_close_session(
+	struct iwp_session *iwp_session)
+{
+	int ret = 0;
+
+	if (is_xen_domu()) {
+#ifdef TRUSTONIC_XEN_DOMU
+		ret = xen_gp_close_session(iwp_session);
+#endif
+	} else {
+		mutex_lock(&iwp_session->iws_lock);
+		iwp_session->state = IWP_SESSION_CLOSE_REQUESTED;
+
+		/* Send IWP open command */
+		ret = iwp_cmd(iwp_session, SID_CLOSE_SESSION, NULL, false);
+		mutex_unlock(&iwp_session->iws_lock);
+	}
+
+	iwp_session_release(iwp_session);
+	/* NOTE(review): release() set state to CLOSED, so the trace below
+	 * always prints "closed" -- confirm that is intended
+	 */
+	mc_dev_devel("close session %x ret %d state %s", iwp_session->sid,
+		     ret, state_to_string(iwp_session->state));
+	return ret;
+}
+
+/*
+ * Prepare the session's IWS slot for an invoke: zero it, set the session
+ * handle and, if @operation is given, translate it into the IWS.
+ *
+ * On success, 0 is returned and iws_lock is left HELD; it is released by
+ * iwp_invoke_command() or iwp_invoke_command_abort().  Fails with -EBADFD
+ * if the session is not running.
+ */
+int iwp_invoke_command_prepare(
+	struct iwp_session *iwp_session,
+	u32 command_id,
+	struct gp_operation *operation,
+	struct mc_ioctl_buffer *bufs,
+	struct gp_shared_memory **parents,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws;
+	int ret = 0;
+
+	if (iwp_session->state != IWP_SESSION_RUNNING)
+		return iwp_set_ret(-EBADFD, gp_ret);
+
+	mutex_lock(&iwp_session->iws_lock);
+	iws = slot_to_iws(iwp_session->slot);
+	memset(iws, 0, sizeof(*iws));
+	iws->session_handle = iwp_session->sid;
+	if (operation) {
+		iws->command_id = command_id;
+		ret = iwp_operation_to_iws(operation, iws, bufs, parents);
+		if (ret)
+			/* Drops iws_lock */
+			iwp_invoke_command_abort(iwp_session);
+	}
+
+	return iwp_set_ret(ret, gp_ret);
+}
+
+/* Abort a prepared invoke: drop the iws_lock taken by the prepare call */
+void iwp_invoke_command_abort(
+	struct iwp_session *iwp_session)
+{
+	mutex_unlock(&iwp_session->iws_lock);
+}
+
+/*
+ * Send an invoke command prepared by iwp_invoke_command_prepare() and copy
+ * the result back into @operation (or @iws_in when called from Xen BE).
+ *
+ * Expects iws_lock to be held (taken by the prepare call); it is released
+ * before returning.  Remote TEE errors are reported as -ECHILD with
+ * origin/value filled in @gp_ret.
+ */
+int iwp_invoke_command(
+	struct iwp_session *iwp_session,
+	struct gp_operation *operation,
+	const struct iwp_buffer_map *maps,
+	struct interworld_session *iws_in,
+	struct tee_mmu **mmus,
+	struct gp_return *gp_ret)
+{
+	struct interworld_session *iws = slot_to_iws(iwp_session->slot);
+	int ret = 0;
+
+	/* Operation is NULL when called from Xen BE */
+	if (operation) {
+		/* Update IWS with operation maps */
+		iwp_iws_set_refs(iws, maps);
+	} else {
+		struct mcp_buffer_map map;
+		int i;
+
+		*iws = *iws_in;
+
+		/* Insert correct mapping in operation */
+		for (i = 0; i < 4; i++) {
+			if (!mmus[i])
+				continue;
+
+			tee_mmu_buffer(mmus[i], &map);
+			iwp_iws_set_tmpref(iws, i, &map);
+		}
+	}
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		ret = xen_gp_invoke_command(iwp_session, maps, iws, gp_ret);
+	else
+#endif
+		ret = iwp_cmd(iwp_session, SID_INVOKE_COMMAND, NULL, true);
+
+	/* Treat remote errors as errors, just use a specific errno */
+	if (!ret && iws->status != TEEC_SUCCESS)
+		ret = -ECHILD;
+
+	/* Copy the outgoing operation back, even on remote error */
+	if (operation)
+		iwp_iws_to_operation(iws, operation);
+	else
+		*iws_in = *iws;
+
+	if (ret && (ret != -ECHILD)) {
+		ret = iwp_set_ret(ret, gp_ret);
+		mc_dev_devel("failed with ret [%08x]", ret);
+	} else {
+		gp_ret->origin = iws->return_origin;
+		gp_ret->value = iws->status;
+	}
+
+	mutex_unlock(&iwp_session->iws_lock);
+	return ret;
+}
+
+/*
+ * Ask the SWd to cancel the operation using the given IWS slot.
+ *
+ * A pseudo session is built on the stack so that the cancel command can be
+ * sent and its notification received through the normal session list.
+ */
+int iwp_request_cancellation(
+	u64 slot)
+{
+	/* Pseudo IWP session for cancellation */
+	struct iwp_session iwp_session;
+	int ret;
+
+#ifdef TRUSTONIC_XEN_DOMU
+	if (is_xen_domu())
+		return xen_gp_request_cancellation(
+			(uintptr_t)slot_to_iws(slot));
+#endif
+
+	iwp_session_init(&iwp_session, NULL);
+	/* sid is local. Set is to SID_CANCEL_OPERATION to make things clear */
+	iwp_session.sid = SID_CANCEL_OPERATION;
+	iwp_session.slot = slot;
+	mutex_lock(&l_ctx.sessions_lock);
+	list_add_tail(&iwp_session.list, &l_ctx.sessions);
+	mutex_unlock(&l_ctx.sessions_lock);
+	ret = iwp_cmd(&iwp_session, SID_CANCEL_OPERATION, NULL, false);
+	/* The pseudo session is on the stack: must be unlinked before return */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_del(&iwp_session.list);
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+/* Dump all live IWP sessions; callback for the "iwp_sessions" debugfs file */
+static int debug_sessions(struct kasnprintf_buf *buf)
+{
+	struct iwp_session *session;
+	int ret;
+
+	/* Header */
+	ret = kasnprintf(buf, "%20s %4s %-15s %-11s %7s\n",
+			 "CPU clock", "ID", "state", "notif state", "slot");
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(session, &l_ctx.sessions, list) {
+		const char *state_str;
+		u64 cpu_clk;
+
+		state_str = nq_session_state(&session->nq_session, &cpu_clk);
+		ret = kasnprintf(buf, "%20llu %4x %-15s %-11s %7llu\n", cpu_clk,
+				 session->sid == SID_INVALID ? 0 : session->sid,
+				 state_to_string(session->state), state_str,
+				 session->slot);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&l_ctx.sessions_lock);
+	return ret;
+}
+
+/* debugfs read handler: delegates formatting to debug_sessions() */
+static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos,
+				  debug_sessions);
+}
+
+/* File operations for the read-only "iwp_sessions" debugfs entry */
+static const struct file_operations debug_sessions_ops = {
+	.read = debug_sessions_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+/*
+ * Format one command-log record into @buf.
+ * Unknown result values are printed as raw hex via the local scratch buffer.
+ */
+static inline int show_log_entry(struct kasnprintf_buf *buf,
+				 struct command_info *cmd_info)
+{
+	const char *state_str = "unknown";
+	const char *value_str = value_to_string(cmd_info->result.value);
+	char value[16];
+
+	switch (cmd_info->state) {
+	case UNUSED:
+		state_str = "unused";
+		break;
+	case PENDING:
+		state_str = "pending";
+		break;
+	case SENT:
+		state_str = "sent";
+		break;
+	case COMPLETE:
+		state_str = "complete";
+		break;
+	case FAILED:
+		state_str = "failed";
+		break;
+	}
+
+	/* No symbolic name for this value: print it as hex */
+	if (!value_str) {
+		snprintf(value, sizeof(value), "%08x", cmd_info->result.value);
+		value_str = value;
+	}
+
+	return kasnprintf(buf, "%20llu %5d %-16s %5x %-8s %5d %-11s %-17s%s\n",
+			  cmd_info->cpu_clk, cmd_info->pid,
+			  cmd_to_string(cmd_info->id), cmd_info->session_id,
+			  state_str, cmd_info->errno,
+			  origin_to_string(cmd_info->result.origin), value_str,
+			  cmd_info->uuid_str);
+}
+
+/*
+ * Dump the circular buffer of last IWP commands, oldest records first.
+ * Callback for the "last_iwp_commands" debugfs file.
+ */
+static int debug_last_cmds(struct kasnprintf_buf *buf)
+{
+	struct command_info *cmd_info;
+	int i, ret = 0;
+
+	/* Initialize MCP log */
+	mutex_lock(&l_ctx.last_cmds_mutex);
+	ret = kasnprintf(buf, "%20s %5s %-16s %5s %-8s %5s %-11s %-17s%s\n",
+			 "CPU clock", "PID", "command", "S-ID",
+			 "state", "errno", "origin", "value", "UUID");
+	if (ret < 0)
+		goto out;
+
+	cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+	if (cmd_info->state != UNUSED) {
+		/* Buffer has wrapped around, dump end (oldest records) */
+		for (i = l_ctx.last_cmds_index; i < LAST_CMDS_SIZE; i++) {
+			ret = show_log_entry(buf, cmd_info++);
+			if (ret < 0)
+				goto out;
+		}
+	}
+
+	/* Dump first records */
+	cmd_info = &l_ctx.last_cmds[0];
+	for (i = 0; i < l_ctx.last_cmds_index; i++) {
+		ret = show_log_entry(buf, cmd_info++);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	mutex_unlock(&l_ctx.last_cmds_mutex);
+	return ret;
+}
+
+/* debugfs read handler: delegates formatting to debug_last_cmds() */
+static ssize_t debug_last_cmds_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	return debug_generic_read(file, user_buf, count, ppos, debug_last_cmds);
+}
+
+/* File operations for the read-only "last_iwp_commands" debugfs entry */
+static const struct file_operations debug_last_cmds_ops = {
+	.read = debug_last_cmds_read,
+	.llseek = default_llseek,
+	.open = debug_generic_open,
+	.release = debug_generic_release,
+};
+
+/*
+ * Flag the IWP as dead (TEE stopped) and wake all waiters so they don't
+ * block forever on notifications that will never arrive.
+ */
+static inline void mark_iwp_dead(void)
+{
+	struct iwp_session *session;
+
+	l_ctx.iwp_dead = true;
+	/* Signal all potential waiters that SWd is going away */
+	mutex_lock(&l_ctx.sessions_lock);
+	list_for_each_entry(session, &l_ctx.sessions, list)
+		complete(&session->completion);
+	mutex_unlock(&l_ctx.sessions_lock);
+}
+
+/* Notifier callback invoked when the TEE stops: mark the IWP as dead */
+static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
+				void *data)
+{
+	mark_iwp_dead();
+	return 0;
+}
+
+/*
+ * Initialise the interworld protocol layer: carve the shared IWS buffer
+ * into MAX_IW_SESSION slots, set up locks/lists and register with the
+ * notification queue.  Returns 0 or -ENOMEM.
+ */
+int iwp_init(void)
+{
+	int i;
+
+	l_ctx.iws = nq_get_iwp_buffer();
+	INIT_LIST_HEAD(&l_ctx.free_iws);
+	INIT_LIST_HEAD(&l_ctx.allocd_iws);
+	l_ctx.iws_list_pool = kcalloc(MAX_IW_SESSION, sizeof(struct iws),
+				      GFP_KERNEL);
+	if (!l_ctx.iws_list_pool)
+		return -ENOMEM;
+
+	/* Each slot is a byte offset into the shared IWS buffer */
+	for (i = 0; i < MAX_IW_SESSION; i++) {
+		l_ctx.iws_list_pool[i].slot =
+			i * sizeof(struct interworld_session);
+		list_add(&l_ctx.iws_list_pool[i].list, &l_ctx.free_iws);
+	}
+
+	mutex_init(&l_ctx.iws_list_lock);
+	INIT_LIST_HEAD(&l_ctx.sessions);
+	mutex_init(&l_ctx.sessions_lock);
+	nq_register_notif_handler(iwp_notif_handler, true);
+	l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+	nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+	/* Debugfs */
+	mutex_init(&l_ctx.last_cmds_mutex);
+	return 0;
+}
+
+/* Tear down the IWP layer: wake waiters and unregister the stop notifier */
+void iwp_exit(void)
+{
+	mark_iwp_dead();
+	nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+}
+
+/* Late start: expose the IWP debugfs entries.  Always succeeds. */
+int iwp_start(void)
+{
+	/* Create debugfs sessions and last commands entries */
+	debugfs_create_file("iwp_sessions", 0400, g_ctx.debug_dir, NULL,
+			    &debug_sessions_ops);
+	debugfs_create_file("last_iwp_commands", 0400, g_ctx.debug_dir, NULL,
+			    &debug_last_cmds_ops);
+	return 0;
+}
+
+/* Counterpart of iwp_start(); nothing to undo (debugfs dir freed globally) */
+void iwp_stop(void)
+{
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/iwp.h
new file mode 100755 (executable)
index 0000000..949d56e
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_IWP_H_
+#define _MC_IWP_H_
+
+#include "mci/mcloadformat.h"          /* struct identity */
+
+#include "nq.h"
+#include "mcp.h" /* mcp_buffer_map FIXME move to nq? */
+
+/* Per-session state for the GP interworld protocol */
+struct iwp_session {
+	/* Notification queue session */
+	struct nq_session	nq_session;
+	/* Session ID */
+	u32			sid;
+	/* IWS slot */
+	u64			slot;
+	/* IWS other slot needed at open */
+	u64			op_slot;
+	/* Sessions list (protected by iwp sessions_lock) */
+	struct list_head	list;
+	/* Notification waiter lock */
+	struct mutex		notif_wait_lock;	/* Only one at a time */
+	/* Notification received */
+	struct completion	completion;
+	/* Interworld struct lock */
+	struct mutex		iws_lock;
+	/* Session state (protected by iwp sessions_lock) */
+	enum iwp_session_state {
+		IWP_SESSION_RUNNING,
+		IWP_SESSION_CLOSE_REQUESTED,
+		IWP_SESSION_CLOSED,
+	}			state;
+	/* GP TAs have login information */
+	struct identity		client_identity;
+};
+
+/* Buffer mapping plus its SWd virtual address (sva) */
+struct iwp_buffer_map {
+	struct mcp_buffer_map map;
+	u32 sva;
+};
+
+/* Private to iwp_session structure */
+void iwp_session_init(struct iwp_session *session,
+                     const struct identity *identity);
+
+/* Getters */
+/* Return the session's SWd-assigned identifier */
+static inline u32 iwp_session_id(struct iwp_session *session)
+{
+	return session->sid;
+}
+
+/* Return the session's IWS slot (offset into the shared IWS buffer) */
+static inline u64 iwp_session_slot(struct iwp_session *session)
+{
+	return session->slot;
+}
+
+/* Convert local errno to GP return values */
+int iwp_set_ret(int ret, struct gp_return *gp_ret);
+
+/* Commands */
+int iwp_register_shared_mem(
+       struct tee_mmu *mmu,
+       u32 *sva,
+       struct gp_return *gp_ret);
+int iwp_release_shared_mem(
+       struct mcp_buffer_map *map);
+int iwp_open_session_prepare(
+       struct iwp_session *session,
+       struct gp_operation *operation,
+       struct mc_ioctl_buffer *bufs,
+       struct gp_shared_memory **parents,
+       struct gp_return *gp_ret);
+void iwp_open_session_abort(
+       struct iwp_session *iwp_session);
+int iwp_open_session(
+       struct iwp_session *iwp_session,
+       const struct mc_uuid_t *uuid,
+       struct gp_operation *operation,
+       const struct iwp_buffer_map *maps,
+       struct interworld_session *iws,
+       struct tee_mmu **mmus,
+       struct gp_return *gp_ret);
+int iwp_close_session(
+       struct iwp_session *iwp_session);
+int iwp_invoke_command_prepare(
+       struct iwp_session *iwp_session,
+       u32 command_id,
+       struct gp_operation *operation,
+       struct mc_ioctl_buffer *bufs,
+       struct gp_shared_memory **parents,
+       struct gp_return *gp_ret);
+void iwp_invoke_command_abort(
+       struct iwp_session *iwp_session);
+int iwp_invoke_command(
+       struct iwp_session *iwp_session,
+       struct gp_operation *operation,
+       const struct iwp_buffer_map *maps,
+       struct interworld_session *iws,
+       struct tee_mmu **mmus,
+       struct gp_return *gp_ret);
+int iwp_request_cancellation(
+       u64 slot);
+
+/* Initialisation/cleanup */
+int iwp_init(void);
+void iwp_exit(void);
+int iwp_start(void);
+void iwp_stop(void);
+
+#endif /* _MC_IWP_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/logging.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/logging.c
new file mode 100755 (executable)
index 0000000..5e45c39
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+
+#include "main.h"
+#include "logging.h"
+
+/* Supported log buffer version */
+#define MC_LOG_VERSION                 2
+
+/* Default length of the log ring buffer 256KiB */
+#define LOG_BUF_ORDER                  6
+
+/* Max Len of a log line for printing */
+#define LOG_LINE_SIZE                  256
+
+/* Definitions for log version 2 */
+#define LOG_TYPE_MASK                  (0x0007)
+#define LOG_TYPE_CHAR                  0
+#define LOG_TYPE_INTEGER               1
+
+/* Field length */
+#define LOG_LENGTH_MASK                        (0x00F8)
+#define LOG_LENGTH_SHIFT               3
+
+/* Extra attributes */
+#define LOG_EOL                                (0x0100)
+#define LOG_INTEGER_DECIMAL            (0x0200)
+#define LOG_INTEGER_SIGNED             (0x0400)
+
+/* One 8-byte record in the SWd trace ring buffer (log version 2) */
+struct mc_logmsg {
+	u16	ctrl;		/* Type and format of data */
+	u16	source;		/* Unique value for each event source */
+	u32	log_data;	/* Value, if any */
+};
+
+/* MobiCore internal trace buffer structure (header followed by records) */
+struct mc_trace_buf {
+	u32	version;	/* version of trace buffer */
+	u32	length;		/* length of buff */
+	u32	head;		/* last write position */
+	u8	buff[];		/* start of the log buffer */
+};
+
+/* Single instance of the log reader state (one "tee_log" thread) */
+static struct logging_ctx {
+	struct kthread_work work;
+	struct kthread_worker worker;
+	struct task_struct *thread;
+	union {
+		struct mc_trace_buf *trace_buf;	/* Circular log buffer */
+		unsigned long trace_page;
+	};
+	u32	tail;			/* MobiCore log read position */
+	int	thread_err;
+	u16	prev_source;		/* Previous Log source */
+	char	line[LOG_LINE_SIZE + 1];/* Log Line buffer */
+	u32	line_len;		/* Log Line buffer current length */
+	/* debugfs_create_bool() took a u32 * before kernel 4.4 */
+#if KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE
+	u32	enabled;		/* Log can be disabled via debugfs */
+#else
+	bool	enabled;		/* Log can be disabled via debugfs */
+#endif
+	bool	dead;
+} log_ctx;
+
+/*
+ * Flush the accumulated log line, tagging it with the previous source.
+ * NOTE(review): the "source" parameter is unused; kept to match callers.
+ */
+static inline void log_eol(u16 source)
+{
+	if (!log_ctx.line_len)
+		return;
+
+	if (log_ctx.prev_source)
+		/* TEE user-space */
+		dev_info(g_ctx.mcd, "%03x|%s\n", log_ctx.prev_source,
+			 log_ctx.line);
+	else
+		/* TEE kernel */
+		dev_info(g_ctx.mcd, "mtk|%s\n", log_ctx.line);
+
+	log_ctx.line[0] = '\0';
+	log_ctx.line_len = 0;
+}
+
+/*
+ * Collect chars in log_ctx.line buffer and output the buffer when it is full.
+ * No locking needed because only "mobicore_log" thread updates this buffer.
+ * The line is also flushed on newline, or when the event source changes.
+ */
+static inline void log_char(char ch, u16 source)
+{
+	if (ch == '\0')
+		return;
+
+	if (ch == '\n' || ch == '\r') {
+		log_eol(source);
+		return;
+	}
+
+	if (log_ctx.line_len >= LOG_LINE_SIZE ||
+	    source != log_ctx.prev_source)
+		log_eol(source);
+
+	/* Append and keep the buffer NUL-terminated */
+	log_ctx.line[log_ctx.line_len++] = ch;
+	log_ctx.line[log_ctx.line_len] = 0;
+	log_ctx.prev_source = source;
+}
+
+/* Unpack up to four characters packed little-endian first in "ch" */
+static inline void log_string(u32 ch, u16 source)
+{
+	for (; ch; ch >>= 8)
+		log_char(ch & 0xFF, source);
+}
+
+/*
+ * Format an integer log record according to the attributes encoded in
+ * "format" (field width, signed/unsigned, decimal/hex) and feed the
+ * resulting characters to log_char().
+ */
+static inline void log_number(u32 format, u32 value, u16 source)
+{
+	unsigned int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
+	char fmt[16];
+	char buffer[32];
+	const char *reader = buffer;
+
+	/* Braces make the if/else pairing explicit (kernel coding style) */
+	if (format & LOG_INTEGER_DECIMAL) {
+		if (format & LOG_INTEGER_SIGNED)
+			snprintf(fmt, sizeof(fmt), "%%%ud", width);
+		else
+			snprintf(fmt, sizeof(fmt), "%%%uu", width);
+	} else {
+		snprintf(fmt, sizeof(fmt), "%%0%ux", width);
+	}
+
+	snprintf(buffer, sizeof(buffer), fmt, value);
+	while (*reader)
+		log_char(*reader++, source);
+}
+
+/*
+ * Decode one trace-buffer record and emit its payload.
+ * Returns the number of bytes consumed (always sizeof(struct mc_logmsg)).
+ */
+static inline int log_msg(void *data)
+{
+	struct mc_logmsg *msg = (struct mc_logmsg *)data;
+	int log_type = msg->ctrl & LOG_TYPE_MASK;
+
+	switch (log_type) {
+	case LOG_TYPE_CHAR:
+		log_string(msg->log_data, msg->source);
+		break;
+	case LOG_TYPE_INTEGER:
+		log_number(msg->ctrl, msg->log_data, msg->source);
+		break;
+	}
+	if (msg->ctrl & LOG_EOL)
+		log_eol(msg->source);
+
+	return sizeof(*msg);
+}
+
+/*
+ * Drain the SWd trace buffer up to the producer's head.
+ * The local mutex serialises re-queued work items against each other.
+ */
+static void logging_worker(struct kthread_work *work)
+{
+	static DEFINE_MUTEX(local_mutex);
+
+	mutex_lock(&local_mutex);
+	while (log_ctx.trace_buf->head != log_ctx.tail) {
+		if (log_ctx.trace_buf->version != MC_LOG_VERSION) {
+			mc_dev_err(-EINVAL, "Bad log data v%d (exp. v%d), stop",
+				   log_ctx.trace_buf->version, MC_LOG_VERSION);
+			log_ctx.dead = true;
+			break;
+		}
+
+		log_ctx.tail += log_msg(&log_ctx.trace_buf->buff[log_ctx.tail]);
+		/* Wrap over if no space left for a complete message */
+		if ((log_ctx.tail + sizeof(struct mc_logmsg)) >
+						log_ctx.trace_buf->length)
+			log_ctx.tail = 0;
+	}
+	mutex_unlock(&local_mutex);
+}
+
+/*
+ * Wake up the log reader thread
+ * This should be called from the places where calls into MobiCore have
+ * generated some logs(eg, yield, SIQ...)
+ * No-op when logging is disabled, dead, or the buffer is already drained.
+ */
+void logging_run(void)
+{
+	if (log_ctx.enabled && !log_ctx.dead &&
+	    log_ctx.trace_buf->head != log_ctx.tail)
+/* kthread worker API was renamed in kernel 4.9 */
+#if KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
+		queue_kthread_work(&log_ctx.worker, &log_ctx.work);
+#else
+		kthread_queue_work(&log_ctx.worker, &log_ctx.work);
+#endif
+}
+
+/*
+ * Setup MobiCore kernel log. It assumes it's running on CORE 0!
+ * The fastcall will complain is that is not the case!
+ */
+int logging_init(phys_addr_t *buffer, u32 *size)
+{
+       /*
+        * We are going to map this buffer into virtual address space in SWd.
+        * To reduce complexity there, we use a contiguous buffer.
+        */
+       log_ctx.trace_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                             LOG_BUF_ORDER);
+       if (!log_ctx.trace_page)
+               return -ENOMEM;
+
+       *buffer = virt_to_phys((void *)(log_ctx.trace_page));
+       *size = BIT(LOG_BUF_ORDER) * PAGE_SIZE;
+
+       /* Logging thread */
+#if KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
+       init_kthread_work(&log_ctx.work, logging_worker);
+       init_kthread_worker(&log_ctx.worker);
+#else
+       kthread_init_work(&log_ctx.work, logging_worker);
+       kthread_init_worker(&log_ctx.worker);
+#endif
+       log_ctx.thread = kthread_create(kthread_worker_fn, &log_ctx.worker,
+                                       "tee_log");
+       if (IS_ERR(log_ctx.thread))
+               return PTR_ERR(log_ctx.thread);
+
+       wake_up_process(log_ctx.thread);
+
+       /* Debugfs switch */
+       log_ctx.enabled = true;
+       debugfs_create_bool("swd_debug", 0600, g_ctx.debug_dir,
+                           &log_ctx.enabled);
+       return 0;
+}
+
+/*
+ * Stop the log reader thread and free the trace buffer, unless the SWd
+ * still references it (@buffer_busy).
+ */
+void logging_exit(bool buffer_busy)
+{
+	/*
+	 * This is not racey as the only caller for logging_run is the
+	 * scheduler which gets stopped before us, and long before we exit.
+	 */
+	kthread_stop(log_ctx.thread);
+	if (!buffer_busy)
+		free_pages(log_ctx.trace_page, LOG_BUF_ORDER);
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/logging.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/logging.h
new file mode 100755 (executable)
index 0000000..34d7bb1
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_LOGGING_H_
+#define _MC_LOGGING_H_
+
+void logging_run(void);
+int logging_init(phys_addr_t *buffer, u32 *size);
+void logging_exit(bool buffer_busy);
+
+#endif /* _MC_LOGGING_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/main.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/main.c
new file mode 100755 (executable)
index 0000000..cf28fcf
--- /dev/null
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"           /* MC_ADMIN_DEVNODE */
+#include "public/mc_linux_api.h"       /* mc_switch_core */
+
+#include "platform.h"                  /* MC_PM_RUNTIME */
+#include "main.h"
+#include "arm.h"
+#include "admin.h"
+#include "user.h"
+#include "iwp.h"
+#include "mcp.h"
+#include "nq.h"
+#include "client.h"
+#include "xen_be.h"
+#include "xen_fe.h"
+#include "build_tag.h"
+
+#define MC_DEVICE_PROPNAME "samsung,exynos-tee"
+
+/* Default entry for our driver in device tree */
+#ifndef MC_DEVICE_PROPNAME
+#define MC_DEVICE_PROPNAME "arm,mcd"
+#endif
+
+/* Define a MobiCore device structure for use with dev_debug() etc */
+static struct device_driver driver = {
+       .name = "Trustonic"
+};
+
+static struct device device = {
+       .driver = &driver
+};
+
+struct mc_device_ctx g_ctx = {
+       .mcd = &device
+};
+
+static struct {
+       /* Device tree compatibility */
+       bool use_platform_driver;
+       /* TEE start return code mutex */
+       struct mutex start_mutex;
+       /* TEE start return code */
+       int start_ret;
+#ifdef MC_PM_RUNTIME
+       /* Whether hibernation succeeded */
+       bool did_hibernate;
+       /* Reboot notifications */
+       struct notifier_block reboot_notifier;
+       /* PM notifications */
+       struct notifier_block pm_notifier;
+#endif
+       /* Devices */
+       dev_t device;
+       struct class *class;
+       /* Admin device */
+       struct cdev admin_cdev;
+       /* User device */
+       dev_t user_dev;
+       struct cdev user_cdev;
+       /* Debug counters */
+       struct mutex struct_counters_buf_mutex;
+       char struct_counters_buf[256];
+       int struct_counters_buf_len;
+} main_ctx;
+
+static int mobicore_start(void);
+static void mobicore_stop(void);
+
+/*
+ * Append a formatted string to a growable buffer.
+ * Grows the buffer with krealloc() when the formatted text does not fit.
+ * Returns the number of characters appended, or -ENOMEM on allocation
+ * failure.
+ */
+int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...)
+{
+       va_list args, args_copy;
+       int max_size = buf->size - buf->off;
+       int i;
+
+       va_start(args, fmt);
+       /*
+        * vsnprintf() consumes its va_list, so keep a copy for the retry
+        * after growing the buffer; reusing the consumed list is undefined
+        * behavior.
+        */
+       va_copy(args_copy, args);
+       i = vsnprintf(buf->buf + buf->off, max_size, fmt, args);
+       if (i >= max_size) {
+               /* Output was truncated: grow to a page-aligned size and retry */
+               int new_size = PAGE_ALIGN(buf->size + i + 1);
+               char *new_buf = krealloc(buf->buf, new_size, buf->gfp);
+
+               if (!new_buf) {
+                       i = -ENOMEM;
+               } else {
+                       buf->buf = new_buf;
+                       buf->size = new_size;
+                       max_size = buf->size - buf->off;
+                       i = vsnprintf(buf->buf + buf->off, max_size, fmt,
+                                     args_copy);
+               }
+       }
+
+       if (i > 0)
+               buf->off += i;
+
+       va_end(args_copy);
+       va_end(args);
+       return i;
+}
+
+/* Free the backing storage and return the buffer to its empty state */
+static inline void kasnprintf_buf_reset(struct kasnprintf_buf *buf)
+{
+       kfree(buf->buf);
+       buf->off = 0;
+       buf->size = 0;
+       buf->buf = NULL;
+}
+
+/*
+ * Generic debugfs read helper: on a read from offset 0, regenerate the
+ * text via 'function' into the per-open kasnprintf buffer, then serve
+ * the request from that buffer.  Serialized by the buffer's mutex.
+ */
+ssize_t debug_generic_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos,
+                          int (*function)(struct kasnprintf_buf *buf))
+{
+       struct kasnprintf_buf *buf = file->private_data;
+       int ret = 0;
+
+       mutex_lock(&buf->mutex);
+       /* Add/update buffer */
+       if (!*ppos) {
+               kasnprintf_buf_reset(buf);
+               ret = function(buf);
+               if (ret < 0) {
+                       /* Generation failed: discard partial content */
+                       kasnprintf_buf_reset(buf);
+                       goto end;
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf->buf,
+                                     buf->off);
+
+end:
+       mutex_unlock(&buf->mutex);
+       return ret;
+}
+
+/*
+ * Generic debugfs open helper: allocate a per-open kasnprintf buffer
+ * that debug_generic_read() fills lazily on the first read.
+ */
+int debug_generic_open(struct inode *inode, struct file *file)
+{
+       struct kasnprintf_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_init(&buf->mutex);
+       buf->gfp = GFP_KERNEL;
+       file->private_data = buf;
+       return 0;
+}
+
+/* Generic debugfs release helper: drop the per-open buffer, if any */
+int debug_generic_release(struct inode *inode, struct file *file)
+{
+       struct kasnprintf_buf *buf = file->private_data;
+
+       if (buf) {
+               kasnprintf_buf_reset(buf);
+               kfree(buf);
+       }
+
+       return 0;
+}
+
+/* debugfs "structs" read: dump client structures via the generic helper */
+static ssize_t debug_structs_read(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       return debug_generic_read(file, user_buf, count, ppos,
+                                 clients_debug_structs);
+}
+
+static const struct file_operations debug_structs_ops = {
+       .read = debug_structs_read,
+       .llseek = default_llseek,
+       .open = debug_generic_open,
+       .release = debug_generic_release,
+};
+
+/*
+ * debugfs "structs_counters" read: snapshot the global object counters
+ * into a shared text buffer (refreshed only when reading from offset 0)
+ * and serve the request from it.
+ */
+static ssize_t debug_struct_counters_read(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       if (!*ppos) {
+               int ret;
+
+               /* The text buffer is shared between readers: serialize */
+               mutex_lock(&main_ctx.struct_counters_buf_mutex);
+               ret = snprintf(main_ctx.struct_counters_buf,
+                              sizeof(main_ctx.struct_counters_buf),
+                              "clients:  %d\n"
+                              "cbufs:    %d\n"
+                              "cwsms:    %d\n"
+                              "sessions: %d\n"
+                              "swsms:    %d\n"
+                              "mmus:     %d\n"
+                              "maps:     %d\n"
+                              "slots:    %d\n"
+                              "xen maps: %d\n"
+                              "xen fes:  %d\n",
+                              atomic_read(&g_ctx.c_clients),
+                              atomic_read(&g_ctx.c_cbufs),
+                              atomic_read(&g_ctx.c_cwsms),
+                              atomic_read(&g_ctx.c_sessions),
+                              atomic_read(&g_ctx.c_wsms),
+                              atomic_read(&g_ctx.c_mmus),
+                              atomic_read(&g_ctx.c_maps),
+                              atomic_read(&g_ctx.c_slots),
+                              atomic_read(&g_ctx.c_xen_maps),
+                              atomic_read(&g_ctx.c_xen_fes));
+               mutex_unlock(&main_ctx.struct_counters_buf_mutex);
+               if (ret > 0)
+                       main_ctx.struct_counters_buf_len = ret;
+       }
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      main_ctx.struct_counters_buf,
+                                      main_ctx.struct_counters_buf_len);
+}
+
+static const struct file_operations debug_struct_counters_ops = {
+       .read = debug_struct_counters_read,
+       .llseek = default_llseek,
+};
+
+/*
+ * debugfs "active_cpu" write: parse a CPU number from user space and
+ * request a TEE core switch to it.
+ */
+static ssize_t debug_coreswitch_write(struct file *file,
+                                     const char __user *buffer,
+                                     size_t buffer_len, loff_t *ppos)
+{
+       int new_cpu = 0;
+
+       /* Invalid data, nothing to do */
+       if (buffer_len < 1)
+               return -EINVAL;
+
+       if (kstrtoint_from_user(buffer, buffer_len, 0, &new_cpu))
+               return -EINVAL;
+
+       mc_dev_devel("set active cpu to %d", new_cpu);
+       /* NOTE(review): mc_switch_core() return value is ignored here */
+       mc_switch_core(new_cpu);
+       return buffer_len;
+}
+
+/*
+ * debugfs "active_cpu" read: report the currently active TEE core.
+ * Note: snprintf() returns the would-be length on truncation, so the
+ * result must be bounded before being used as a buffer length; the old
+ * 8-byte buffer could make simple_read_from_buffer() read past the end
+ * of cpu_str for large or negative core numbers.
+ */
+static ssize_t debug_coreswitch_read(struct file *file, char __user *buffer,
+                                    size_t buffer_len, loff_t *ppos)
+{
+       char cpu_str[16];
+       int ret = 0;
+
+       ret = snprintf(cpu_str, sizeof(cpu_str), "%d\n", mc_active_core());
+       if (ret < 0 || ret >= (int)sizeof(cpu_str))
+               return -EINVAL;
+
+       return simple_read_from_buffer(buffer, buffer_len, ppos,
+                                      cpu_str, ret);
+}
+
+static const struct file_operations debug_coreswitch_ops = {
+       .write = debug_coreswitch_write,
+       .read = debug_coreswitch_read,
+};
+
+/*
+ * Create the user-facing character device (minor 1) and its class
+ * device node, plus the debugfs "structs" entry.  Returns 0 or a
+ * negative error code; on failure the cdev is rolled back.
+ */
+static inline int device_user_init(void)
+{
+       struct device *dev;
+       int ret = 0;
+
+       main_ctx.user_dev = MKDEV(MAJOR(main_ctx.device), 1);
+       /* Create the user node */
+       mc_user_init(&main_ctx.user_cdev);
+       ret = cdev_add(&main_ctx.user_cdev, main_ctx.user_dev, 1);
+       if (ret) {
+               mc_dev_err(ret, "user cdev_add failed");
+               return ret;
+       }
+
+       main_ctx.user_cdev.owner = THIS_MODULE;
+       dev = device_create(main_ctx.class, NULL, main_ctx.user_dev, NULL,
+                           MC_USER_DEVNODE);
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               cdev_del(&main_ctx.user_cdev);
+               mc_dev_err(ret, "user device_create failed");
+               return ret;
+       }
+
+       /* Create debugfs structs entry */
+       debugfs_create_file("structs", 0400, g_ctx.debug_dir, NULL,
+                           &debug_structs_ops);
+
+       return 0;
+}
+
+/* Tear down the user device node and cdev (reverse of device_user_init) */
+static inline void device_user_exit(void)
+{
+       device_destroy(main_ctx.class, main_ctx.user_dev);
+       cdev_del(&main_ctx.user_cdev);
+}
+
+#ifdef MC_PM_RUNTIME
+/*
+ * Reboot notifier: on halt/power-off, flag the TEE state as lost so
+ * the PM resume path performs a full restart.
+ * NOTE(review): the flag is named did_hibernate but is set on
+ * halt/power-off, not SYS_RESTART — confirm intended semantics.
+ */
+static int reboot_notifier(struct notifier_block *nb, unsigned long event,
+                          void *dummy)
+{
+       switch (event) {
+       case SYS_HALT:
+       case SYS_POWER_OFF:
+               main_ctx.did_hibernate = true;
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * PM notifier: suspend/resume the notification queue around system
+ * sleep, and (when hibernation support is compiled in) restart the TEE
+ * after a real hibernation cycle, since its state is lost.
+ */
+static int suspend_notifier(struct notifier_block *nb, unsigned long event,
+                           void *dummy)
+{
+       int ret = 0;
+
+       main_ctx.did_hibernate = false;
+       switch (event) {
+       case PM_SUSPEND_PREPARE:
+               return nq_suspend();
+       case PM_POST_SUSPEND:
+               return nq_resume();
+#ifdef TRUSTONIC_HIBERNATION_SUPPORT
+       case PM_HIBERNATION_PREPARE:
+               /* Try to stop the TEE nicely (ignore failure) */
+               nq_stop();
+               break;
+       case PM_POST_HIBERNATION:
+               if (main_ctx.did_hibernate) {
+                       /* Really did hibernate */
+                       client_cleanup();
+                       main_ctx.start_ret = TEE_START_NOT_TRIGGERED;
+                       return mobicore_start();
+               }
+
+               /* Did not hibernate, just restart the TEE */
+               ret = nq_start();
+#endif
+       }
+
+       return ret;
+}
+#endif /* MC_PM_RUNTIME */
+
+/*
+ * Query the TEE version info, log it, and verify that the MCI protocol
+ * version is exactly the one this driver implements (1.7).
+ * Returns 0, an mcp_get_version() error, or -EHOSTDOWN on mismatch.
+ */
+static inline int check_version(void)
+{
+       struct mc_version_info version_info;
+       int ret;
+
+       /* Must be called before creating the user device node to avoid race */
+       ret = mcp_get_version(&version_info);
+       if (ret)
+               return ret;
+
+       /* CMP version is meaningless in this case and is thus not printed */
+       mc_dev_info("\n"
+                   "    product_id        = %s\n"
+                   "    version_mci       = 0x%08x\n"
+                   "    version_so        = 0x%08x\n"
+                   "    version_mclf      = 0x%08x\n"
+                   "    version_container = 0x%08x\n"
+                   "    version_mc_config = 0x%08x\n"
+                   "    version_tl_api    = 0x%08x\n"
+                   "    version_dr_api    = 0x%08x\n"
+                   "    version_nwd       = 0x%08x\n",
+                   version_info.product_id,
+                   version_info.version_mci,
+                   version_info.version_so,
+                   version_info.version_mclf,
+                   version_info.version_container,
+                   version_info.version_mc_config,
+                   version_info.version_tl_api,
+                   version_info.version_dr_api,
+                   version_info.version_nwd);
+
+       /* Determine which features are supported */
+       if (version_info.version_mci != MC_VERSION(1, 7)) {
+               ret = -EHOSTDOWN;
+               mc_dev_err(ret, "TEE incompatible with this driver");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Xen DomU variant of TEE start: only check the version and create the
+ * user device (the backend in Dom0 owns the real TEE).  Idempotent: a
+ * previous result is returned as-is under start_mutex.
+ */
+static int mobicore_start_domu(void)
+{
+       mutex_lock(&main_ctx.start_mutex);
+       if (main_ctx.start_ret != TEE_START_NOT_TRIGGERED)
+               goto end;
+
+       /* Must be called before creating the user device node to avoid race */
+       main_ctx.start_ret = check_version();
+       if (main_ctx.start_ret)
+               goto end;
+
+       main_ctx.start_ret = device_user_init();
+end:
+       mutex_unlock(&main_ctx.start_mutex);
+       return main_ctx.start_ret;
+}
+
+/*
+ * Bring the TEE up: start NQ, MCP and IWP, check the protocol version,
+ * register PM/reboot notifiers (when MC_PM_RUNTIME), start the Xen
+ * backend on Dom0, then create the user device.  Idempotent: the result
+ * is cached in main_ctx.start_ret under start_mutex.  On failure, all
+ * previously-started components are unwound via the goto ladder below.
+ */
+static int mobicore_start(void)
+{
+       int ret;
+
+       mutex_lock(&main_ctx.start_mutex);
+       if (main_ctx.start_ret != TEE_START_NOT_TRIGGERED)
+               goto got_ret;
+
+       ret = nq_start();
+       if (ret) {
+               mc_dev_err(ret, "NQ start failed");
+               goto err_nq;
+       }
+
+       ret = mcp_start();
+       if (ret) {
+               mc_dev_err(ret, "MCP start failed");
+               goto err_mcp;
+       }
+
+       ret = iwp_start();
+       if (ret) {
+               mc_dev_err(ret, "IWP start failed");
+               goto err_iwp;
+       }
+
+       /* Must be called before creating the user device node to avoid race */
+       ret = check_version();
+       if (ret)
+               goto err_version;
+
+#ifdef MC_PM_RUNTIME
+       main_ctx.reboot_notifier.notifier_call = reboot_notifier;
+       ret = register_reboot_notifier(&main_ctx.reboot_notifier);
+       if (ret) {
+               mc_dev_err(ret, "reboot notifier registration failed");
+               goto err_pm_notif;
+       }
+
+       main_ctx.pm_notifier.notifier_call = suspend_notifier;
+       ret = register_pm_notifier(&main_ctx.pm_notifier);
+       if (ret) {
+               /* Roll back the reboot notifier registered just above */
+               unregister_reboot_notifier(&main_ctx.reboot_notifier);
+               mc_dev_err(ret, "PM notifier register failed");
+               goto err_pm_notif;
+       }
+#endif
+
+       if (is_xen_dom0()) {
+               ret = xen_be_init();
+               if (ret)
+                       goto err_xen_be;
+       }
+
+       ret = device_user_init();
+       if (ret)
+               goto err_device_user;
+
+       main_ctx.start_ret = 0;
+       goto got_ret;
+
+       /* Unwind in reverse order of initialization */
+err_device_user:
+       if (is_xen_dom0())
+               xen_be_exit();
+err_xen_be:
+#ifdef MC_PM_RUNTIME
+       unregister_reboot_notifier(&main_ctx.reboot_notifier);
+       unregister_pm_notifier(&main_ctx.pm_notifier);
+err_pm_notif:
+#endif
+err_version:
+       iwp_stop();
+err_iwp:
+       mcp_stop();
+err_mcp:
+       nq_stop();
+err_nq:
+       main_ctx.start_ret = ret;
+got_ret:
+       mutex_unlock(&main_ctx.start_mutex);
+       return main_ctx.start_ret;
+}
+
+/*
+ * Tear the TEE down: remove the user device, stop the Xen backend on
+ * Dom0, and (except on DomU, which never started them) unregister the
+ * PM/reboot notifiers and stop IWP, MCP and NQ.
+ */
+static void mobicore_stop(void)
+{
+       device_user_exit();
+       if (is_xen_dom0())
+               xen_be_exit();
+
+       if (!is_xen_domu()) {
+#ifdef MC_PM_RUNTIME
+               unregister_reboot_notifier(&main_ctx.reboot_notifier);
+               unregister_pm_notifier(&main_ctx.pm_notifier);
+#endif
+               iwp_stop();
+               mcp_stop();
+               nq_stop();
+       }
+}
+
+/*
+ * Block until a TEE start attempt has completed, polling once per
+ * second, and return its result (0 or a negative error code).
+ */
+int mc_wait_tee_start(void)
+{
+       int ret;
+
+       mutex_lock(&main_ctx.start_mutex);
+       while (main_ctx.start_ret == TEE_START_NOT_TRIGGERED) {
+               /* Drop the lock while sleeping so the starter can progress */
+               mutex_unlock(&main_ctx.start_mutex);
+               ssleep(1);
+               mutex_lock(&main_ctx.start_mutex);
+       }
+
+       ret = main_ctx.start_ret;
+       mutex_unlock(&main_ctx.start_mutex);
+       return ret;
+}
+
+/*
+ * Allocate the char device region (two minors: admin and user) and the
+ * "trustonic_tee" device class shared by both nodes.
+ */
+static inline int device_common_init(void)
+{
+       int ret;
+
+       ret = alloc_chrdev_region(&main_ctx.device, 0, 2, "trustonic_tee");
+       if (ret) {
+               mc_dev_err(ret, "alloc_chrdev_region failed");
+               return ret;
+       }
+
+       main_ctx.class = class_create(THIS_MODULE, "trustonic_tee");
+       if (IS_ERR(main_ctx.class)) {
+               ret = PTR_ERR(main_ctx.class);
+               mc_dev_err(ret, "class_create failed");
+               unregister_chrdev_region(main_ctx.device, 2);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Release the device class and chrdev region (reverse of init) */
+static inline void device_common_exit(void)
+{
+       class_destroy(main_ctx.class);
+       unregister_chrdev_region(main_ctx.device, 2);
+}
+
+/*
+ * Create the admin character device (minor 0) used by the daemon, and
+ * hand mobicore_start/stop to the admin layer as start/stop callbacks.
+ * Unwinds fully on any failure.
+ */
+static inline int device_admin_init(void)
+{
+       struct device *dev;
+       int ret = 0;
+
+       /* Create the ADMIN node */
+       ret = mc_admin_init(&main_ctx.admin_cdev, mobicore_start,
+                           mobicore_stop);
+       if (ret)
+               goto err_init;
+
+       ret = cdev_add(&main_ctx.admin_cdev, main_ctx.device, 1);
+       if (ret) {
+               mc_dev_err(ret, "admin cdev_add failed");
+               goto err_cdev;
+       }
+
+       main_ctx.admin_cdev.owner = THIS_MODULE;
+       dev = device_create(main_ctx.class, NULL, main_ctx.device, NULL,
+                           MC_ADMIN_DEVNODE);
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               mc_dev_err(ret, "admin device_create failed");
+               goto err_device;
+       }
+
+       return 0;
+
+       /* Error unwinding, reverse order */
+err_device:
+       cdev_del(&main_ctx.admin_cdev);
+err_cdev:
+       mc_admin_exit();
+err_init:
+       return ret;
+}
+
+/* Tear down the admin node, cdev and admin layer (reverse of init) */
+static inline void device_admin_exit(void)
+{
+       device_destroy(main_ctx.class, main_ctx.device);
+       cdev_del(&main_ctx.admin_cdev);
+       mc_admin_exit();
+}
+
+/*
+ * This function is called by the kernel during startup or by a insmod command.
+ * This device is installed and registered as cdev, then interrupt and
+ * queue handling is set up
+ */
+/*
+ * This function is called by the kernel during startup or by a insmod command.
+ * This device is installed and registered as cdev, then interrupt and
+ * queue handling is set up
+ */
+static int mobicore_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+
+       /* pdev is NULL when probed without device tree (see
+        * mobicore_probe_not_of)
+        */
+       if (pdev)
+               g_ctx.mcd->of_node = pdev->dev.of_node;
+
+#ifdef MOBICORE_COMPONENT_BUILD_TAG
+       mc_dev_info("MobiCore %s", MOBICORE_COMPONENT_BUILD_TAG);
+#endif
+       /* Hardware does not support ARM TrustZone -> Cannot continue! */
+       if (!is_xen_domu() && !has_security_extensions()) {
+               ret = -ENODEV;
+               mc_dev_err(ret, "Hardware doesn't support ARM TrustZone!");
+               return ret;
+       }
+
+       /* Running in secure mode -> Cannot load the driver! */
+       if (is_secure_mode()) {
+               ret = -ENODEV;
+               mc_dev_err(ret, "Running in secure MODE!");
+               return ret;
+       }
+
+       /* Make sure we can create debugfs entries */
+       g_ctx.debug_dir = debugfs_create_dir("trustonic_tee", NULL);
+
+       /* Initialize debug counters */
+       atomic_set(&g_ctx.c_clients, 0);
+       atomic_set(&g_ctx.c_cbufs, 0);
+       atomic_set(&g_ctx.c_cwsms, 0);
+       atomic_set(&g_ctx.c_sessions, 0);
+       atomic_set(&g_ctx.c_wsms, 0);
+       atomic_set(&g_ctx.c_mmus, 0);
+       atomic_set(&g_ctx.c_maps, 0);
+       atomic_set(&g_ctx.c_slots, 0);
+       atomic_set(&g_ctx.c_xen_maps, 0);
+       atomic_set(&g_ctx.c_xen_fes, 0);
+       main_ctx.start_ret = TEE_START_NOT_TRIGGERED;
+       mutex_init(&main_ctx.start_mutex);
+       mutex_init(&main_ctx.struct_counters_buf_mutex);
+       /* Create debugfs info entries */
+       debugfs_create_file("structs_counters", 0400, g_ctx.debug_dir, NULL,
+                           &debug_struct_counters_ops);
+       debugfs_create_file("active_cpu", 0600, g_ctx.debug_dir, NULL,
+                           &debug_coreswitch_ops);
+
+       /* Initialize common API layer */
+       client_init();
+
+       /* Initialize plenty of nice features */
+       ret = nq_init();
+       if (ret) {
+               mc_dev_err(ret, "NQ init failed");
+               goto fail_nq_init;
+       }
+
+       ret = mcp_init();
+       if (ret) {
+               mc_dev_err(ret, "MCP init failed");
+               goto err_mcp;
+       }
+
+       ret = iwp_init();
+       if (ret) {
+               mc_dev_err(ret, "IWP init failed");
+               goto err_iwp;
+       }
+
+       ret = device_common_init();
+       if (ret)
+               goto err_common;
+
+       if (!is_xen_domu()) {
+               /* Admin dev is for the daemon to communicate with the driver */
+               ret = device_admin_init();
+               if (ret)
+                       goto err_admin;
+       }
+
+       /*
+        * NOTE(review): when MC_DELAYED_TEE_START is defined, the check
+        * below relies on ret still being 0 from the previous calls —
+        * fragile but correct as written.
+        */
+#ifndef MC_DELAYED_TEE_START
+       ret = mobicore_start();
+#endif
+       if (ret)
+               goto err_start;
+
+       return 0;
+
+       /* Error unwinding, reverse order of initialization */
+err_start:
+       if (!is_xen_domu())
+               device_admin_exit();
+err_admin:
+       device_common_exit();
+err_common:
+       iwp_exit();
+err_iwp:
+       mcp_exit();
+err_mcp:
+       nq_exit();
+fail_nq_init:
+       debugfs_remove_recursive(g_ctx.debug_dir);
+       return ret;
+}
+
+/* Probe entry when no matching device-tree node exists */
+static int mobicore_probe_not_of(void)
+{
+       return mobicore_probe(NULL);
+}
+
+static const struct of_device_id of_match_table[] = {
+       { .compatible = MC_DEVICE_PROPNAME },
+       { }
+};
+
+static struct platform_driver mc_plat_driver = {
+       .probe = mobicore_probe,
+       .driver = {
+               .name = "mcd",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_table,
+       }
+};
+
+/*
+ * Module entry point: on Xen DomU register only the front-end; otherwise
+ * register as a platform driver when a matching device-tree node exists,
+ * or probe directly without one.
+ */
+static int __init mobicore_init(void)
+{
+       dev_set_name(g_ctx.mcd, "TEE");
+       /*
+        * Do not remove or change the following trace.
+        * The string "MobiCore" is used to detect if the TEE is in of the image
+        */
+       mc_dev_info("MobiCore mcDrvModuleApi version is %d.%d",
+                   MCDRVMODULEAPI_VERSION_MAJOR,
+                   MCDRVMODULEAPI_VERSION_MINOR);
+
+       /* In a Xen DomU, just register the front-end */
+       if (is_xen_domu())
+               return xen_fe_init(mobicore_probe_not_of, mobicore_start_domu);
+
+       main_ctx.use_platform_driver =
+               of_find_compatible_node(NULL, NULL, MC_DEVICE_PROPNAME);
+       if (main_ctx.use_platform_driver)
+               return platform_driver_register(&mc_plat_driver);
+
+       return mobicore_probe_not_of();
+}
+
+/*
+ * Module exit point: unwind mobicore_init()/mobicore_probe() in reverse.
+ * NOTE(review): mobicore_stop() is not called here — the user device and
+ * notifiers appear to be left to the admin layer; confirm intended.
+ */
+static void __exit mobicore_exit(void)
+{
+       if (is_xen_domu())
+               xen_fe_exit();
+
+       if (main_ctx.use_platform_driver)
+               platform_driver_unregister(&mc_plat_driver);
+
+       if (!is_xen_domu())
+               device_admin_exit();
+
+       device_common_exit();
+       iwp_exit();
+       mcp_exit();
+       nq_exit();
+       debugfs_remove_recursive(g_ctx.debug_dir);
+}
+
+module_init(mobicore_init);
+module_exit(mobicore_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore driver");
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/main.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/main.h
new file mode 100755 (executable)
index 0000000..03baa40
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_MAIN_H_
+#define _MC_MAIN_H_
+
+#include <linux/device.h>      /* dev_* macros */
+#include <linux/slab.h>                /* gfp_t */
+#include <linux/fs.h>          /* struct inode and struct file */
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <xen/xen.h>
+
+#define MC_VERSION(major, minor) \
+               ((((major) & 0x0000ffff) << 16) | ((minor) & 0x0000ffff))
+#define MC_VERSION_MAJOR(x) ((x) >> 16)
+#define MC_VERSION_MINOR(x) ((x) & 0xffff)
+
+#define mc_dev_err(__ret__, fmt, ...) \
+       dev_err(g_ctx.mcd, "ERROR %d %s: " fmt "\n", \
+               __ret__, __func__, ##__VA_ARGS__)
+
+#define mc_dev_info(fmt, ...) \
+       dev_info(g_ctx.mcd, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#ifdef DEBUG
+#define mc_dev_devel(fmt, ...) \
+       dev_info(g_ctx.mcd, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* DEBUG */
+#define mc_dev_devel(...)              do {} while (0)
+#endif /* !DEBUG */
+
+#define TEEC_TT_LOGIN_KERNEL   0x80000000
+
+#define TEE_START_NOT_TRIGGERED 1
+
+/* MobiCore Driver Kernel Module context data. */
+struct mc_device_ctx {
+       struct device           *mcd;
+       /* debugfs root */
+       struct dentry           *debug_dir;
+
+       /* Debug counters */
+       atomic_t                c_clients;
+       atomic_t                c_cbufs;
+       atomic_t                c_cwsms;
+       atomic_t                c_sessions;
+       atomic_t                c_wsms;
+       atomic_t                c_mmus;
+       atomic_t                c_maps;
+       atomic_t                c_slots;
+       atomic_t                c_xen_maps;
+       atomic_t                c_xen_fes;
+};
+
+extern struct mc_device_ctx g_ctx;
+
+/* Debug stuff */
+struct kasnprintf_buf {
+       struct mutex mutex;     /* Protect buf/size/off access */
+       gfp_t gfp;
+       void *buf;
+       int size;
+       int off;
+};
+
+/* Wait for TEE to start and get status */
+int mc_wait_tee_start(void);
+
+extern __printf(2, 3)
+int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...);
+ssize_t debug_generic_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos,
+                          int (*function)(struct kasnprintf_buf *buf));
+int debug_generic_open(struct inode *inode, struct file *file);
+int debug_generic_release(struct inode *inode, struct file *file);
+
+/* Compat shim: kref_read() only exists since kernel 4.11 */
+#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+static inline unsigned int kref_read(struct kref *kref)
+{
+       return atomic_read(&kref->refcount);
+}
+#endif
+
+/* Xen support */
+
+#ifdef CONFIG_XEN
+#if KERNEL_VERSION(4, 4, 0) <= LINUX_VERSION_CODE
+#define TRUSTONIC_XEN_DOMU
+#endif
+#endif
+
+/* True when running as the Xen initial (privileged) domain */
+static inline bool is_xen_dom0(void)
+{
+#if KERNEL_VERSION(3, 18, 0) <= LINUX_VERSION_CODE
+       return xen_domain() && xen_initial_domain();
+#else
+       return false;
+#endif
+}
+
+/* True when running as a Xen guest domain (front-end role) */
+static inline bool is_xen_domu(void)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+       return xen_domain() && !xen_initial_domain();
+#else
+       return false;
+#endif
+}
+
+#endif /* _MC_MAIN_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/gptci.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/gptci.h
new file mode 100755 (executable)
index 0000000..318d2b7
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GP_TCI_H_
+#define _GP_TCI_H_
+
+struct tee_value {
+       u32 a;
+       u32 b;
+};
+
+struct _teec_memory_reference_internal {
+       u32 sva;
+       u32 len;
+       u32 output_size;
+};
+
+union _teec_parameter_internal {
+       struct tee_value                       value;
+       struct _teec_memory_reference_internal memref;
+};
+
+enum _teec_tci_type {
+       _TA_OPERATION_OPEN_SESSION   = 1,
+       _TA_OPERATION_INVOKE_COMMAND = 2,
+       _TA_OPERATION_CLOSE_SESSION  = 3,
+};
+
+struct _teec_operation_internal {
+       enum _teec_tci_type            type;
+       u32                            command_id;
+       u32                            param_types;
+       union _teec_parameter_internal params[4];
+       bool                           is_cancelled;
+       u8                             rfu_padding[3];
+};
+
+struct _teec_tci {
+       char                            header[8];
+       struct teec_uuid                destination;
+       struct _teec_operation_internal operation;
+       u32                             ready;
+       u32                             return_origin;
+       u32                             return_status;
+};
+
+/**
+ * Termination codes
+ */
+#define TA_EXIT_CODE_PANIC       300
+#define TA_EXIT_CODE_TCI         301
+#define TA_EXIT_CODE_PARAMS      302
+#define TA_EXIT_CODE_FINISHED    303
+#define TA_EXIT_CODE_SESSIONSTATE 304
+#define TA_EXIT_CODE_CREATEFAILED 305
+
+#endif /* _GP_TCI_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcifc.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcifc.h
new file mode 100755 (executable)
index 0000000..312dfe0
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MCIFC_H_
+#define MCIFC_H_
+
+#include "platform.h"
+
+/** @name MobiCore FastCall Defines
+ * Defines for the two different FastCalls.
+ */
+/** @{ */
+
+/* --- global ---- */
+#define MC_FC_INVALID  ((u32)0)  /**< Invalid FastCall ID */
+
+#if (defined(CONFIG_ARM64) && !defined(MC_ARMV7_FC)) || (defined(MC_AARCH32_FC))
+
+#define FASTCALL_OWNER_TZOS          (0x3F000000)
+#define FASTCALL_ATOMIC_MASK         BIT(31)
+/** Trusted OS Fastcalls SMC32 */
+#define MC_FC_STD32_BASE \
+                       ((u32)(FASTCALL_OWNER_TZOS | FASTCALL_ATOMIC_MASK))
+/* SMC32 Trusted OS owned Fastcalls */
+#define MC_FC_STD32(x) ((u32)(MC_FC_STD32_BASE + (x)))
+
+#define MC_FC_INIT     MC_FC_STD32(1)  /**< Initializing FastCall. */
+#define MC_FC_INFO     MC_FC_STD32(2)  /**< Info FastCall. */
+#define MC_FC_MEM_TRACE        MC_FC_STD32(10)  /**< Enable SWd tracing via memory */
+#define MC_FC_SWAP_CPU MC_FC_STD32(54)  /**< Change new active Core */
+
+#else
+
+#define MC_FC_INIT     ((u32)(-1))  /**< Initializing FastCall. */
+#define MC_FC_INFO     ((u32)(-2))  /**< Info FastCall. */
+#define MC_FC_MEM_TRACE        ((u32)(-31))  /**< Enable SWd tracing via memory */
+#define MC_FC_SWAP_CPU ((u32)(0x84000005))  /**< Change new active Core */
+
+#endif
+
+/** @} */
+
+/** @name MobiCore SMC Defines
+ * Defines the different secure monitor calls (SMC) for world switching.
+ */
+/** Yield to switch from NWd to SWd. */
+#define MC_SMC_N_YIELD                 3
+/** SIQ to switch from NWd to SWd. */
+#define MC_SMC_N_SIQ                   4
+/** @} */
+
+/** @name MobiCore status
+ *  MobiCore status information.
+ */
+/** MobiCore is not yet initialized. FastCall FcInit() to set up MobiCore. */
+#define MC_STATUS_NOT_INITIALIZED      0
+/** Bad parameters have been passed in FcInit(). */
+#define MC_STATUS_BAD_INIT             1
+/** MobiCore did initialize properly. */
+#define MC_STATUS_INITIALIZED          2
+/** MobiCore kernel halted due to an unrecoverable exception. Further
+ * information is available via extended info
+ */
+#define MC_STATUS_HALT                 3
+/** @} */
+
+/** @name Extended Info Identifiers
+ *  Extended info parameters for MC_FC_INFO to obtain further information
+ *  depending on MobiCore state.
+ */
+/** Version of the MobiCore Control Interface (MCI) */
+#define MC_EXT_INFO_ID_MCI_VERSION     0
+/** MobiCore control flags */
+#define MC_EXT_INFO_ID_FLAGS           1
+/** MobiCore halt condition code */
+#define MC_EXT_INFO_ID_HALT_CODE       2
+/** MobiCore halt condition instruction pointer */
+#define MC_EXT_INFO_ID_HALT_IP         3
+/** MobiCore fault counter */
+#define MC_EXT_INFO_ID_FAULT_CNT       4
+/** MobiCore last fault cause */
+#define MC_EXT_INFO_ID_FAULT_CAUSE     5
+/** MobiCore last fault meta */
+#define MC_EXT_INFO_ID_FAULT_META      6
+/** MobiCore last fault threadid */
+#define MC_EXT_INFO_ID_FAULT_THREAD    7
+/** MobiCore last fault instruction pointer */
+#define MC_EXT_INFO_ID_FAULT_IP                8
+/** MobiCore last fault stack pointer */
+#define MC_EXT_INFO_ID_FAULT_SP                9
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_DFSR 10
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_ADFSR        11
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_DFAR 12
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_IFSR 13
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_AIFSR        14
+/** MobiCore last fault ARM arch information */
+#define MC_EXT_INFO_ID_FAULT_ARCH_IFAR 15
+/** MobiCore configured by Daemon via fc_init flag */
+#define MC_EXT_INFO_ID_MC_CONFIGURED   16
+/** MobiCore scheduling status: idle/non-idle */
+#define MC_EXT_INFO_ID_MC_SCHED_STATUS 17
+/** MobiCore runtime status: initialized, halted */
+#define MC_EXT_INFO_ID_MC_STATUS       18
+/** MobiCore exception handler last partner */
+#define MC_EXT_INFO_ID_MC_EXC_PARTNER  19
+/** MobiCore exception handler last peer */
+#define MC_EXT_INFO_ID_MC_EXC_IPCPEER  20
+/** MobiCore exception handler last IPC message */
+#define MC_EXT_INFO_ID_MC_EXC_IPCMSG   21
+/** MobiCore exception handler last IPC data */
+#define MC_EXT_INFO_ID_MC_EXC_IPCDATA  22
+/** MobiCore exception handler last UUID (uses 4 slots: 23 to 26) */
+#define MC_EXT_INFO_ID_MC_EXC_UUID     23
+#define MC_EXT_INFO_ID_MC_EXC_UUID1    24
+#define MC_EXT_INFO_ID_MC_EXC_UUID2    25
+#define MC_EXT_INFO_ID_MC_EXC_UUID3    26
+
+/** @} */
+
+/** @name FastCall return values
+ * Return values of the MobiCore FastCalls.
+ */
+/** No error. Everything worked fine. */
+#define MC_FC_RET_OK                           0
+/** FastCall was not successful. */
+#define MC_FC_RET_ERR_INVALID                  1
+/** MobiCore has already been initialized. */
+#define MC_FC_RET_ERR_ALREADY_INITIALIZED      5
+/** Call is not allowed. */
+#define TEE_FC_RET_ERR_NOABILITY            6
+/** @} */
+
+/** @name Init FastCall flags
+ * Return flags of the Init FastCall.
+ */
+/** SWd uses LPAE MMU table format. */
+#define MC_FC_INIT_FLAG_LPAE                   BIT(0)
+/** @} */
+
+#endif /* MCIFC_H_ */
+
+/** @} */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mciiwp.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mciiwp.h
new file mode 100755 (executable)
index 0000000..ecb0055
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MCIIWP_H_
+#define MCIIWP_H_
+
+#include "public/GP/tee_client_types.h" /* for struct teec_uuid; FIXME: GP type definitions are mixed up */
+
+/** Session ID for notifications for the Dragon CA-to-TA communication protocol
+ *
+ * Session IDs are distinct from any valid MCP session identifier
+ * and from the existing pseudo-session identifiers:
+ * - SID_MCP = 0
+ * - SID_INVALID = 0xffffffff
+ *
+ * A session ID is a thread ID, and since thread IDs have a nonzero task ID as
+ * their lowest 16 bits, we can use values of the form 0x????0000
+ */
+#define SID_OPEN_SESSION        (0x00010000)
+#define SID_INVOKE_COMMAND      (0x00020000)
+#define SID_CLOSE_SESSION       (0x00030000)
+#define SID_CANCEL_OPERATION    (0x00040000)
+#define SID_MEMORY_REFERENCE    (0x00050000)
+#define SID_OPEN_TA             (0x00060000)
+#define SID_REQ_TA              (0x00070000)
+
+/* To quickly detect IWP notifications */
+#define SID_IWP_NOTIFICATION \
+       (SID_OPEN_SESSION | SID_INVOKE_COMMAND | SID_CLOSE_SESSION | \
+        SID_CANCEL_OPERATION | SID_MEMORY_REFERENCE | SID_OPEN_TA | SID_REQ_TA)
+
+struct interworld_parameter_value {
+       u32     a;              /**< First 32-bit value */
+       u32     b;              /**< Second 32-bit value */
+       u8      unused[8];      /**< Pad to the size of the other variants */
+};
+
+/** The API parameter type TEEC_MEMREF_WHOLE is translated into these types
+ * and does not appear in the inter-world protocol.
+ *
+ * - memref_handle references a previously registered memory reference;
+ *   the bytes used lie in ['offset', 'offset + size') within it
+ *
+ * These sizes must be contained within the memory reference.
+ */
+struct interworld_parameter_memref {
+       u32     offset;         /**< Start offset within the reference */
+       u32     size;           /**< Number of bytes used */
+       u32     memref_handle;  /**< Handle of the registered reference */
+       u32     unused;         /**< Padding */
+};
+
+/** This structure is used for the parameter types TEEC_MEMREF_TEMP_xxx.
+ *
+ * The parameter is located in World Shared Memory which is established
+ * for the command and torn down afterwards.
+ *
+ * The number of pages to share is 'size + offset' divided by the page
+ * size, rounded up.
+ * Inside the shared pages, the buffer starts at address 'offset'
+ * and ends after 'size' bytes.
+ *
+ * - wsm_type parameter may be WSM_CONTIGUOUS or WSM_L1.
+ * - offset must be less than the page size (4096).
+ * - size must be less than 0xfffff000.
+ */
+struct interworld_parameter_tmpref {
+       u16     wsm_type;       /**< WSM_CONTIGUOUS or WSM_L1 */
+       u16     offset;         /**< Buffer start within the first page */
+       u32     size;           /**< Buffer size in bytes */
+       u64     physical_address;       /**< Start of the shared pages */
+};
+
+/**
+ * A single inter-world parameter, in any of the encodings above.
+ */
+union interworld_parameter {
+       struct interworld_parameter_value       value;
+       struct interworld_parameter_memref      memref;
+       struct interworld_parameter_tmpref      tmpref;
+};
+
+/**
+ * An inter-world session structure represents an active session between
+ * a normal world client and RTM.
+ * It is located in the MCI buffer, must be 8-byte aligned
+ *
+ * NB : since the session structure is in shared memory, it must have the
+ * same layout on both sides (normal world kernel and RTM).
+ * All types use platform endianness (specifically, the endianness used by
+ * the secure world).
+ */
+struct interworld_session {
+       u32     status;         /**< Operation status */
+       u32     return_origin;  /**< GP return origin */
+       u16     session_handle; /**< Session identifier */
+       u16     param_types;    /**< Packed parameter types */
+
+       union {
+               u32 command_id;    /**< invoke-command only */
+               u32 login;         /**< open-session only */
+       };
+
+       union interworld_parameter params[4];
+
+       /* The following fields are only used during open-session */
+       struct teec_uuid target_uuid;
+       struct teec_uuid client_uuid;
+};
+
+#endif /* MCIIWP_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcimcp.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcimcp.h
new file mode 100755 (executable)
index 0000000..16a1107
--- /dev/null
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MCP_H_
+#define MCP_H_
+
+#include "mci/mcloadformat.h"
+
+/** Indicates a response */
+#define FLAG_RESPONSE          BIT(31)
+
+/** MobiCore Return Code Defines.
+ * List of the possible MobiCore return codes.
+ */
+enum mcp_result {
+       /** Memory has successfully been mapped */
+       MC_MCP_RET_OK                                   =  0,
+       /** The session ID is invalid */
+       MC_MCP_RET_ERR_INVALID_SESSION                  =  1,
+       /** The UUID of the Trustlet is unknown */
+       MC_MCP_RET_ERR_UNKNOWN_UUID                     =  2,
+       /** The ID of the driver is unknown */
+       MC_MCP_RET_ERR_UNKNOWN_DRIVER_ID                =  3,
+       /** No more sessions are allowed */
+       MC_MCP_RET_ERR_NO_MORE_SESSIONS                 =  4,
+       /** The container is invalid */
+       MC_MCP_RET_ERR_CONTAINER_INVALID                =  5,
+       /** The Trustlet is invalid */
+       MC_MCP_RET_ERR_TRUSTLET_INVALID                 =  6,
+       /** The memory block has already been mapped before */
+       MC_MCP_RET_ERR_ALREADY_MAPPED                   =  7,
+       /** Alignment or length error in the command parameters */
+       MC_MCP_RET_ERR_INVALID_PARAM                    =  8,
+       /** No space left in the virtual address space of the session */
+       MC_MCP_RET_ERR_OUT_OF_RESOURCES                 =  9,
+       /** WSM type unknown or broken WSM */
+       MC_MCP_RET_ERR_INVALID_WSM                      = 10,
+       /** unknown error */
+       MC_MCP_RET_ERR_UNKNOWN                          = 11,
+       /** Length of map invalid */
+       MC_MCP_RET_ERR_INVALID_MAPPING_LENGTH           = 12,
+       /** Map can only be applied to Trustlet session */
+       MC_MCP_RET_ERR_MAPPING_TARGET                   = 13,
+       /** Couldn't open crypto session */
+       MC_MCP_RET_ERR_OUT_OF_CRYPTO_RESOURCES          = 14,
+       /** System Trustlet signature verification failed */
+       MC_MCP_RET_ERR_SIGNATURE_VERIFICATION_FAILED    = 15,
+       /** System Trustlet public key is wrong */
+       MC_MCP_RET_ERR_WRONG_PUBLIC_KEY                 = 16,
+       /** Wrong container type(s) */
+       MC_MCP_RET_ERR_CONTAINER_TYPE_MISMATCH          = 17,
+       /** Container is locked (or not activated) */
+       MC_MCP_RET_ERR_CONTAINER_LOCKED                 = 18,
+       /** SPID is not registered with root container */
+       MC_MCP_RET_ERR_SP_NO_CHILD                      = 19,
+       /** UUID is not registered with sp container */
+       MC_MCP_RET_ERR_TL_NO_CHILD                      = 20,
+       /** Unwrapping of root container failed */
+       MC_MCP_RET_ERR_UNWRAP_ROOT_FAILED               = 21,
+       /** Unwrapping of service provider container failed */
+       MC_MCP_RET_ERR_UNWRAP_SP_FAILED                 = 22,
+       /** Unwrapping of Trustlet container failed */
+       MC_MCP_RET_ERR_UNWRAP_TRUSTLET_FAILED           = 23,
+       /** Container version mismatch */
+       MC_MCP_RET_ERR_CONTAINER_VERSION_MISMATCH       = 24,
+       /** Decryption of service provider trustlet failed */
+       MC_MCP_RET_ERR_SP_TL_DECRYPTION_FAILED          = 25,
+       /** Hash check of service provider trustlet failed */
+       MC_MCP_RET_ERR_SP_TL_HASH_CHECK_FAILED          = 26,
+       /** Activation/starting of task failed */
+       MC_MCP_RET_ERR_LAUNCH_TASK_FAILED               = 27,
+       /** Closing of task not yet possible, try again later */
+       MC_MCP_RET_ERR_CLOSE_TASK_FAILED                = 28,
+       /** Service is blocked and a session cannot be opened to it */
+       MC_MCP_RET_ERR_SERVICE_BLOCKED                  = 29,
+       /** Service is locked and a session cannot be opened to it */
+       MC_MCP_RET_ERR_SERVICE_LOCKED                   = 30,
+       /** Service was forcefully killed (due to an administrative command) */
+       MC_MCP_RET_ERR_SERVICE_KILLED                   = 31,
+       /** Service version is lower than the one installed. */
+       MC_MCP_RET_ERR_DOWNGRADE_NOT_AUTHORIZED         = 32,
+       /** Filesystem not yet ready. */
+       MC_MCP_RET_ERR_SYSTEM_NOT_READY                 = 33,
+       /** The command is unknown */
+       MC_MCP_RET_ERR_UNKNOWN_COMMAND                  = 50,
+       /** The command data is invalid */
+       MC_MCP_RET_ERR_INVALID_DATA                     = 51
+};
+
+/** Possible MCP Command IDs
+ * Command ID must be between 0 and 0x7FFFFFFF.
+ */
+enum cmd_id {
+       /** Invalid command ID */
+       MC_MCP_CMD_ID_INVALID           = 0x00,
+       /** Open a session */
+       MC_MCP_CMD_OPEN_SESSION         = 0x01,
+       /** Close an existing session */
+       MC_MCP_CMD_CLOSE_SESSION        = 0x03,
+       /** Map WSM to session */
+       MC_MCP_CMD_MAP                  = 0x04,
+       /** Unmap WSM from session */
+       MC_MCP_CMD_UNMAP                = 0x05,
+       /** Prepare for suspend */
+       MC_MCP_CMD_SUSPEND              = 0x06,
+       /** Resume from suspension */
+       MC_MCP_CMD_RESUME               = 0x07,
+       /** Get MobiCore version information */
+       MC_MCP_CMD_GET_MOBICORE_VERSION = 0x09,
+       /** Close MCP and unmap MCI */
+       MC_MCP_CMD_CLOSE_MCP            = 0x0A,
+       /** Load token for device attestation */
+       MC_MCP_CMD_LOAD_TOKEN           = 0x0B,
+       /** Check that TA can be loaded */
+       MC_MCP_CMD_CHECK_LOAD_TA        = 0x0C,
+};
+
+/*
+ * Types of WSM known to the MobiCore.
+ */
+#define WSM_TYPE_MASK          0xFF
+#define WSM_INVALID            0       /**< Invalid memory type */
+#define WSM_L1                 3       /**< Buffer mapping uses fake L1 table */
+/** Bitflag indicating that the buffer should be uncached */
+#define WSM_UNCACHED           0x100
+
+/*
+ * Magic number used to identify if Open Command supports GP client
+ * authentication.
+ */
+#define MC_GP_CLIENT_AUTH_MAGIC        0x47504131      /* "GPA1" */
+
+/*
+ * Initialisation values flags
+ */
+/* Set if IRQ is present */
+#define MC_IV_FLAG_IRQ         BIT(0)
+/* Set if GP TIME is supported */
+#define MC_IV_FLAG_TIME                BIT(1)
+/* Set if GP client uses interworld session */
+#define MC_IV_FLAG_IWP         BIT(2)
+
+struct init_values {
+       u32     flags;          /**< MC_IV_FLAG_* bit mask */
+       u32     irq;            /**< IRQ number, valid if MC_IV_FLAG_IRQ -- TODO confirm */
+       u32     time_ofs;       /**< GP time buffer offset in MCI -- TODO confirm */
+       u32     time_len;       /**< GP time buffer length */
+       /* interworld session buffer offset in MCI */
+       u32     iws_buf_ofs;
+       /* interworld session buffer size */
+       u32     iws_buf_size;
+       u8      padding[8];     /**< Reserved */
+};
+
+/** Command header.
+ * It just contains the command ID. Only values specified in cmd_id are
+ * allowed as command IDs.  If the command ID is unspecified the MobiCore
+ * returns an empty response with the result set to
+ * MC_MCP_RET_ERR_UNKNOWN_COMMAND.
+ */
+struct cmd_header {
+       enum cmd_id     cmd_id; /**< Command ID of the command */
+};
+
+/** Response header.
+ * MobiCore will reply to every MCP command with an MCP response.  Like the MCP
+ * command the response consists of a header followed by response data. The
+ * response is written to the same memory location as the MCP command.
+ */
+struct rsp_header {
+       u32             rsp_id; /**< Command ID | FLAG_RESPONSE */
+       enum mcp_result result; /**< Result of the command execution */
+};
+
+/** @defgroup CMD MCP Commands
+ */
+
+/** @defgroup ASMCMD Administrative Commands
+ */
+
+/** @defgroup MCPGETMOBICOREVERSION GET_MOBICORE_VERSION
+ * Get MobiCore version info.
+ *
+ */
+
+/** Get MobiCore Version Command */
+struct cmd_get_version {
+       struct cmd_header       cmd_header;     /**< Command header */
+};
+
+/** Get MobiCore Version Command Response */
+struct rsp_get_version {
+       struct rsp_header       rsp_header;     /**< Response header */
+       struct mc_version_info  version_info;   /**< MobiCore version info */
+};
+
+/** @defgroup POWERCMD Power Management Commands
+ */
+
+/** @defgroup MCPSUSPEND SUSPEND
+ * Prepare MobiCore suspension.
+ * This command allows MobiCore and MobiCore drivers to release or clean
+ * resources and save device state.
+ *
+ */
+
+/** Suspend Command */
+struct cmd_suspend {
+       struct cmd_header       cmd_header;     /**< Command header */
+};
+
+/** Suspend Command Response */
+struct rsp_suspend {
+       struct rsp_header       rsp_header;     /**< Response header */
+};
+
+/** @defgroup MCPRESUME RESUME
+ * Resume MobiCore from suspension.
+ * This command allows MobiCore and MobiCore drivers to reinitialize hardware
+ * affected by suspension.
+ *
+ */
+
+/** Resume Command */
+struct cmd_resume {
+       struct cmd_header       cmd_header;     /**< Command header */
+};
+
+/** Resume Command Response */
+struct rsp_resume {
+       struct rsp_header       rsp_header;     /**< Response header */
+};
+
+/** @defgroup SESSCMD Session Management Commands
+ */
+
+/** @defgroup MCPOPEN OPEN
+ * Load and open a session to a Trustlet.
+ * The OPEN command loads Trustlet data to the MobiCore context and opens a
+ * session to the Trustlet.  If wsm_data_type is WSM_INVALID MobiCore tries to
+ * start a pre-installed Trustlet associated with the uuid passed.  The uuid
+ * passed must match the uuid contained in the load data (if available).
+ * On success, MobiCore returns the session ID which can be used for further
+ * communication.
+ */
+
+/** GP client authentication data */
+struct cmd_open_data {
+       u32             mclf_magic;     /**< ASCII "MCLF" on older versions */
+       struct identity identity;       /**< Login method and data */
+};
+
+/** Open Command */
+struct cmd_open {
+       struct cmd_header cmd_header;   /**< Command header */
+       struct mc_uuid_t uuid;          /**< Service UUID */
+       u8              unused[4];      /**< Padding to be 64-bit aligned */
+       u64             adr_tci_buffer; /**< Physical address of the TCI MMU */
+       u64             adr_load_data;  /**< Physical address of the data MMU */
+       u32             ofs_tci_buffer; /**< Offset to the data */
+       u32             len_tci_buffer; /**< Length of the TCI */
+       u32             wsmtype_tci;    /**< Type of WSM used for the TCI */
+       u32             wsm_data_type;  /**< Type of MMU */
+       u32             ofs_load_data;  /**< Offset to the data */
+       u32             len_load_data;  /**< Length of the data to load */
+       union {
+               struct cmd_open_data    cmd_open_data;  /**< Client login data */
+               union mclf_header       tl_header;      /**< Service header */
+       };
+       u32             is_gpta;        /**< true if looking for an SD/GP-TA */
+};
+
+/** Open Command Response */
+struct rsp_open {
+       struct rsp_header       rsp_header;     /**< Response header */
+       u32     session_id;     /**< Session ID */
+};
+
+/** TA Load Check Command */
+struct cmd_check_load {
+       struct cmd_header cmd_header;   /**< Command header */
+       struct mc_uuid_t uuid;  /**< Service UUID */
+       u8              unused[4];      /**< Padding to be 64-bit aligned */
+       u64             adr_load_data;  /**< Physical address of the data */
+       u32             wsm_data_type;  /**< Type of MMU */
+       u32             ofs_load_data;  /**< Offset to the data */
+       u32             len_load_data;  /**< Length of the data to load */
+       union mclf_header tl_header;    /**< Service header */
+};
+
+/** TA Load Check Response */
+struct rsp_check_load {
+       struct rsp_header       rsp_header;     /**< Response header */
+};
+
+/** @defgroup MCPCLOSE CLOSE
+ * Close an existing session to a Trustlet.
+ * The CLOSE command terminates a session and frees all resources in the
+ * MobiCore system which are currently occupied by the session. Before closing
+ * the session, the MobiCore runtime management waits until all pending
+ * operations, like calls to drivers, invoked by the Trustlet have been
+ * terminated.  Mapped memory will automatically be unmapped from the MobiCore
+ * context. The NWd is responsible for processing the freed memory according to
+ * the Rich-OS needs.
+ *
+ */
+
+/** Close Command */
+struct cmd_close {
+       struct cmd_header       cmd_header;     /**< Command header */
+       u32             session_id;     /**< Session ID */
+};
+
+/** Close Command Response */
+struct rsp_close {
+       struct rsp_header       rsp_header;     /**< Response header */
+};
+
+/** @defgroup MCPMAP MAP
+ * Map a portion of memory to a session.
+ * The MAP command provides a block of memory to the context of a service.
+ * The memory then becomes world-shared memory (WSM).
+ * The only allowed memory type here is WSM_L1.
+ */
+
+/** Map Command */
+struct cmd_map {
+       struct cmd_header cmd_header;   /**< Command header */
+       u32             session_id;     /**< Session ID */
+       u32             wsm_type;       /**< Type of MMU */
+       u32             ofs_buffer;     /**< Offset to the payload */
+       u64             adr_buffer;     /**< Physical address of the MMU */
+       u32             len_buffer;     /**< Length of the buffer */
+       u32             flags;          /**< Attributes (read/write) */
+};
+
+#define MCP_MAP_MAX         0x100000    /**< Maximum length for MCP map */
+
+/** Map Command Response */
+struct rsp_map {
+       struct rsp_header rsp_header;   /**< Response header */
+       /** Virtual address the WSM is mapped to, may include an offset! */
+       u32             secure_va;
+};
+
+/** @defgroup MCPUNMAP UNMAP
+ * Unmap a portion of world-shared memory from a session.
+ * The UNMAP command is used to unmap a previously mapped block of
+ * world shared memory from the context of a session.
+ *
+ * Attention: The memory block will be immediately unmapped from the specified
+ * session.  If the service is still accessing the memory, the service will
+ * trigger a segmentation fault.
+ */
+
+/** Unmap Command */
+struct cmd_unmap {
+       struct cmd_header cmd_header;   /**< Command header */
+       u32             session_id;     /**< Session ID */
+       u32             wsm_type;       /**< Type of WSM used of the memory */
+       /** Virtual address the WSM is mapped to, may include an offset! */
+       u32             secure_va;
+       u32             virtual_buffer_len;  /**< Length of virtual buffer */
+};
+
+/** Unmap Command Response */
+struct rsp_unmap {
+       struct rsp_header rsp_header;   /**< Response header */
+};
+
+/** @defgroup MCPLOADTOKEN
+ * Load a token from the normal world and share it with the TEE
+ * If something fails, the device attestation functionality will be disabled
+ */
+
+/** Load Token */
+struct cmd_load_token {
+       struct cmd_header cmd_header;   /**< Command header */
+       u32             wsm_data_type;  /**< Type of MMU */
+       u64             adr_load_data;  /**< Physical address of the MMU */
+       u64             ofs_load_data;  /**< Offset to the data */
+       u64             len_load_data;  /**< Length of the data */
+};
+
+/** Load Token Command Response */
+struct rsp_load_token {
+       struct rsp_header rsp_header;   /**< Response header */
+};
+
+/** Structure of the MCP buffer */
+union mcp_message {
+       struct init_values      init_values;    /**< Initialisation values */
+       struct cmd_header       cmd_header;     /**< Command header */
+       struct rsp_header       rsp_header;     /**< Response header */
+       struct cmd_open         cmd_open;       /**< Load and open service */
+       struct rsp_open         rsp_open;       /**< Open response */
+       struct cmd_close        cmd_close;      /**< Close command */
+       struct rsp_close        rsp_close;      /**< Close response */
+       struct cmd_map          cmd_map;        /**< Map WSM to service */
+       struct rsp_map          rsp_map;        /**< Map response */
+       struct cmd_unmap        cmd_unmap;      /**< Unmap WSM from service */
+       struct rsp_unmap        rsp_unmap;      /**< Unmap response */
+       struct cmd_suspend      cmd_suspend;    /**< Suspend MobiCore */
+       struct rsp_suspend      rsp_suspend;    /**< Suspend response */
+       struct cmd_resume       cmd_resume;     /**< Resume MobiCore */
+       struct rsp_resume       rsp_resume;     /**< Resume response */
+       struct cmd_get_version  cmd_get_version; /**< Get MobiCore Version */
+       struct rsp_get_version  rsp_get_version; /**< Version response */
+       struct cmd_load_token   cmd_load_token; /**< Load token */
+       struct rsp_load_token   rsp_load_token; /**< Load token response */
+       struct cmd_check_load   cmd_check_load; /**< TA load check */
+       struct rsp_check_load   rsp_check_load; /**< Check-load response */
+};
+
+/** Minimum MCP buffer length (in bytes) */
+#define MIN_MCP_LEN         sizeof(union mcp_message)
+
+#define MC_FLAG_NO_SLEEP_REQ   0
+#define MC_FLAG_REQ_TO_SLEEP   1
+
+#define MC_STATE_NORMAL_EXECUTION 0
+#define MC_STATE_READY_TO_SLEEP   1
+
+#define MC_STATE_FLAG_TEE_HALT_MASK BIT(0)
+
+struct sleep_mode {
+       u16             sleep_req;      /**< Ask SWd to get ready to sleep */
+       u16             ready_to_sleep; /**< SWd is now ready to sleep */
+};
+
+/** MobiCore status flags */
+struct mcp_flags {
+       /** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needs scheduling */
+       u32             schedule;
+       struct sleep_mode sleep_mode;   /**< Sleep request/ready handshake */
+       /** Secure-world sleep timeout in milliseconds */
+       s32             timeout_ms;
+       /** TEE flags */
+       u8              tee_flags;
+       /** Reserved for future use */
+       u8              RFU_padding[3];
+};
+
+/** MobiCore is idle. No scheduling required */
+#define MC_FLAG_SCHEDULE_IDLE      0
+/** MobiCore is non idle, scheduling is required */
+#define MC_FLAG_SCHEDULE_NON_IDLE  1
+
+/** MCP buffer structure */
+struct mcp_buffer {
+       struct mcp_flags flags;         /**< MobiCore Flags */
+       union mcp_message message;      /**< MCP message buffer */
+};
+
+#endif /* MCP_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcinq.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcinq.h
new file mode 100755 (executable)
index 0000000..59baf03
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef NQ_H_
+#define NQ_H_
+
+/** \name NQ Size Defines
+ * Minimum and maximum count of elements in the notification queue.
+ */
+#define MIN_NQ_ELEM 1  /**< Minimum notification queue elements */
+#define MAX_NQ_ELEM 64 /**< Maximum notification queue elements */
+
+/* Compute notification queue size in bytes from its number of elements */
+#define NQ_SIZE(n)   (2 * (sizeof(struct notification_queue_header)\
+                       + (n) * sizeof(struct notification)))
+
+/** \name NQ Length Defines
+ * Note that there is one queue for NWd->SWd and one queue for SWd->NWd
+ */
+/** Minimum size for the notification queue data structure */
+#define MIN_NQ_LEN NQ_SIZE(MIN_NQ_ELEM)
+/** Maximum size for the notification queue data structure */
+#define MAX_NQ_LEN NQ_SIZE(MAX_NQ_ELEM)
+
+/** \name Session ID Defines
+ * Standard Session IDs.
+ */
+/** MCP session ID, used to communicate with MobiCore (e.g. to start/stop TA) */
+#define SID_MCP       0
+/** Invalid session id, returned in case of error */
+#define SID_INVALID   0xffffffff
+
+/** Notification data structure */
+struct notification {
+       u32     session_id;     /**< Session ID */
+       s32     payload;        /**< Additional notification info */
+};
+
+/** Notification payload codes.
+ * 0 indicates a plain simple notification,
+ * a positive value is a termination reason from the task,
+ * a negative value is a termination reason from MobiCore.
+ * Possible negative values are given below.
+ */
+enum notification_payload {
+       /** task terminated, but exit code is invalid */
+       ERR_INVALID_EXIT_CODE = -1,
+       /** task terminated due to session end, no exit code available */
+       ERR_SESSION_CLOSE     = -2,
+       /** task terminated due to invalid operation */
+       ERR_INVALID_OPERATION = -3,
+       /** session ID is unknown */
+       ERR_INVALID_SID       = -4,
+       /** session is not active */
+       ERR_SID_NOT_ACTIVE    = -5,
+       /** session was force-killed (due to an administrative command). */
+       ERR_SESSION_KILLED    = -6,
+};
+
+/** Declaration of the notification queue header.
+ * layout as specified in the data structure specification.
+ */
+struct notification_queue_header {
+       u32     write_cnt;      /**< Write counter */
+       u32     read_cnt;       /**< Read counter */
+       u32     queue_size;     /**< Queue size */
+};
+
+/** Queue struct which defines a queue object.
+ * The queue struct is accessed by the queue<operation> type of
+ * function. elementCnt must be a power of two and the power needs
+ * to be smaller than power of u32 (obviously 32).
+ */
+struct notification_queue {
+       struct notification_queue_header hdr;           /**< Queue header */
+       struct notification notification[MIN_NQ_ELEM];  /**< Elements */
+};
+
+#endif /* NQ_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcitime.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcitime.h
new file mode 100755 (executable)
index 0000000..7b08c72
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MCITIME_H_
+#define MCITIME_H_
+
/*
 * Trustonic TEE RICH OS Time:
 * -seconds and nanoseconds since Jan 1, 1970, UTC
 * -monotonic counter
 */
struct mcp_time {
	u64	wall_clock_seconds;	/* Wall clock: seconds since the epoch (UTC) */
	u64	wall_clock_nsec;	/* Wall clock: nanoseconds part */
	u64	monotonic_seconds;	/* Monotonic counter: seconds */
	u64	monotonic_nsec;		/* Monotonic counter: nanoseconds part */
};
+
+#endif /* MCITIME_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcloadformat.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mci/mcloadformat.h
new file mode 100755 (executable)
index 0000000..c13d849
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MCLOADFORMAT_H_
+#define MCLOADFORMAT_H_
+
+/** Trustlet Blob length info */
+#define MC_TLBLOBLEN_MAGIC     0x7672746C      /* Magic for SWd: vrtl */
+#define MAX_SO_CONT_SIZE       512             /* Max size for a container */
+
/** MCLF flags */
/** Loaded service cannot be unloaded from MobiCore. */
#define MC_SERVICE_HEADER_FLAGS_PERMANENT		BIT(0)
/** Service has no WSM control interface. */
#define MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE	BIT(1)
/** Service can be debugged. */
#define MC_SERVICE_HEADER_FLAGS_DEBUGGABLE		BIT(2)
/** New-layout trusted application or trusted driver. */
#define MC_SERVICE_HEADER_FLAGS_EXTENDED_LAYOUT		BIT(3)
+
/** Service type.
 * The service type defines the type of executable.
 */
enum service_type {
	SERVICE_TYPE_ILLEGAL		= 0,
	SERVICE_TYPE_DRIVER		= 1,
	SERVICE_TYPE_SP_TRUSTLET	= 2,
	SERVICE_TYPE_SYSTEM_TRUSTLET	= 3,
	SERVICE_TYPE_MIDDLEWARE		= 4,
	SERVICE_TYPE_LAST_ENTRY		= 5,	/**< Sentinel, keep last */
};
+
/**
 * Descriptor for a memory segment (text or data) of a service binary.
 */
struct segment_descriptor {
	u32	start;	/**< Virtual start address */
	u32	len;	/**< Segment length in bytes */
};
+
/**
 * MCLF intro for data structure identification.
 * Must be the first element of a valid MCLF file.
 */
struct mclf_intro {
	u32	magic;		/**< Header magic value ASCII "MCLF" */
	u32	version;	/**< Version of the MCLF header struct */
};
+
+/**
+ * @defgroup MCLF_VER_V2   MCLF Version 32
+ * @ingroup MCLF_VER
+ *
+ * @addtogroup MCLF_VER_V2
+ */
+
/*
 * GP TA identity.
 */
struct identity {
	u32	login_type;	/**< GP TA login type */
	u8	login_data[16];	/**< GP TA login data */
};
+
/**
 * Version 2.1/2.2 MCLF header.
 */
struct mclf_header_v2 {
	/** MCLF header start with the mandatory intro */
	struct mclf_intro	intro;
	/** Service flags (MC_SERVICE_HEADER_FLAGS_*) */
	u32	flags;
	/** Type of memory the service must be executed from */
	u32	mem_type;
	/** Type of service */
	enum service_type	service_type;
	/** Number of instances which can be run simultaneously */
	u32	num_instances;
	/** Loadable service unique identifier (UUID) */
	struct mc_uuid_t	uuid;
	/** If the service_type is SERVICE_TYPE_DRIVER the Driver ID is used */
	u32	driver_id;
	/**
	 * Number of threads (N) in a service:
	 *   SERVICE_TYPE_SP_TRUSTLET: N = 1
	 *   SERVICE_TYPE_SYSTEM_TRUSTLET: N = 1
	 *   SERVICE_TYPE_DRIVER: N >= 1
	 */
	u32	num_threads;
	/** Virtual text segment */
	struct segment_descriptor text;
	/** Virtual data segment */
	struct segment_descriptor data;
	/** Length of the BSS segment in bytes. MUST be at least 8 byte */
	u32	bss_len;
	/** Virtual start address of service code */
	u32	entry;
	/** Version of the interface the driver exports */
	u32	service_version;
};
+
+/**
+ * @addtogroup MCLF
+ */
+
/** MCLF header */
union mclf_header {
	/** Intro for data identification */
	struct mclf_intro	intro;
	/** Version 2 header */
	struct mclf_header_v2	mclf_header_v2;
};
+
/** Trailing length info for new-format (blob) trustlet binaries */
struct mc_blob_len_info {
	u32	magic;		/**< New blob format magic number */
	u32	root_size;	/**< Root container size */
	u32	sp_size;	/**< SP container size */
	u32	ta_size;	/**< TA container size */
	u32	reserved[4];	/**< Reserved for future use */
};
+
+#endif /* MCLOADFORMAT_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.c
new file mode 100644 (file)
index 0000000..f8a078f
--- /dev/null
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/freezer.h>
+#include <asm/barrier.h>
+#include <linux/irq.h>
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/clock.h> /* local_clock */
+#endif
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#include "mci/mcimcp.h"
+#include "mci/mcifc.h"
+#include "mci/mcinq.h"         /* SID_MCP */
+#include "mci/mcitime.h"       /* struct mcp_time */
+#include "mci/mciiwp.h"
+
+#include "main.h"
+#include "admin.h"             /* tee_object* for 'blob' */
+#include "mmu.h"               /* MMU for 'blob' */
+#include "nq.h"
+#include "xen_fe.h"
+#include "mcp.h"
+
+/* respond timeout for MCP notification, in secs */
+#define MCP_TIMEOUT            10
+#define MCP_RETRIES            5
+#define MCP_NF_QUEUE_SZ                8
+
/* Driver-global MCP state (single instance) */
static struct {
	union mcp_message	*buffer;	/* MCP communication buffer */
	struct mutex		buffer_mutex;	/* Lock for the buffer above */
	struct completion complete;		/* Completed on MCP notification */
	bool mcp_dead;				/* Set once the TEE is unreachable */
	struct mcp_session	mcp_session;	/* Pseudo session for MCP */
	/* Unexpected notification (during MCP open) */
	struct mutex		unexp_notif_mutex;
	struct notification	unexp_notif;
	/* Sessions */
	struct mutex		sessions_lock;
	struct list_head	sessions;
	/* TEE bad state detection */
	struct notifier_block	tee_stop_notifier;
	u32			timeout_period;	/* MCP response timeout, in seconds */
	/* Log of last commands (circular buffer) */
#define LAST_CMDS_SIZE 1024
	struct mutex		last_cmds_mutex;	/* Log protection */
	struct command_info {
		u64			cpu_clk;	/* Kernel time */
		pid_t			pid;		/* Caller PID */
		enum cmd_id		id;		/* MCP command ID */
		u32			session_id;
		char			uuid_str[34];	/* ' ' + 32 hex chars + NUL */
		enum state {
			UNUSED,		/* Unused slot */
			PENDING,	/* Previous command in progress */
			SENT,		/* Waiting for response */
			COMPLETE,	/* Got result */
			FAILED,		/* Something went wrong */
		}			state;	/* Command processing state */
		int			errno;	/* Return code */
		enum mcp_result		result;	/* Command result */
	}				last_cmds[LAST_CMDS_SIZE];
	int				last_cmds_index;	/* Next free slot */
} l_ctx;
+
+static const char *cmd_to_string(enum cmd_id id)
+{
+       switch (id) {
+       case MC_MCP_CMD_ID_INVALID:
+               return "invalid";
+       case MC_MCP_CMD_OPEN_SESSION:
+               return "open session";
+       case MC_MCP_CMD_CLOSE_SESSION:
+               return "close session";
+       case MC_MCP_CMD_MAP:
+               return "map";
+       case MC_MCP_CMD_UNMAP:
+               return "unmap";
+       case MC_MCP_CMD_SUSPEND:
+               return "suspend";
+       case MC_MCP_CMD_RESUME:
+               return "resume";
+       case MC_MCP_CMD_GET_MOBICORE_VERSION:
+               return "get version";
+       case MC_MCP_CMD_CLOSE_MCP:
+               return "close mcp";
+       case MC_MCP_CMD_LOAD_TOKEN:
+               return "load token";
+       case MC_MCP_CMD_CHECK_LOAD_TA:
+               return "check load TA";
+       }
+       return "unknown";
+}
+
+static const char *state_to_string(enum mcp_session_state state)
+{
+       switch (state) {
+       case MCP_SESSION_RUNNING:
+               return "running";
+       case MCP_SESSION_CLOSE_FAILED:
+               return "close failed";
+       case MCP_SESSION_CLOSED:
+               return "closed";
+       }
+       return "error";
+}
+
/*
 * Flag the TEE as unreachable and wake all waiters so they can fail with
 * -EHOSTUNREACH.
 * NOTE(review): walks l_ctx.sessions without holding sessions_lock —
 * presumably safe in the TEE-stop notifier context; confirm no concurrent
 * list mutation can happen here.
 */
static inline void mark_mcp_dead(void)
{
	struct mcp_session *session;

	l_ctx.mcp_dead = true;
	complete(&l_ctx.complete);
	/* Signal all potential waiters that SWd is going away */
	list_for_each_entry(session, &l_ctx.sessions, list)
		complete(&session->completion);
}
+
/* Notifier callback, invoked when the TEE enters a bad state */
static int tee_stop_notifier_fn(struct notifier_block *nb, unsigned long event,
				void *data)
{
	mark_mcp_dead();
	return 0;
}
+
+void mcp_session_init(struct mcp_session *session)
+{
+       nq_session_init(&session->nq_session, false);
+       session->sid = SID_INVALID;
+       INIT_LIST_HEAD(&session->list);
+       mutex_init(&session->notif_wait_lock);
+       init_completion(&session->completion);
+       mutex_init(&session->exit_code_lock);
+       session->exit_code = 0;
+       session->state = MCP_SESSION_RUNNING;
+       session->notif_count = 0;
+}
+
+static inline bool mcp_session_isrunning(struct mcp_session *session)
+{
+       bool ret;
+
+       mutex_lock(&l_ctx.sessions_lock);
+       ret = session->state == MCP_SESSION_RUNNING;
+       mutex_unlock(&l_ctx.sessions_lock);
+       return ret;
+}
+
/*
 * Wait for a notification on @session.
 *
 * session remains valid thanks to the upper layers reference counters, but the
 * SWd session may have died, in which case we are informed.
 *
 * @timeout is in milliseconds; a negative value means wait forever.
 * @silent_expiry suppresses the log message on -ETIME expiry.
 *
 * Return: 0 when a notification was consumed, -EHOSTUNREACH if the TEE died,
 * -ENXIO if the session is not running, -ECOMM if the session reported an
 * exit code, -ETIME on timeout, or -ERESTARTSYS when interrupted.
 */
int mcp_wait(struct mcp_session *session, s32 timeout, bool silent_expiry)
{
	s32 err;
	int ret = 0;

	mutex_lock(&session->notif_wait_lock);
#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu()) {
		ret = xen_mc_wait(session, timeout, silent_expiry);
		mutex_unlock(&session->notif_wait_lock);
		return ret;
	}
#endif

	/* Pre-checks before blocking: TEE alive, session running, no error */
	if (l_ctx.mcp_dead) {
		ret = -EHOSTUNREACH;
		goto end;
	}

	if (!mcp_session_isrunning(session)) {
		ret = -ENXIO;
		goto end;
	}

	mcp_get_err(session, &err);
	if (err) {
		ret = -ECOMM;
		goto end;
	}

	if (timeout < 0) {
		/* Wait forever, abort on signal */
		ret = wait_for_completion_interruptible(&session->completion);
		if (ret)
			goto end;
	} else {
		ret = wait_for_completion_interruptible_timeout(
			&session->completion, timeout * HZ / 1000);
		if (ret < 0)
			/* Interrupted */
			goto end;

		if (!ret) {
			/* Timed out */
			ret = -ETIME;
			goto end;
		}

		ret = 0;
	}

	/* Re-check the same conditions: they may have changed while asleep */
	if (l_ctx.mcp_dead) {
		ret = -EHOSTUNREACH;
		goto end;
	}

	mcp_get_err(session, &err);
	if (err) {
		ret = -ECOMM;
		goto end;
	}

	if (!mcp_session_isrunning(session)) {
		ret = -ENXIO;
		goto end;
	}

end:
	/* Record the notification outcome, except on signal interruption */
	if (!ret)
		nq_session_state_update(&session->nq_session,
					NQ_NOTIF_CONSUMED);
	else if (ret != -ERESTARTSYS)
		nq_session_state_update(&session->nq_session, NQ_NOTIF_DEAD);

	mutex_unlock(&session->notif_wait_lock);
	if (ret && ((ret != -ETIME) || !silent_expiry)) {
#ifdef CONFIG_FREEZER
		if (ret == -ERESTARTSYS && system_freezing_cnt.counter == 1)
			mc_dev_devel("freezing session %x", session->sid);
		else
#endif
			mc_dev_devel("session %x ec %d ret %d",
				     session->sid, session->exit_code, ret);
	}

	return ret;
}
+
+int mcp_get_err(struct mcp_session *session, s32 *err)
+{
+#ifdef TRUSTONIC_XEN_DOMU
+       if (is_xen_domu())
+               return xen_mc_get_err(session, err);
+#endif
+
+       mutex_lock(&session->exit_code_lock);
+       *err = session->exit_code;
+       mutex_unlock(&session->exit_code_lock);
+       if (*err)
+               mc_dev_info("session %x ec %d", session->sid, *err);
+
+       return 0;
+}
+
+static inline int wait_mcp_notification(void)
+{
+       unsigned long timeout = msecs_to_jiffies(l_ctx.timeout_period * 1000);
+       int try, ret = -ETIME;
+
+       /*
+        * Total timeout is l_ctx.timeout_period * MCP_RETRIES, but we check for
+        * a crash to try and terminate before then if things go wrong.
+        */
+       for (try = 1; try <= MCP_RETRIES; try++) {
+               /*
+                * Wait non-interruptible to keep MCP synchronised even if
+                * caller is interrupted by signal.
+                */
+               if (wait_for_completion_timeout(&l_ctx.complete, timeout) > 0)
+                       return 0;
+
+               mc_dev_err(ret, "no answer after %ds",
+                          l_ctx.timeout_period * try);
+       }
+
+       mc_dev_err(ret, "timed out waiting for MCP notification");
+       nq_signal_tee_hung();
+       return ret;
+}
+
+static int mcp_cmd(union mcp_message *cmd,
+                  /* The fields below are for debug purpose only */
+                  u32 in_session_id,
+                  u32 *out_session_id,
+                  struct mc_uuid_t *uuid)
+{
+       int err = 0, ret = -EHOSTUNREACH;
+       union mcp_message *msg;
+       enum cmd_id cmd_id = cmd->cmd_header.cmd_id;
+       struct command_info *cmd_info;
+
+       /* Initialize MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+       cmd_info->cpu_clk = local_clock();
+       cmd_info->pid = current->pid;
+       cmd_info->id = cmd_id;
+       cmd_info->session_id = in_session_id;
+       if (uuid) {
+               /* Keep UUID because it's an 'open session' cmd */
+               size_t i;
+
+               cmd_info->uuid_str[0] = ' ';
+               for (i = 0; i < sizeof(uuid->value); i++) {
+                       snprintf(&cmd_info->uuid_str[1 + i * 2], 3, "%02x",
+                                uuid->value[i]);
+               }
+       } else {
+               cmd_info->uuid_str[0] = '\0';
+       }
+
+       cmd_info->state = PENDING;
+       cmd_info->errno = 0;
+       cmd_info->result = MC_MCP_RET_OK;
+       if (++l_ctx.last_cmds_index >= LAST_CMDS_SIZE)
+               l_ctx.last_cmds_index = 0;
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+
+       mutex_lock(&l_ctx.buffer_mutex);
+       msg = l_ctx.buffer;
+       if (l_ctx.mcp_dead)
+               goto out;
+
+       /* Copy message to MCP buffer */
+       memcpy(msg, cmd, sizeof(*msg));
+
+       /* Send MCP notification, with cmd_id as payload for debug purpose */
+       nq_session_notify(&l_ctx.mcp_session.nq_session, l_ctx.mcp_session.sid,
+                         cmd_id);
+
+       /* Update MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       cmd_info->state = SENT;
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+       ret = wait_mcp_notification();
+       if (ret)
+               goto out;
+
+       /* Check response ID */
+       if (msg->rsp_header.rsp_id != (cmd_id | FLAG_RESPONSE)) {
+               ret = -EBADE;
+               mc_dev_err(ret, "MCP command got invalid response (0x%X)",
+                          msg->rsp_header.rsp_id);
+               goto out;
+       }
+
+       /* Convert result */
+       switch (msg->rsp_header.result) {
+       case MC_MCP_RET_OK:
+               err = 0;
+               break;
+       case MC_MCP_RET_ERR_CLOSE_TASK_FAILED:
+               err = -EAGAIN;
+               break;
+       case MC_MCP_RET_ERR_NO_MORE_SESSIONS:
+               err = -EBUSY;
+               break;
+       case MC_MCP_RET_ERR_OUT_OF_RESOURCES:
+               err = -ENOSPC;
+               break;
+       case MC_MCP_RET_ERR_UNKNOWN_UUID:
+               err = -ENOENT;
+               break;
+       case MC_MCP_RET_ERR_WRONG_PUBLIC_KEY:
+               err = -EKEYREJECTED;
+               break;
+       case MC_MCP_RET_ERR_SERVICE_BLOCKED:
+               err = -ECONNREFUSED;
+               break;
+       case MC_MCP_RET_ERR_SERVICE_LOCKED:
+               err = -ECONNABORTED;
+               break;
+       case MC_MCP_RET_ERR_SERVICE_KILLED:
+               err = -ECONNRESET;
+               break;
+       case MC_MCP_RET_ERR_SYSTEM_NOT_READY:
+               err = -EAGAIN;
+               break;
+       case MC_MCP_RET_ERR_DOWNGRADE_NOT_AUTHORIZED:
+               err = -EPERM;
+               break;
+       default:
+               err = -EPERM;
+       }
+
+       /* Copy response back to caller struct */
+       memcpy(cmd, msg, sizeof(*cmd));
+
+out:
+       /* Update MCP log */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       if (ret) {
+               cmd_info->state = FAILED;
+               cmd_info->errno = -ret;
+       } else {
+               cmd_info->state = COMPLETE;
+               cmd_info->errno = -err;
+               cmd_info->result = msg->rsp_header.result;
+               /* For open session: get SID */
+               if (!err && out_session_id)
+                       cmd_info->session_id = *out_session_id;
+       }
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+       mutex_unlock(&l_ctx.buffer_mutex);
+       if (ret) {
+               mc_dev_err(ret, "%s: sending failed", cmd_to_string(cmd_id));
+               return ret;
+       }
+
+       if (err) {
+               if (cmd_id == MC_MCP_CMD_CLOSE_SESSION && err == -EAGAIN)
+                       mc_dev_devel("%s: try again",
+                                    cmd_to_string(cmd_id));
+               else
+                       mc_dev_err(err, "%s: res %d", cmd_to_string(cmd_id),
+                                  msg->rsp_header.result);
+               return err;
+       }
+
+       return 0;
+}
+
/* Query the TEE version over MCP; the NWd field is filled with this
 * driver's own API version.
 */
static inline int __mcp_get_version(struct mc_version_info *version_info)
{
	union mcp_message cmd;
	u32 version;
	int ret;

#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu())
		return xen_mc_get_version(version_info);
#endif

	version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
			     MCDRVMODULEAPI_VERSION_MINOR);

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_GET_MOBICORE_VERSION;
	ret = mcp_cmd(&cmd, 0, NULL, NULL);
	if (ret)
		return ret;

	memcpy(version_info, &cmd.rsp_get_version.version_info,
	       sizeof(*version_info));
	/*
	 * The CMP version is meaningless in this case, and is replaced
	 * by the driver's own version.
	 */
	version_info->version_nwd = version;
	return 0;
}
+
/*
 * Return the TEE version, querying the SWd only on the first call and
 * serving subsequent calls from a static cache.
 * NOTE(review): the static cache is filled without locking; presumably
 * callers are serialized or a duplicate query is harmless — confirm.
 */
int mcp_get_version(struct mc_version_info *version_info)
{
	static struct mc_version_info static_version_info;

	/* If cache empty, get version from the SWd and cache it */
	if (!static_version_info.version_nwd) {
		int ret = __mcp_get_version(&static_version_info);

		if (ret)
			return ret;
	}

	/* Copy cached version */
	memcpy(version_info, &static_version_info, sizeof(*version_info));
	nq_set_version_ptr(static_version_info.product_id);
	return 0;
}
+
/* Send the pre-mapped token buffer described by @map to the SWd */
int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *map)
{
	union mcp_message cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_LOAD_TOKEN;
	cmd.cmd_load_token.wsm_data_type = map->type;
	cmd.cmd_load_token.adr_load_data = map->addr;
	cmd.cmd_load_token.ofs_load_data = map->offset;
	cmd.cmd_load_token.len_load_data = map->length;
	return mcp_cmd(&cmd, 0, NULL, NULL);
}
+
/* Ask the SWd to verify that the TA blob described by @obj/@map can load */
int mcp_load_check(const struct tee_object *obj,
		   const struct mcp_buffer_map *map)
{
	const union mclf_header *header;
	union mcp_message cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_CHECK_LOAD_TA;
	/* Data */
	cmd.cmd_check_load.wsm_data_type = map->type;
	cmd.cmd_check_load.adr_load_data = map->addr;
	cmd.cmd_check_load.ofs_load_data = map->offset;
	cmd.cmd_check_load.len_load_data = map->length;
	/* Header: the MCLF header follows obj->header_length bytes of data */
	header = (union mclf_header *)(obj->data + obj->header_length);
	cmd.cmd_check_load.uuid = header->mclf_header_v2.uuid;
	return mcp_cmd(&cmd, 0, NULL, &cmd.cmd_check_load.uuid);
}
+
/*
 * Open a session to a TA or driver: build the secure object ('blob'),
 * map it for the SWd, send MC_MCP_CMD_OPEN_SESSION, then register the new
 * session and replay any notification that arrived before registration.
 * On success the new SID is stored in session->sid.
 */
int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
		     bool *tci_in_use)
{
	static DEFINE_MUTEX(local_mutex);
	struct tee_object *obj;
	const union mclf_header *header;
	struct tee_mmu *obj_mmu;
	struct mcp_buffer_map obj_map;
	union mcp_message cmd;
	int ret;

#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu()) {
		ret = xen_mc_open_session(session, info);
		if (ret)
			return ret;

		/* Add to list of sessions */
		mutex_lock(&l_ctx.sessions_lock);
		list_add_tail(&session->list, &l_ctx.sessions);
		mutex_unlock(&l_ctx.sessions_lock);
		return 0;
	}
#endif

	/* Create 'blob' */
	if (info->type == TEE_MC_UUID) {
		/* Get TA from registry */
		obj = tee_object_get(info->uuid, false);
		/* Tell SWd to load TA from SFS as not in registry */
		if (IS_ERR(obj) && (PTR_ERR(obj) == -ENOENT))
			obj = tee_object_select(info->uuid);
	} else if (info->type == TEE_MC_DRIVER_UUID) {
		/* Load driver using only uuid */
		obj = tee_object_select(info->uuid);
		*tci_in_use = false;
	} else if (info->user) {
		/* Create secure object from user-space trustlet binary */
		obj = tee_object_read(info->spid, info->va, info->len);
	} else {
		/* Create secure object from kernel-space trustlet binary */
		obj = tee_object_copy(info->va, info->len);
	}

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Header: drivers may declare that they take no TCI at all */
	header = (const union mclf_header *)(&obj->data[obj->header_length]);
	if (info->type == TEE_MC_DRIVER &&
	    (header->mclf_header_v2.flags &
			MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE))
		*tci_in_use = false;

	/* Create mapping for blob (allocated by driver, so task = NULL) */
	{
		struct mc_ioctl_buffer buf = {
			.va = (uintptr_t)obj->data,
			.len = obj->length,
			.flags = MC_IO_MAP_INPUT,
		};

		obj_mmu = tee_mmu_create(NULL, &buf);
		if (IS_ERR(obj_mmu)) {
			ret = PTR_ERR(obj_mmu);
			goto err_mmu;
		}

		tee_mmu_buffer(obj_mmu, &obj_map);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_OPEN_SESSION;
	/* Data */
	cmd.cmd_open.uuid = header->mclf_header_v2.uuid;
	cmd.cmd_open.wsm_data_type = obj_map.type;
	cmd.cmd_open.adr_load_data = obj_map.addr;
	cmd.cmd_open.ofs_load_data = obj_map.offset;
	cmd.cmd_open.len_load_data = obj_map.length;
	/* Buffer */
	if (*tci_in_use) {
		struct mcp_buffer_map map;

		tee_mmu_buffer(info->tci_mmu, &map);
		cmd.cmd_open.wsmtype_tci = map.type;
		cmd.cmd_open.adr_tci_buffer = map.addr;
		cmd.cmd_open.ofs_tci_buffer = map.offset;
		cmd.cmd_open.len_tci_buffer = map.length;
	} else {
		cmd.cmd_open.wsmtype_tci = WSM_INVALID;
	}

	/* Reset unexpected notification (serialised by local_mutex) */
	mutex_lock(&local_mutex);
	l_ctx.unexp_notif.session_id = SID_MCP;	/* Cannot be */
	cmd.cmd_open.cmd_open_data.mclf_magic = MC_GP_CLIENT_AUTH_MAGIC;

	/* Send MCP open command */
	ret = mcp_cmd(&cmd, 0, &cmd.rsp_open.session_id, &cmd.cmd_open.uuid);
	/* Make sure we have a valid session ID */
	if (!ret && !cmd.rsp_open.session_id)
		ret = -EBADE;

	if (!ret) {
		session->sid = cmd.rsp_open.session_id;
		/* Add to list of sessions */
		mutex_lock(&l_ctx.sessions_lock);
		list_add_tail(&session->list, &l_ctx.sessions);
		mutex_unlock(&l_ctx.sessions_lock);
		/* Check for spurious notification received before adding */
		mutex_lock(&l_ctx.unexp_notif_mutex);
		if (l_ctx.unexp_notif.session_id == session->sid) {
			mutex_lock(&session->exit_code_lock);
			session->exit_code = l_ctx.unexp_notif.payload;
			mutex_unlock(&session->exit_code_lock);
			nq_session_state_update(&session->nq_session,
						NQ_NOTIF_RECEIVED);
			complete(&session->completion);
		}

		mutex_unlock(&l_ctx.unexp_notif_mutex);
	}

	mutex_unlock(&local_mutex);

	/* Blob for UUID/TA not needed as re-mapped by the SWd */
	tee_mmu_put(obj_mmu);

err_mmu:
	/* Delete secure object */
	tee_object_free(obj);

	return ret;
}
+
+/*
+ * Legacy and GP TAs close differently:
+ * - GP TAs always send a notification with payload, whether on close or crash
+ * - Legacy TAs only send a notification with payload on crash
+ * - GP TAs may take time to close, and we get -EAGAIN back from mcp_cmd
+ * - Legacy TAs always close when asked, unless they are driver in which case
+ *   they just don't close at all
+ */
+int mcp_close_session(struct mcp_session *session)
+{
+       union mcp_message cmd;
+       int ret;
+
+       if (is_xen_domu()) {
+#ifdef TRUSTONIC_XEN_DOMU
+               ret = xen_mc_close_session(session);
+#endif
+       } else {
+               /* Signal a potential waiter that SWd session is going away */
+               complete(&session->completion);
+               /* Send MCP command */
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.cmd_header.cmd_id = MC_MCP_CMD_CLOSE_SESSION;
+               cmd.cmd_close.session_id = session->sid;
+               ret = mcp_cmd(&cmd, cmd.cmd_close.session_id, NULL, NULL);
+       }
+
+       mutex_lock(&l_ctx.sessions_lock);
+       if (!ret) {
+               session->state = MCP_SESSION_CLOSED;
+               list_del(&session->list);
+               nq_session_exit(&session->nq_session);
+       } else {
+               /* Something is not right, assume session is still running */
+               session->state = MCP_SESSION_CLOSE_FAILED;
+       }
+       mutex_unlock(&l_ctx.sessions_lock);
+       mc_dev_devel("close session %x ret %d state %s",
+                    session->sid, ret, state_to_string(session->state));
+       return ret;
+}
+
/*
 * Session is to be removed from NWd records as SWd has been wiped clean:
 * no MCP close command is sent, only local bookkeeping is undone.
 */
void mcp_cleanup_session(struct mcp_session *session)
{
	mutex_lock(&l_ctx.sessions_lock);
	session->state = MCP_SESSION_CLOSED;
	list_del(&session->list);
	nq_session_exit(&session->nq_session);
	mutex_unlock(&l_ctx.sessions_lock);
}
+
/*
 * Map the buffer described by @mmu into the SWd session @session_id.
 * On success the SWd-side (secure) virtual address is stored in *sva and
 * the global map counter is incremented.
 */
int mcp_map(u32 session_id, struct tee_mmu *mmu, u32 *sva)
{
	struct mcp_buffer_map map;
	union mcp_message cmd;
	int ret;

#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu())
		return xen_mc_map(session_id, mmu, sva);
#endif

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_MAP;
	cmd.cmd_map.session_id = session_id;
	tee_mmu_buffer(mmu, &map);
	cmd.cmd_map.wsm_type = map.type;
	cmd.cmd_map.adr_buffer = map.addr;
	cmd.cmd_map.ofs_buffer = map.offset;
	cmd.cmd_map.len_buffer = map.length;
	cmd.cmd_map.flags = map.flags;
	ret = mcp_cmd(&cmd, session_id, NULL, NULL);
	if (!ret) {
		*sva = cmd.rsp_map.secure_va;
		atomic_inc(&g_ctx.c_maps);
	}

	return ret;
}
+
/*
 * Unmap a buffer previously mapped with mcp_map() from the SWd session.
 * Decrements the global map counter on success.
 */
int mcp_unmap(u32 session_id, const struct mcp_buffer_map *map)
{
	union mcp_message cmd;
	int ret;

#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu())
		return xen_mc_unmap(session_id, map);
#endif

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_UNMAP;
	cmd.cmd_unmap.session_id = session_id;
	cmd.cmd_unmap.wsm_type = map->type;
	cmd.cmd_unmap.virtual_buffer_len = map->length;
	cmd.cmd_unmap.secure_va = map->secure_va;
	ret = mcp_cmd(&cmd, session_id, NULL, NULL);
	if (!ret)
		atomic_dec(&g_ctx.c_maps);

	return ret;
}
+
/* Tell the SWd that the MCP interface itself is shutting down */
static int mcp_close(void)
{
	union mcp_message cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_header.cmd_id = MC_MCP_CMD_CLOSE_MCP;
	return mcp_cmd(&cmd, 0, NULL, NULL);
}
+
/*
 * Ring the doorbell for @session's SWd counterpart (or the MCP pseudo
 * session). Returns -EHOSTUNREACH once the TEE is known to be dead.
 */
int mcp_notify(struct mcp_session *session)
{
	if (l_ctx.mcp_dead)
		return -EHOSTUNREACH;

	if (session->sid == SID_MCP)
		mc_dev_devel("notify MCP");
	else
		mc_dev_devel("notify session %x", session->sid);

#ifdef TRUSTONIC_XEN_DOMU
	if (is_xen_domu())
		return xen_mc_notify(session);
#endif

	/* Put notif_count as payload for debug purpose */
	return nq_session_notify(&session->nq_session, session->sid,
				 ++session->notif_count);
}
+
+/*
+ * Handle a notification addressed to a specific session.
+ * @session may be NULL when the ID was not found in the session list (e.g.
+ * the session is still being created); the notification is then recorded in
+ * l_ctx.unexp_notif (latest one wins) instead of being delivered.
+ * A non-zero @payload is treated as the exit code of a terminated TA.
+ */
+static inline void session_notif_handler(struct mcp_session *session, u32 id,
+                                        u32 payload)
+{
+       mutex_lock(&l_ctx.sessions_lock);
+       mc_dev_devel("MCP notif from session %x exit code %d state %d",
+                    id, payload, session ? session->state : -1);
+       if (session) {
+               /* TA has terminated */
+               if (payload) {
+                       /* Update exit code, or not */
+                       mutex_lock(&session->exit_code_lock);
+                       session->exit_code = payload;
+                       mutex_unlock(&session->exit_code_lock);
+               }
+
+               nq_session_state_update(&session->nq_session,
+                                       NQ_NOTIF_RECEIVED);
+
+               /* Unblock waiter */
+               complete(&session->completion);
+       }
+       mutex_unlock(&l_ctx.sessions_lock);
+
+       /* Unknown session, probably being started */
+       if (!session) {
+               /* Only the most recent unexpected notification is kept */
+               mutex_lock(&l_ctx.unexp_notif_mutex);
+               l_ctx.unexp_notif.session_id = id;
+               l_ctx.unexp_notif.payload = payload;
+               mutex_unlock(&l_ctx.unexp_notif_mutex);
+       }
+}
+
+/*
+ * Top-level notification dispatcher registered with the NQ layer.
+ * SID_MCP wakes up the MCP command waiter; any other ID is resolved against
+ * the session list and forwarded to session_notif_handler().
+ */
+static void mcp_notif_handler(u32 id, u32 payload)
+{
+       if (id == SID_MCP) {
+               /* MCP notification */
+               mc_dev_devel("notification from MCP");
+               complete(&l_ctx.complete);
+       } else {
+               /* Session notification */
+               struct mcp_session *session = NULL, *candidate;
+
+               mutex_lock(&l_ctx.sessions_lock);
+               list_for_each_entry(candidate, &l_ctx.sessions, list) {
+                       if (candidate->sid == id) {
+                               session = candidate;
+                               break;
+                       }
+               }
+               mutex_unlock(&l_ctx.sessions_lock);
+
+               /* session is NULL if id not found */
+               /* NOTE(review): sessions_lock is dropped here and re-taken
+                * inside session_notif_handler(); the session could in theory
+                * be removed in between — confirm session lifetime guarantees.
+                */
+               session_notif_handler(session, id, payload);
+       }
+}
+
+/*
+ * Dump one line per live session (plus a header) into the debugfs buffer.
+ * Returns the last kasnprintf() result (negative on formatting failure).
+ */
+static int debug_sessions(struct kasnprintf_buf *buf)
+{
+       struct mcp_session *sess;
+       int written;
+
+       /* Column headers */
+       written = kasnprintf(buf, "%20s %4s %-15s %-11s %4s\n",
+                            "CPU clock", "ID", "state", "notif state", "ec");
+       if (written < 0)
+               return written;
+
+       /* Walk the session list under the lock, one output line each */
+       mutex_lock(&l_ctx.sessions_lock);
+       list_for_each_entry(sess, &l_ctx.sessions, list) {
+               const char *notif_state;
+               u64 cpu_clk;
+               s32 exit_code;
+
+               notif_state = nq_session_state(&sess->nq_session, &cpu_clk);
+               mcp_get_err(sess, &exit_code);
+               written = kasnprintf(buf, "%20llu %4x %-15s %-11s %4d\n",
+                                    cpu_clk, sess->sid,
+                                    state_to_string(sess->state),
+                                    notif_state, exit_code);
+               if (written < 0)
+                       break;
+       }
+       mutex_unlock(&l_ctx.sessions_lock);
+       return written;
+}
+
+/* debugfs read op for the "sessions" entry, backed by debug_sessions() */
+static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       return debug_generic_read(file, user_buf, count, ppos,
+                                 debug_sessions);
+}
+
+/* File operations for the "sessions" debugfs entry */
+static const struct file_operations debug_sessions_ops = {
+       .open = debug_generic_open,
+       .release = debug_generic_release,
+       .read = debug_sessions_read,
+       .llseek = default_llseek,
+};
+
+/*
+ * Format one record of the MCP command log into the debugfs buffer.
+ * Returns the kasnprintf() result (chars written or negative on failure).
+ */
+static inline int show_log_entry(struct kasnprintf_buf *buf,
+                                struct command_info *cmd_info)
+{
+       const char *state_str;
+
+       switch (cmd_info->state) {
+       case UNUSED:
+               state_str = "unused";
+               break;
+       case PENDING:
+               state_str = "pending";
+               break;
+       case SENT:
+               state_str = "sent";
+               break;
+       case COMPLETE:
+               state_str = "complete";
+               break;
+       case FAILED:
+               state_str = "failed";
+               break;
+       default:
+               /* Defensive: future or corrupted state values */
+               state_str = "unknown";
+               break;
+       }
+
+       return kasnprintf(buf, "%20llu %5d %-16s %5x %-8s %5d %6d%s\n",
+                         cmd_info->cpu_clk, cmd_info->pid,
+                         cmd_to_string(cmd_info->id), cmd_info->session_id,
+                         state_str, cmd_info->errno, cmd_info->result,
+                         cmd_info->uuid_str);
+}
+
+/*
+ * Dump the circular buffer of last MCP commands, oldest records first:
+ * if the buffer has wrapped, the tail (from the current index to the end)
+ * holds the oldest entries and is printed before the head.
+ * Returns the last kasnprintf() result.
+ */
+static int debug_last_cmds(struct kasnprintf_buf *buf)
+{
+       struct command_info *cmd_info;
+       int i, ret = 0;
+
+       /* Print column headers */
+       mutex_lock(&l_ctx.last_cmds_mutex);
+       ret = kasnprintf(buf, "%20s %5s %-16s %5s %-8s %5s %6s %s\n",
+                        "CPU clock", "PID", "command", "S-ID",
+                        "state", "errno", "result", "UUID");
+       if (ret < 0)
+               goto out;
+
+       cmd_info = &l_ctx.last_cmds[l_ctx.last_cmds_index];
+       if (cmd_info->state != UNUSED) {
+               /* Buffer has wrapped around, dump end (oldest records) */
+               for (i = l_ctx.last_cmds_index; i < LAST_CMDS_SIZE; i++) {
+                       ret = show_log_entry(buf, cmd_info++);
+                       if (ret < 0)
+                               goto out;
+               }
+       }
+
+       /* Dump first records */
+       cmd_info = &l_ctx.last_cmds[0];
+       for (i = 0; i < l_ctx.last_cmds_index; i++) {
+               ret = show_log_entry(buf, cmd_info++);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       mutex_unlock(&l_ctx.last_cmds_mutex);
+       return ret;
+}
+
+/* debugfs read op for "last_mcp_commands", backed by debug_last_cmds() */
+static ssize_t debug_last_cmds_read(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       return debug_generic_read(file, user_buf, count, ppos, debug_last_cmds);
+}
+
+/* File operations for the "last_mcp_commands" debugfs entry */
+static const struct file_operations debug_last_cmds_ops = {
+       .open = debug_generic_open,
+       .release = debug_generic_release,
+       .read = debug_last_cmds_read,
+       .llseek = default_llseek,
+};
+
+/*
+ * One-time MCP setup: fetch the shared MCP buffer from the NQ layer,
+ * initialise all locks/lists and the dedicated MCP session (SID_MCP),
+ * then register the notification handler and the TEE stop notifier.
+ * Always returns 0.
+ */
+int mcp_init(void)
+{
+       l_ctx.buffer = nq_get_mcp_buffer();
+       mutex_init(&l_ctx.buffer_mutex);
+       init_completion(&l_ctx.complete);
+       /* Setup notification queue mutex */
+       mcp_session_init(&l_ctx.mcp_session);
+       l_ctx.mcp_session.sid = SID_MCP;
+       mutex_init(&l_ctx.unexp_notif_mutex);
+       INIT_LIST_HEAD(&l_ctx.sessions);
+       mutex_init(&l_ctx.sessions_lock);
+       mutex_init(&l_ctx.last_cmds_mutex);
+
+       /* Default command timeout; tunable via debugfs (see mcp_start()) */
+       l_ctx.timeout_period = MCP_TIMEOUT;
+
+       nq_register_notif_handler(mcp_notif_handler, false);
+       l_ctx.tee_stop_notifier.notifier_call = tee_stop_notifier_fn;
+       nq_register_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+
+       return 0;
+}
+
+/* Teardown: declare the MCP dead and unregister the TEE stop notifier */
+void mcp_exit(void)
+{
+       mark_mcp_dead();
+       nq_unregister_tee_stop_notifier(&l_ctx.tee_stop_notifier);
+}
+
+/*
+ * Create the MCP debugfs entries: sessions list, last commands log and the
+ * writable command timeout. debugfs return values are deliberately ignored
+ * (entries are best-effort). Always returns 0.
+ */
+int mcp_start(void)
+{
+       /* Create debugfs sessions and last commands entries */
+       debugfs_create_file("sessions", 0400, g_ctx.debug_dir, NULL,
+                           &debug_sessions_ops);
+       debugfs_create_file("last_mcp_commands", 0400, g_ctx.debug_dir, NULL,
+                           &debug_last_cmds_ops);
+       debugfs_create_u32("mcp_timeout", 0600, g_ctx.debug_dir,
+                          &l_ctx.timeout_period);
+       return 0;
+}
+
+/* Stop: close the MCP channel to the SWd (ignores the command result) */
+void mcp_stop(void)
+{
+       mcp_close();
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mcp.h
new file mode 100755 (executable)
index 0000000..436e9f5
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_MCP_H_
+#define _MC_MCP_H_
+
+#include "mci/mcloadformat.h"          /* struct identity */
+#include "nq.h"
+
+struct tee_mmu;
+
+/*
+ * Structure to hold the TA/driver information at open.
+ * Filled by callers of mcp_open_session().
+ */
+struct mcp_open_info {
+       enum {
+               TEE_MC_UUID,
+               TEE_MC_TA,
+               TEE_MC_DRIVER,
+               TEE_MC_DRIVER_UUID,
+       }       type;
+       /* TA/driver (va/len presumably describe the binary blob — confirm) */
+       const struct mc_uuid_t  *uuid;
+       u32                     spid;
+       uintptr_t               va;
+       size_t                  len;
+       /* TCI */
+       uintptr_t               tci_va;
+       size_t                  tci_len;
+       struct tee_mmu          *tci_mmu;
+       /* Origin: true when the request comes from user space — TODO confirm */
+       bool                    user;
+};
+
+/* Structure to hold the TA/driver descriptor to pass to MCP */
+struct tee_object {
+       u32     length;         /* Total length */
+       u32     header_length;  /* Length of header before payload */
+       u8      data[];         /* Header followed by payload (flexible array) */
+};
+
+/* Structure to hold all mapped buffer data to pass to MCP */
+struct mcp_buffer_map {
+       u64             addr;           /* Page-aligned PA, or VA */
+       unsigned long   nr_pages;       /* Total number of pages mapped */
+       u32             secure_va;      /* SWd virtual address */
+       u32             offset;         /* Data offset inside the first page */
+       u32             length;         /* Length of the data */
+       u32             type;           /* Type of MMU */
+       u32             flags;          /* Flags (typically read/write) */
+       struct tee_mmu  *mmu;           /* MMU from which the map was made */
+};
+
+/* One MCP session, tracked in the mcp.c sessions list */
+struct mcp_session {
+       /* Notification queue session */
+       struct nq_session       nq_session;
+       /* Session ID */
+       u32                     sid;
+       /* Sessions list (protected by mcp sessions_lock) */
+       struct list_head        list;
+       /* Notification waiter lock */
+       struct mutex            notif_wait_lock;        /* Only one at a time */
+       /* Notification received */
+       struct completion       completion;
+       /* Notification lock */
+       struct mutex            exit_code_lock;
+       /* Last notification (TA exit code, protected by exit_code_lock) */
+       s32                     exit_code;
+       /* Session state (protected by mcp sessions_lock) */
+       enum mcp_session_state {
+               MCP_SESSION_RUNNING,
+               MCP_SESSION_CLOSE_FAILED,
+               MCP_SESSION_CLOSED,
+       }                       state;
+       /* Notification counter (sent as debug payload, see mcp_notify()) */
+       u32                     notif_count;
+};
+
+/* Init for the mcp_session structure */
+void mcp_session_init(struct mcp_session *session);
+
+/* Commands: each sends one MCP command to the SWd, returns 0 or -errno */
+int mcp_get_version(struct mc_version_info *version_info);
+int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *buffer_map);
+int mcp_load_check(const struct tee_object *obj,
+                  const struct mcp_buffer_map *buffer_map);
+int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
+                    bool *tci_in_use);
+int mcp_close_session(struct mcp_session *session);
+void mcp_cleanup_session(struct mcp_session *session);
+/* Map a buffer into a session; the secure VA is returned in *sva */
+int mcp_map(u32 session_id, struct tee_mmu *mmu, u32 *sva);
+int mcp_unmap(u32 session_id, const struct mcp_buffer_map *map);
+int mcp_notify(struct mcp_session *mcp_session);
+int mcp_wait(struct mcp_session *session, s32 timeout, bool silent_expiry);
+int mcp_get_err(struct mcp_session *session, s32 *err);
+
+/* Initialisation/cleanup */
+int mcp_init(void);
+void mcp_exit(void);
+int mcp_start(void);
+void mcp_stop(void);
+
+#endif /* _MC_MCP_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.c
new file mode 100644 (file)
index 0000000..58e1f0f
--- /dev/null
@@ -0,0 +1,658 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/pgtable.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+
+#ifdef CONFIG_XEN
+/* To get the MFN */
+#include <linux/pfn.h>
+#include <xen/page.h>
+#endif
+
+#include "public/mc_user.h"
+
+#include "mci/mcimcp.h"
+
+#include "main.h"
+#include "mcp.h"       /* mcp_buffer_map */
+#include "mmu.h"
+
+#define PHYS_48BIT_MASK (BIT(48) - 1)
+
+/* Common */
+#define MMU_BUFFERABLE         BIT(2)          /* AttrIndx[0] */
+#define MMU_CACHEABLE          BIT(3)          /* AttrIndx[1] */
+#define MMU_EXT_NG             BIT(11)         /* ARMv6 and higher */
+
+/* LPAE */
+#define MMU_TYPE_PAGE          (3 << 0)
+#define MMU_NS                 BIT(5)
+#define MMU_AP_RW_ALL          BIT(6) /* AP[2:1], RW, at any privilege level */
+#define        MMU_AP2_RO              BIT(7)
+#define MMU_EXT_SHARED_64      (3 << 8)        /* SH[1:0], inner shareable */
+#define MMU_EXT_AF             BIT(10)         /* Access Flag */
+#define MMU_EXT_XN             (((u64)1) << 54) /* XN */
+
+/* Non-LPAE */
+#define MMU_TYPE_EXT           (3 << 0)        /* v5 */
+#define MMU_TYPE_SMALL         (2 << 0)
+#define MMU_EXT_AP0            BIT(4)
+#define MMU_EXT_AP1            (2 << 4)
+#define MMU_EXT_AP2            BIT(9)
+#define MMU_EXT_TEX(x)         ((x) << 6)      /* v5 */
+#define MMU_EXT_SHARED_32      BIT(10)         /* ARMv6 and higher */
+
+/* ION */
+/* Trustonic Specific flag to detect ION mem */
+#define MMU_ION_BUF            BIT(24)
+
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
+/*
+ * gup_local(): wrapper hiding the get_user_pages*() API churn across kernel
+ * versions. Pins @nr_pages user pages of @mm starting at @start; returns the
+ * number of pages pinned or a negative errno.
+ */
+static inline long gup_local(struct mm_struct *mm, uintptr_t start,
+                            unsigned long nr_pages, int write,
+                            struct page **pages)
+{
+       /* Pre-4.6 API: tsk/mm plus separate write/force arguments */
+       return get_user_pages(NULL, mm, start, nr_pages, write, 0, pages, NULL);
+}
+#elif KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
+static inline long gup_local(struct mm_struct *mm, uintptr_t start,
+                            unsigned long nr_pages, int write,
+                            struct page **pages)
+{
+       /*
+        * 4.6..4.8 get_user_pages_remote() still takes write/force, not
+        * gup_flags; the FOLL_WRITE/FOLL_CMA computation previously here was
+        * dead code (never passed to the call) and has been removed.
+        */
+       return get_user_pages_remote(NULL, mm, start, nr_pages, write, 0, pages,
+                                    NULL);
+}
+#elif KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+static inline long gup_local(struct mm_struct *mm, uintptr_t start,
+                            unsigned long nr_pages, int write,
+                            struct page **pages)
+{
+       unsigned int flags = 0;
+
+       if (write)
+               flags |= FOLL_WRITE;
+
+       /* ExySp */
+       flags |= FOLL_CMA;
+
+       /* 4.9 API: flags-based get_user_pages_remote() */
+       return get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages,
+                                    NULL);
+}
+#else
+static inline long gup_local(struct mm_struct *mm, uintptr_t start,
+                            unsigned long nr_pages, int write,
+                            struct page **pages)
+{
+       unsigned int flags = 0;
+
+       if (write)
+               flags |= FOLL_WRITE;
+
+       /* ExySp */
+       /* flags |= FOLL_CMA; */
+
+       /* 4.10+ API grew an extra "locked" output parameter (last NULL) */
+       return get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages,
+                                    NULL, NULL);
+}
+#endif
+
+/*
+ * A table that could be either a pmd or pte.
+ * The same page is viewed through whichever member fits the use case:
+ * PTE entries (native), page pointers or kernel VAs (Xen DomU), or the
+ * raw page address for allocation/free.
+ */
+union mmu_table {
+       u64             *entries;       /* Array of PTEs */
+       /* Array of pages */
+       struct page     **pages;
+       /* Array of VAs */
+       uintptr_t       *vas;
+       /* Address of table */
+       void            *addr;
+       /* Page for table */
+       unsigned long   page;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into SWd, a MMU table is allocated.
+ * In addition, the area of maximum 1MB virtual address space is mapped into
+ * the MMU table and a handle for this table is returned to the user.
+ */
+struct tee_mmu {
+       struct kref                     kref;
+       /* Array of pages that hold buffer ptes*/
+       union mmu_table                 pte_tables[PMD_ENTRIES_MAX];
+       /* Actual number of ptes tables */
+       size_t                          nr_pmd_entries;
+       /* Contains phys @ of ptes tables */
+       union mmu_table                 pmd_table;
+       struct tee_deleter              *deleter;       /* Xen map to free */
+       unsigned long                   nr_pages;
+       int                             pages_created;  /* Leak check */
+       int                             pages_locked;   /* Leak check */
+       /* Buffer geometry: offset in first page, byte length, map flags */
+       u32                             offset;
+       u32                             length;
+       u32                             flags;
+       /* Pages are from user space */
+       bool                            user;
+       /* Xen DomU: store page pointers/VAs instead of PTEs */
+       bool                            use_pages_and_vas;
+       /* ION case only */
+       struct dma_buf                  *dma_buf;
+       struct dma_buf_attachment       *attach;
+       struct sg_table                 *sgt;
+};
+
+/*
+ * Free an MMU table: release the ION attachment if any, unpin all locked
+ * user pages, free the PTE and PMD table pages, invoke the optional deleter
+ * and free the struct itself. Warns if the page bookkeeping shows a leak.
+ */
+static void tee_mmu_delete(struct tee_mmu *mmu)
+{
+       unsigned long chunk, nr_pages_left = mmu->nr_pages;
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+       /* ION buffers: undo dma_buf_get/attach/map_attachment */
+       if (mmu->dma_buf) {
+               dma_buf_unmap_attachment(mmu->attach, mmu->sgt,
+                                        DMA_BIDIRECTIONAL);
+               dma_buf_detach(mmu->dma_buf, mmu->attach);
+               dma_buf_put(mmu->dma_buf);
+       }
+#endif
+
+       /* Release all locked user space pages */
+       for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+               union mmu_table *pte_table = &mmu->pte_tables[chunk];
+               unsigned long nr_pages = nr_pages_left;
+
+               if (nr_pages > PTE_ENTRIES_MAX)
+                       nr_pages = PTE_ENTRIES_MAX;
+
+               nr_pages_left -= nr_pages;
+
+               /* Tables are allocated in order; first hole ends the walk */
+               if (!pte_table->page)
+                       break;
+
+               if (mmu->user && mmu->use_pages_and_vas) {
+                       /* Xen DomU layout: table holds struct page pointers */
+                       struct page **page = pte_table->pages;
+                       int i;
+
+                       for (i = 0; i < nr_pages; i++, page++)
+                               put_page(*page);
+
+                       mmu->pages_locked -= nr_pages;
+               } else if (mmu->user) {
+                       /* Native layout: table holds raw PTE values */
+                       u64 *pte64 = pte_table->entries;
+                       pte_t pte;
+                       int i;
+
+                       for (i = 0; i < nr_pages; i++) {
+/* pte_t is a bare integer on ARM/older kernels, a struct elsewhere */
+#if (KERNEL_VERSION(4, 7, 0) > LINUX_VERSION_CODE) || defined(CONFIG_ARM)
+                               {
+                                       pte = *pte64++;
+                                       /* Unused entries are 0 */
+                                       if (!pte)
+                                               break;
+                               }
+#else
+                               {
+                                       pte.pte = *pte64++;
+                                       /* Unused entries are 0 */
+                                       if (!pte.pte)
+                                               break;
+                               }
+#endif
+
+                               /* pte_page() cannot return NULL */
+                               put_page(pte_page(pte));
+                       }
+
+                       mmu->pages_locked -= nr_pages;
+               }
+
+               free_page(pte_table->page);
+               mmu->pages_created--;
+       }
+
+       if (mmu->pmd_table.page) {
+               free_page(mmu->pmd_table.page);
+               mmu->pages_created--;
+       }
+
+       /* Both counters must be back to zero, or we leaked something */
+       if (mmu->pages_created || mmu->pages_locked)
+               mc_dev_err(-EUCLEAN,
+                          "leak detected: still in use %d, still locked %d",
+                          mmu->pages_created, mmu->pages_locked);
+
+       if (mmu->deleter)
+               mmu->deleter->delete(mmu->deleter->object);
+
+       kfree(mmu);
+
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_mmus);
+}
+
+/*
+ * Allocate and initialise a tee_mmu from a buffer map: sets up refcount,
+ * buffer geometry, page counts and the L1 (PMD) table page.
+ * Returns the new table or an ERR_PTR() on failure.
+ */
+static struct tee_mmu *tee_mmu_create_common(const struct mcp_buffer_map *b_map)
+{
+       struct tee_mmu *mmu;
+       int err;
+
+       /* Reject buffers larger than the table can describe */
+       if (b_map->nr_pages > (PMD_ENTRIES_MAX * PTE_ENTRIES_MAX)) {
+               err = -EINVAL;
+               mc_dev_err(err, "data mapping exceeds %d pages: %lu",
+                          PMD_ENTRIES_MAX * PTE_ENTRIES_MAX, b_map->nr_pages);
+               return ERR_PTR(err);
+       }
+
+       mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+       if (!mmu)
+               return ERR_PTR(-ENOMEM);
+
+       /* Debug counter and initial reference */
+       atomic_inc(&g_ctx.c_mmus);
+       kref_init(&mmu->kref);
+
+       /* The Xen front-end does not use PTEs */
+       if (is_xen_domu())
+               mmu->use_pages_and_vas = true;
+
+       /* Buffer geometry */
+       mmu->offset = b_map->offset;
+       mmu->length = b_map->length;
+       mmu->flags = b_map->flags;
+
+       /* Number of pages, and of PTE tables needed to describe them */
+       mmu->nr_pages = b_map->nr_pages;
+       mmu->nr_pmd_entries = (mmu->nr_pages + PTE_ENTRIES_MAX - 1) /
+                           PTE_ENTRIES_MAX;
+       mc_dev_devel("mmu->nr_pages %lu num_ptes_pages %zu",
+                    mmu->nr_pages, mmu->nr_pmd_entries);
+
+       /* Allocate a page for the L1 table, always used for DomU */
+       mmu->pmd_table.page = get_zeroed_page(GFP_KERNEL);
+       if (!mmu->pmd_table.page) {
+               tee_mmu_delete(mmu);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mmu->pages_created++;
+       return mmu;
+}
+
+/*
+ * Resolve an ION buffer: @va is actually a dma_buf file descriptor.
+ * Attaches the driver device (g_ctx.mcd) and maps the attachment to obtain
+ * the sg_table. Returns true on success, false on any failure (including
+ * when CONFIG_DMA_SHARED_BUFFER is not enabled).
+ */
+static bool mmu_get_dma_buffer(struct tee_mmu *mmu, int va)
+{
+#ifdef CONFIG_DMA_SHARED_BUFFER
+       struct dma_buf *buf;
+
+       buf = dma_buf_get(va);
+       if (IS_ERR(buf))
+               return false;
+
+       mmu->dma_buf = buf;
+       mmu->attach = dma_buf_attach(mmu->dma_buf, g_ctx.mcd);
+       if (IS_ERR(mmu->attach))
+               goto err_attach;
+
+       mmu->sgt = dma_buf_map_attachment(mmu->attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(mmu->sgt))
+               goto err_map;
+
+       return true;
+
+err_map:
+       dma_buf_detach(mmu->dma_buf, mmu->attach);
+
+err_attach:
+       dma_buf_put(mmu->dma_buf);
+#endif
+       return false;
+}
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct tee_mmu *tee_mmu_create(struct mm_struct *mm,
+                              const struct mc_ioctl_buffer *buf)
+{
+       struct tee_mmu  *mmu;
+       const void      *data = (const void *)(uintptr_t)buf->va;
+       const void      *reader = (const void *)((uintptr_t)data & PAGE_MASK);
+       struct page     **pages;        /* Same as below, conveniently typed */
+       unsigned long   pages_page = 0; /* Page to contain the page pointers */
+       unsigned long   chunk;
+       struct mcp_buffer_map b_map = {
+               .offset = (u32)(buf->va & ~PAGE_MASK),
+               .length = buf->len,
+               .flags = buf->flags,
+       };
+       bool            writeable = buf->flags & MC_IO_MAP_OUTPUT;
+       int             ret = 0;
+
+#ifndef CONFIG_DMA_SHARED_BUFFER
+       if (buf->flags & MMU_ION_BUF) {
+               mc_dev_err(-EINVAL, "ION buffers not supported by kernel");
+               return ERR_PTR(-EINVAL);
+       }
+#endif
+
+       /* Check input arguments */
+       if (!(buf->flags & MMU_ION_BUF) && !buf->va)
+               return ERR_PTR(-EINVAL);
+
+       if (buf->flags & MMU_ION_BUF)
+               /* buf->va is not a valid address. ION buffers are aligned */
+               b_map.offset = 0;
+
+       /* Allocate the struct */
+       b_map.nr_pages = PAGE_ALIGN(b_map.offset + b_map.length) / PAGE_SIZE;
+       /* Allow Registered Shared mem with valid pointer and zero size. */
+       if (!b_map.nr_pages)
+               b_map.nr_pages = 1;
+
+       mmu = tee_mmu_create_common(&b_map);
+       if (IS_ERR(mmu))
+               return mmu;
+
+       if (buf->flags & MMU_ION_BUF) {
+               mc_dev_devel("Buffer is ION");
+               /* Buffer is ION -
+                * va is the client's dma_buf fd, which should be converted
+                * to a struct sg_table * directly.
+                */
+               if (!mmu_get_dma_buffer(mmu, buf->va)) {
+                       mc_dev_err(ret, "mmu_get_dma_buffer failed");
+                       ret = -EINVAL;
+                       goto end;
+               }
+       }
+       /* Get a page to store page pointers */
+       pages_page = get_zeroed_page(GFP_KERNEL);
+       if (!pages_page) {
+               ret = -ENOMEM;
+               goto end;
+       }
+       mmu->pages_created++;
+
+       pages = (struct page **)pages_page;
+       for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+               unsigned long nr_pages;
+               int i;
+
+               /* Size to map for this chunk */
+               if (chunk == (mmu->nr_pmd_entries - 1))
+                       nr_pages = ((mmu->nr_pages - 1) % PTE_ENTRIES_MAX) + 1;
+               else
+                       nr_pages = PTE_ENTRIES_MAX;
+
+               /* Allocate a page to hold ptes that describe buffer pages */
+               mmu->pte_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
+               if (!mmu->pte_tables[chunk].page) {
+                       ret = -ENOMEM;
+                       goto end;
+               }
+               mmu->pages_created++;
+
+               /* Add page address to pmd table if needed */
+               if (mmu->use_pages_and_vas)
+                       mmu->pmd_table.vas[chunk] =
+                               mmu->pte_tables[chunk].page;
+               else
+                       mmu->pmd_table.entries[chunk] =
+                              virt_to_phys(mmu->pte_tables[chunk].addr);
+
+               /* Get pages */
+               if (mmu->dma_buf) {
+                       /* Buffer is ION */
+                       struct sg_mapping_iter miter;
+                       struct page **page_ptr;
+
+                       page_ptr = &pages[0];
+                       sg_miter_start(&miter, mmu->sgt->sgl,
+                                      mmu->sgt->nents,
+                                      SG_MITER_FROM_SG);
+                       while (sg_miter_next(&miter))
+                               *page_ptr++ = miter.page;
+
+                       sg_miter_stop(&miter);
+               } else if (mm) {
+                       long gup_ret;
+
+                       /* Buffer was allocated in user space */
+                       down_read(&mm->mmap_sem);
+                       /*
+                        * Always try to map read/write from a Linux PoV, so
+                        * Linux creates (page faults) the underlying pages if
+                        * missing.
+                        */
+                       gup_ret = gup_local(mm, (uintptr_t)reader,
+                                           nr_pages, 1, pages);
+                       if ((gup_ret == -EFAULT) && !writeable) {
+                               /*
+                                * If mapping read/write fails, and the buffer
+                                * is to be shared as input only, try to map
+                                * again read-only.
+                                */
+                               gup_ret = gup_local(mm, (uintptr_t)reader,
+                                                   nr_pages, 0, pages);
+                       }
+                       up_read(&mm->mmap_sem);
+                       if (gup_ret < 0) {
+                               ret = gup_ret;
+                               mc_dev_err(ret, "failed to get user pages @%p",
+                                          reader);
+                               goto end;
+                       }
+
+                       /* check if we could lock all pages. */
+                       if (gup_ret != nr_pages) {
+                               mc_dev_err((int)gup_ret,
+                                          "failed to get user pages");
+#if KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE
+                               release_pages(pages, gup_ret, 0);
+#else
+                               release_pages(pages, gup_ret);
+#endif
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       reader += nr_pages * PAGE_SIZE;
+                       mmu->user = true;
+                       mmu->pages_locked += nr_pages;
+               } else if (is_vmalloc_addr(data)) {
+                       /* Buffer vmalloc'ed in kernel space */
+                       for (i = 0; i < nr_pages; i++) {
+                               struct page *page = vmalloc_to_page(reader);
+
+                               if (!page) {
+                                       ret = -EINVAL;
+                                       mc_dev_err(ret,
+                                                  "failed to map address");
+                                       goto end;
+                               }
+
+                               pages[i] = page;
+                               reader += PAGE_SIZE;
+                       }
+               } else {
+                       /* Buffer kmalloc'ed in kernel space */
+                       struct page *page = virt_to_page(reader);
+
+                       reader += nr_pages * PAGE_SIZE;
+                       for (i = 0; i < nr_pages; i++)
+                               pages[i] = page++;
+               }
+
+               /* Create Table of physical addresses*/
+               if (mmu->use_pages_and_vas) {
+                       memcpy(mmu->pte_tables[chunk].pages, pages,
+                              nr_pages * sizeof(*pages));
+               } else {
+                       for (i = 0; i < nr_pages; i++) {
+                               mmu->pte_tables[chunk].entries[i] =
+                                               page_to_phys(pages[i]);
+                       }
+               }
+       }
+
+end:
+       if (pages_page) {
+               free_page(pages_page);
+               mmu->pages_created--;
+       }
+
+       if (ret) {
+               tee_mmu_delete(mmu);
+               return ERR_PTR(ret);
+       }
+
+       mc_dev_devel(
+               "created mmu %p: %s va %llx len %u off %u flg %x pmd table %lx",
+               mmu, mmu->user ? "user" : "kernel", buf->va, mmu->length,
+               mmu->offset, mmu->flags, mmu->pmd_table.page);
+       return mmu;
+}
+
+/*
+ * Re-create an MMU table around existing pages (Xen Dom0 path): converts
+ * each page's PFN to an MFN and stores it as a PTE. Without CONFIG_XEN this
+ * always returns ERR_PTR(-EINVAL).
+ */
+struct tee_mmu *tee_mmu_wrap(struct tee_deleter *deleter, struct page **pages,
+                            const struct mcp_buffer_map *b_map)
+{
+       int ret = -EINVAL;
+#ifdef CONFIG_XEN
+       struct tee_mmu *mmu;
+       unsigned long chunk, nr_pages_left;
+
+       /* Allocate the struct */
+       mmu = tee_mmu_create_common(b_map);
+       if (IS_ERR(mmu))
+               return mmu;
+
+       nr_pages_left = mmu->nr_pages;
+       for (chunk = 0; chunk < mmu->nr_pmd_entries; chunk++) {
+               unsigned long nr_pages = nr_pages_left;
+               u64 *pte;
+               int i;
+
+               if (nr_pages > PTE_ENTRIES_MAX)
+                       nr_pages = PTE_ENTRIES_MAX;
+
+               nr_pages_left -= nr_pages;
+
+               /* Allocate a page to hold ptes that describe buffer pages */
+               mmu->pte_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
+               if (!mmu->pte_tables[chunk].page) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               mmu->pages_created++;
+
+               /* Add page address to pmd table if needed */
+               mmu->pmd_table.entries[chunk] =
+                       virt_to_phys(mmu->pte_tables[chunk].addr);
+
+               /* Convert to PTEs */
+               pte = &mmu->pte_tables[chunk].entries[0];
+
+               for (i = 0; i < nr_pages; i++, pages++, pte++) {
+                       unsigned long phys;
+                       unsigned long pfn;
+
+                       phys = page_to_phys(*pages);
+#if defined CONFIG_ARM64
+                       /* Strip bits above the 48-bit physical address */
+                       phys &= PHYS_48BIT_MASK;
+#endif
+                       /* Xen: the SWd needs machine frame numbers */
+                       pfn = PFN_DOWN(phys);
+                       *pte = __pfn_to_mfn(pfn) << PAGE_SHIFT;
+               }
+       }
+
+       mmu->deleter = deleter;
+       mc_dev_devel("wrapped mmu %p: len %u off %u flg %x pmd table %lx",
+                    mmu, mmu->length, mmu->offset, mmu->flags,
+                    mmu->pmd_table.page);
+       return mmu;
+
+err:
+       tee_mmu_delete(mmu);
+#endif
+       return ERR_PTR(ret);
+}
+
+/* Attach an object to be released when the MMU table itself is deleted */
+void tee_mmu_set_deleter(struct tee_mmu *mmu, struct tee_deleter *deleter)
+{
+       mmu->deleter = deleter;
+}
+
+/* kref release callback: actually free the MMU table */
+static void tee_mmu_release(struct kref *kref)
+{
+       struct tee_mmu *mmu = container_of(kref, struct tee_mmu, kref);
+
+       mc_dev_devel("free mmu %p: %s len %u off %u pmd table %lx",
+                    mmu, mmu->user ? "user" : "kernel", mmu->length,
+                    mmu->offset, mmu->pmd_table.page);
+       tee_mmu_delete(mmu);
+}
+
+/* Take a reference on the MMU table */
+void tee_mmu_get(struct tee_mmu *mmu)
+{
+       kref_get(&mmu->kref);
+}
+
+/* Drop a reference; the table is freed when the last one goes away */
+void tee_mmu_put(struct tee_mmu *mmu)
+{
+       kref_put(&mmu->kref, tee_mmu_release);
+}
+
+/*
+ * Fill an mcp_buffer_map describing this MMU table so it can be handed to
+ * the MCP layer; secure_va starts at 0 (assigned later by the SWd).
+ */
+void tee_mmu_buffer(struct tee_mmu *mmu, struct mcp_buffer_map *map)
+{
+       /* Xen DomU tables carry kernel VAs, native ones physical addresses */
+       if (mmu->use_pages_and_vas)
+               map->addr = mmu->pmd_table.page;
+       else
+               map->addr = virt_to_phys(mmu->pmd_table.addr);
+
+       map->type = WSM_L1;
+       if (mmu->dma_buf)
+               map->type |= WSM_UNCACHED;
+
+       map->secure_va = 0;
+       map->offset = mmu->offset;
+       map->length = mmu->length;
+       map->nr_pages = mmu->nr_pages;
+       map->flags = mmu->flags;
+       map->mmu = mmu;
+}
+
+/* Dump a one-line description of @mmu into the debugfs buffer */
+int tee_mmu_debug_structs(struct kasnprintf_buf *buf, const struct tee_mmu *mmu)
+{
+       return kasnprintf(buf,
+                         "\t\t\tmmu %pK: %s len %u off %u table %pK\n",
+                         mmu, mmu->user ? "user" : "kernel", mmu->length,
+                         mmu->offset, (void *)mmu->pmd_table.page);
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/mmu.h
new file mode 100755 (executable)
index 0000000..84fe426
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TBASE_MEM_H_
+#define _TBASE_MEM_H_
+
+/*
+ * This represents the maximum number of entries in a Page Table Entries
+ * array which maps one 4KiB page. Each entry is 64 bits long physical
+ * address with some possible flags. With 512 entries it is possible
+ * to map 2MiB memory block.
+ */
+#define PTE_ENTRIES_MAX        512
+
+/*
+ * This represents the maximum number of entries in a Page Middle Directory
+ * which maps one 4KiB page. Each entry is a 64 bits physical address that
+ * points to a PTE. With 512 entries it is possible to map 1GB memory block.
+ */
+#define PMD_ENTRIES_MAX        512
+
+struct tee_mmu;
+struct mcp_buffer_map;
+
+struct tee_deleter {
+       void *object;
+       void (*delete)(void *object);
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct tee_mmu *tee_mmu_create(struct mm_struct *mm,
+                              const struct mc_ioctl_buffer *buf);
+
+/*
+ * Allocate MMU table and map pages into it.
+ * This is for Xen Dom0 to re-create a buffer with existing pages.
+ */
+struct tee_mmu *tee_mmu_wrap(struct tee_deleter *deleter, struct page **pages,
+                            const struct mcp_buffer_map *buf);
+
+/*
+ * Give the MMU an object to release when released
+ */
+void tee_mmu_set_deleter(struct tee_mmu *mmu, struct tee_deleter *deleter);
+
+/*
+ * Gets a reference on a MMU table.
+ */
+void tee_mmu_get(struct tee_mmu *mmu);
+
+/*
+ * Puts a reference on a MMU table.
+ */
+void tee_mmu_put(struct tee_mmu *mmu);
+
+/*
+ * Fill in buffer info for MMU table.
+ */
+void tee_mmu_buffer(struct tee_mmu *mmu, struct mcp_buffer_map *map);
+
+/*
+ * Add info to debug buffer.
+ */
+int tee_mmu_debug_structs(struct kasnprintf_buf *buf,
+                         const struct tee_mmu *mmu);
+
+#endif /* _TBASE_MEM_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/nq.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/nq.c
new file mode 100755 (executable)
index 0000000..94a95c5
--- /dev/null
@@ -0,0 +1,1261 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/of_irq.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/clock.h> /* local_clock */
+#endif
+
+#include "platform.h"                  /* CPU-related information */
+
+#include "public/mc_user.h"
+#include "public/mc_linux_api.h"       /* mc_switch_core */
+
+#include "mci/mcifc.h"
+#include "mci/mciiwp.h"
+#include "mci/mcimcp.h"
+#include "mci/mcinq.h"
+#include "mci/mcitime.h"               /* struct mcp_time */
+
+#include "main.h"
+#include "clock.h"
+#include "fastcall.h"
+#include "logging.h"
+#include "nq.h"
+
+#define NQ_NUM_ELEMS           64
+#define SCHEDULING_FREQ                5       /**< N-SIQ every n-th time */
+#define DEFAULT_TIMEOUT_MS     20000   /* We do nothing on timeout anyway */
+
+/* If not forced by platform header, use defaults below */
+
+#ifndef CPU_IDS
+#define CPU_IDS { 0x0000, 0x0001, 0x0002, 0x0003, \
+                 0x0100, 0x0101, 0x0102, 0x0103, \
+                 0x0200, 0x0201, 0x0202, 0x0203 }
+#endif
+
+static const u32 cpu_ids[] = CPU_IDS;
+
+/*
+ * Driver-local context for the notification queue and TEE scheduler.
+ * Single instance; shared between IRQ bottom half, scheduler thread and API.
+ */
+static struct {
+       struct mutex buffer_mutex;      /* Lock on SWd communication buffer */
+       struct mcp_buffer *mcp_buffer;
+       struct interworld_session *iwp_buffer;
+       struct task_struct *irq_bh_thread;
+       struct completion irq_bh_complete;
+       bool irq_bh_thread_run;
+       int irq;
+       struct blocking_notifier_head tee_stop_notifiers;
+       void (*mcp_notif_handler)(u32 id, u32 payload);
+       void (*iwp_notif_handler)(u32 id, u32 payload);
+       /* MobiCore MCI information */
+       unsigned int order;
+       union {
+               void            *mci;
+               struct {
+                       struct notification_queue *tx;
+                       struct notification_queue *rx;
+               } nq;
+       };
+       /*
+        * This notifications list is to be used to queue notifications when the
+        * notification queue overflows, so no session gets its notification
+        * lost, especially MCP.
+        */
+       struct mutex            notifications_mutex;
+       struct list_head        notifications;
+       /* Dump buffer */
+       char                    *tee_version;
+       struct kasnprintf_buf   dump;
+       /* Time */
+       struct mcp_time         *time;
+
+       /* Scheduler */
+       int                     active_cpu;     /* We always start on CPU #0 */
+       int                     next_cpu;       /* If core switch required */
+       struct task_struct      *tee_scheduler_thread;
+       bool                    tee_scheduler_run;
+       bool                    tee_hung;
+       int                     boot_ret;
+       struct completion       boot_complete;  /* Signal end of boot */
+       struct completion       idle_complete;  /* Unblock scheduler thread */
+       struct completion       sleep_complete; /* Wait for sleep status */
+       struct mutex            sleep_mutex;    /* Protect sleep request */
+       struct mutex            request_mutex;  /* Protect all below */
+       /* The order of this enum matters */
+       enum sched_command {
+               NONE,           /* No specific request */
+               YIELD,          /* Run the SWd */
+               NSIQ,           /* Schedule the SWd */
+               SUSPEND,        /* Suspend the SWd */
+               RESUME,         /* Resume the SWd */
+       }                       request;
+       bool                    suspended;
+
+       /* Logging */
+       phys_addr_t             log_buffer;
+       u32                     log_buffer_size;
+       bool                    log_buffer_busy;
+} l_ctx;
+
+#ifdef MC_SMC_FASTCALL
+/* SMC fastcall platforms issue SMCs via a helper, so no CPU pinning needed */
+static inline int nq_set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+       return 0;
+}
+#else /* MC_SMC_FASTCALL */
+/* Pin the given thread to new_mask so SMCs are issued from the right CPU */
+static inline int nq_set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+       return set_cpus_allowed_ptr(p, &new_mask);
+}
+#endif /* ! MC_SMC_FASTCALL */
+
+/*
+ * If the TEE currently runs on dying_cpu, migrate it to the first other
+ * online CPU. No-op when the dying CPU is not the active one.
+ */
+static inline int switch_to_online_core(int dying_cpu)
+{
+       int cpu;
+
+       if (l_ctx.active_cpu != dying_cpu) {
+               mc_dev_devel("not active CPU, no action taken");
+               return 0;
+       }
+
+       /* Chose the first online CPU and switch! */
+       for_each_online_cpu(cpu) {
+               if (cpu != dying_cpu) {
+                       mc_dev_info("CPU #%d is dying, switching to CPU #%d",
+                                   dying_cpu, cpu);
+                       return mc_switch_core(cpu);
+               }
+
+               mc_dev_devel("skipping CPU #%d", dying_cpu);
+       }
+
+       return 0;
+}
+
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+/* Pre-4.10 CPU hotplug: legacy notifier moves the TEE off a dying CPU */
+static int cpu_notifer_callback(struct notifier_block *nfb,
+                               unsigned long action, void *hcpu)
+{
+       int cpu = (int)(uintptr_t)hcpu;
+
+       switch (action) {
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               mc_dev_devel("CPU #%d is going to die", cpu);
+               switch_to_online_core(cpu);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notifer = {
+       .notifier_call = cpu_notifer_callback,
+};
+#else
+/* 4.10+ CPU hotplug state machine callback, registered elsewhere */
+static int nq_cpu_down_prep(unsigned int cpu)
+{
+       mc_dev_devel("CPU #%d is going to die", cpu);
+       return switch_to_online_core(cpu);
+}
+#endif
+
+/* Interworld (GP) session IDs carry the SID_IWP_NOTIFICATION flag bit */
+static inline bool is_iwp_id(u32 id)
+{
+       return (id & SID_IWP_NOTIFICATION) != 0;
+}
+
+/* Update a session's notification state and timestamp under its mutex */
+static inline void session_state_update_internal(struct nq_session *session,
+                                                enum nq_notif_state state)
+{
+       mutex_lock(&session->mutex);
+       session->state = state;
+       session->cpu_clk = local_clock();
+       mutex_unlock(&session->mutex);
+}
+
+/*
+ * Notification Queue overflow management:
+ * - once the SWd NQ is full, sessions get added to the overflow queue:
+ *   'l_ctx.notifications'
+ * - as long as this queue is not empty, new notifications get added there
+ *   first, if not already present, then the queue is flushed
+ * - the queue is also flushed by the scheduler once the SWd has run
+ */
+/* True when the TX notification queue to the SWd has no free slot */
+static inline bool notif_queue_full(void)
+{
+       struct notification_queue *tx = l_ctx.nq.tx;
+
+       return (tx->hdr.write_cnt - tx->hdr.read_cnt) == tx->hdr.queue_size;
+}
+
+/*
+ * Write one notification into the TX queue. Caller must have checked
+ * notif_queue_full() first; counters are free-running and wrap naturally.
+ */
+static inline void notif_queue_push(u32 session_id, u32 payload)
+{
+       struct notification_queue_header *hdr = &l_ctx.nq.tx->hdr;
+       u32 i = hdr->write_cnt % hdr->queue_size;
+
+       l_ctx.nq.tx->notification[i].session_id = session_id;
+       l_ctx.nq.tx->notification[i].payload = payload;
+       /*
+        * Ensure notification[] is written before we update the counter
+        * We want a ARM dmb() / ARM64 dmb(sy) here
+        */
+       smp_mb();
+
+       hdr->write_cnt++;
+       /*
+        * Ensure write_cnt is written before new notification
+        * We want a ARM dsb() / ARM64 dsb(sy) here
+        * NOTE(review): rmb() is a read barrier, yet the comment asks for a
+        * dsb after a write — looks like it should be wmb(); confirm against
+        * the SWd interface requirements before changing.
+        */
+       rmb();
+}
+
+/* Read back the most recently pushed TX notification (for debug/inspection) */
+static void retrieve_last_session_payload(u32 *session_id, u32 *payload)
+{
+       struct notification_queue_header *hdr = &l_ctx.nq.tx->hdr;
+       u32 i = (hdr->write_cnt - 1) % hdr->queue_size;
+
+       *session_id = l_ctx.nq.tx->notification[i].session_id;
+       *payload = l_ctx.nq.tx->notification[i].payload;
+}
+
+/* Must be called with l_ctx.notifications_mutex taken */
+static inline bool nq_notifications_flush(void)
+{
+       bool flushed = false;
+
+       while (!list_empty(&l_ctx.notifications) && !notif_queue_full()) {
+               struct nq_session *session;
+
+               session = list_first_entry(&l_ctx.notifications,
+                                          struct nq_session, list);
+               mc_dev_devel("pop %x", session->id);
+               notif_queue_push(session->id, session->payload);
+               session_state_update_internal(session, NQ_NOTIF_SENT);
+               list_del_init(&session->list);
+               flushed = true;
+       }
+
+       return flushed;
+}
+
+/*
+ * Post a command to the scheduler thread and wake it. Commands only
+ * escalate (enum order matters): a lower-priority request never overwrites
+ * a pending higher-priority one.
+ */
+static int nq_scheduler_command(enum sched_command command)
+{
+       if (IS_ERR_OR_NULL(l_ctx.tee_scheduler_thread))
+               return -EFAULT;
+
+       mutex_lock(&l_ctx.request_mutex);
+       if (l_ctx.request < command) {
+               l_ctx.request = command;
+               complete(&l_ctx.idle_complete);
+       }
+
+       mutex_unlock(&l_ctx.request_mutex);
+       return 0;
+}
+
+/*
+ * Refresh the wall-clock and raw-monotonic time shared with the SWd.
+ * NOTE(review): getnstimeofday()/getrawmonotonic() are deprecated in newer
+ * kernels in favour of ktime_get_real_ts64()/ktime_get_raw_ts64() — confirm
+ * target kernel version before migrating.
+ */
+static inline void nq_update_time(void)
+{
+       struct timespec tm;
+
+       getnstimeofday(&tm);
+       l_ctx.time->wall_clock_seconds = tm.tv_sec;
+       l_ctx.time->wall_clock_nsec = tm.tv_nsec;
+       getrawmonotonic(&tm);
+       l_ctx.time->monotonic_seconds = tm.tv_sec;
+       l_ctx.time->monotonic_nsec = tm.tv_nsec;
+}
+
+/* Dispatch an incoming notification to the IWP or MCP handler by session ID */
+static inline void nq_notif_handler(u32 id, u32 payload)
+{
+       mc_dev_devel("NQ notif for id %x payload %x", id, payload);
+       if (is_iwp_id(id))
+               l_ctx.iwp_notif_handler(id, payload);
+       else
+               l_ctx.mcp_notif_handler(id, payload);
+}
+
+/*
+ * IRQ bottom-half thread: woken by the S-SIQ interrupt handler, drains the
+ * RX notification queue and dispatches each entry, then kicks the scheduler.
+ */
+static int irq_bh_worker(void *arg)
+{
+       struct notification_queue *rx = l_ctx.nq.rx;
+
+       while (1) {
+               wait_for_completion_killable(&l_ctx.irq_bh_complete);
+
+               /* This thread can only be stopped with nq_stop */
+               if (!l_ctx.irq_bh_thread_run)
+                       break;
+
+               /* Deal with all pending notifications in one go */
+               while ((rx->hdr.write_cnt - rx->hdr.read_cnt) > 0) {
+                       struct notification nf;
+
+                       nf = rx->notification[
+                               rx->hdr.read_cnt % rx->hdr.queue_size];
+
+                       /*
+                        * Ensure read_cnt writing happens after buffer read
+                        * We want a ARM dmb() / ARM64 dmb(sy) here
+                        */
+                       smp_mb();
+                       rx->hdr.read_cnt++;
+                       /*
+                        * Ensure read_cnt writing finishes before reader
+                        * We want a ARM dsb() / ARM64 dsb(sy) here
+                        */
+                       rmb();
+                       nq_notif_handler(nf.session_id, nf.payload);
+               }
+
+               /*
+                * Finished processing notifications. It does not matter whether
+                * there actually were any notification or not.  S-SIQs can also
+                * be triggered by an SWd driver which was waiting for a FIQ.
+                * In this case the S-SIQ tells NWd that SWd is no longer idle
+                * and will need scheduling again.
+                */
+               nq_scheduler_command(NSIQ);
+       }
+       return 0;
+}
+
+/* S-SIQ top half: defer all work to the bottom-half thread */
+static irqreturn_t irq_handler(int intr, void *arg)
+{
+       /* wake up thread to continue handling this interrupt */
+       complete(&l_ctx.irq_bh_complete);
+       return IRQ_HANDLED;
+}
+
+/* Initialise a session's notification bookkeeping (no allocation) */
+void nq_session_init(struct nq_session *session, bool is_gp)
+{
+       session->id = SID_INVALID;
+       session->payload = 0;
+       INIT_LIST_HEAD(&session->list);
+       mutex_init(&session->mutex);
+       session->state = NQ_NOTIF_IDLE;
+       session->cpu_clk = 0;
+       session->is_gp = is_gp;
+}
+
+/* Remove the session from the overflow list, if queued there */
+void nq_session_exit(struct nq_session *session)
+{
+       mutex_lock(&l_ctx.notifications_mutex);
+       if (!list_empty(&session->list))
+               list_del(&session->list);
+       mutex_unlock(&l_ctx.notifications_mutex);
+}
+
+/*
+ * Update a session's state from the receive path. Send-side states
+ * (below NQ_NOTIF_RECEIVED) are managed internally and ignored here.
+ */
+void nq_session_state_update(struct nq_session *session,
+                            enum nq_notif_state state)
+{
+       if (state < NQ_NOTIF_RECEIVED)
+               return;
+
+       session_state_update_internal(session, state);
+}
+
+/*
+ * Send a notification for a session to the SWd, or park it on the overflow
+ * list when the TX queue is full or other notifications are already pending.
+ * Returns 0 on success, -EAGAIN if the session was already queued, -EPROTO
+ * if the scheduler could not be kicked.
+ */
+int nq_session_notify(struct nq_session *session, u32 id, u32 payload)
+{
+       int ret = 0;
+
+       mutex_lock(&l_ctx.notifications_mutex);
+       session->id = id;
+       if (!list_empty(&l_ctx.notifications) || notif_queue_full()) {
+               if (!list_empty(&session->list)) {
+                       ret = -EAGAIN;
+                       /*
+                        * session->payload still holds the queued value here:
+                        * assigning it unconditionally above made this
+                        * payload-change detection dead code.
+                        */
+                       if (payload != session->payload) {
+                               mc_dev_err(ret,
+                                          "skip %x payload change %x -> %x",
+                                          session->id, session->payload,
+                                          payload);
+                       } else {
+                               mc_dev_devel("skip %x payload %x",
+                                            session->id, payload);
+                       }
+               } else {
+                       mc_dev_devel("push %x payload %x", session->id,
+                                    payload);
+                       session->payload = payload;
+                       list_add_tail(&session->list, &l_ctx.notifications);
+                       session_state_update_internal(session, NQ_NOTIF_QUEUED);
+               }
+
+               nq_notifications_flush();
+
+               if (nq_scheduler_command(YIELD))
+                       ret = -EPROTO;
+       } else {
+               mc_dev_devel("send %x payload %x", session->id, payload);
+               session->payload = payload;
+               notif_queue_push(session->id, payload);
+               session_state_update_internal(session, NQ_NOTIF_SENT);
+               if (nq_scheduler_command(NSIQ))
+                       ret = -EPROTO;
+       }
+
+       mutex_unlock(&l_ctx.notifications_mutex);
+       return ret;
+}
+
+/*
+ * Return a human-readable name for the session's notification state and,
+ * optionally, the CPU clock timestamp of the last state change.
+ */
+const char *nq_session_state(const struct nq_session *session, u64 *cpu_clk)
+{
+       if (cpu_clk)
+               *cpu_clk = session->cpu_clk;
+
+       switch (session->state) {
+       case NQ_NOTIF_IDLE:
+               return "idle";
+       case NQ_NOTIF_QUEUED:
+               return "queued";
+       case NQ_NOTIF_SENT:
+               return "sent";
+       case NQ_NOTIF_RECEIVED:
+               return "received";
+       case NQ_NOTIF_CONSUMED:
+               return "consumed";
+       case NQ_NOTIF_DEAD:
+               return "dead";
+       }
+       return "error";
+}
+
+/* debugfs read for the crash dump captured by nq_dump_status() */
+static ssize_t debug_crashdump_read(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       if (l_ctx.dump.off)
+               return simple_read_from_buffer(user_buf, count, ppos,
+                                              l_ctx.dump.buf, l_ctx.dump.off);
+
+       return 0;
+}
+
+static const struct file_operations debug_crashdump_ops = {
+       .read = debug_crashdump_read,
+       .llseek = default_llseek,
+};
+
+/* debugfs read for the last SMC commands, via the generic debug helper */
+static ssize_t debug_smclog_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       return debug_generic_read(file, user_buf, count, ppos,
+                                 mc_fastcall_debug_smclog);
+}
+
+static const struct file_operations debug_smclog_ops = {
+       .read = debug_smclog_read,
+       .llseek = default_llseek,
+       .open = debug_generic_open,
+       .release = debug_generic_release,
+};
+
+/*
+ * Collect the TEE halt/fault status via fc_info() fastcalls, log it, and
+ * capture it into l_ctx.dump for later retrieval through the "crashdump"
+ * debugfs file (created here, together with "last_smc_commands").
+ * If a dump already exists (l_ctx.dump.off != 0) only the kernel log is
+ * updated; the existing dump buffer is preserved.
+ */
+static void nq_dump_status(void)
+{
+       static const struct {
+               unsigned int index;
+               const char *msg;
+       } status_map[] = {
+               /**< MobiCore control flags */
+               { MC_EXT_INFO_ID_FLAGS, "flags"},
+               /**< MobiCore halt condition code */
+               { MC_EXT_INFO_ID_HALT_CODE, "haltCode"},
+               /**< MobiCore halt condition instruction pointer */
+               { MC_EXT_INFO_ID_HALT_IP, "haltIp"},
+               /**< MobiCore fault counter */
+               { MC_EXT_INFO_ID_FAULT_CNT, "faultRec.cnt"},
+               /**< MobiCore last fault cause */
+               { MC_EXT_INFO_ID_FAULT_CAUSE, "faultRec.cause"},
+               /**< MobiCore last fault meta */
+               { MC_EXT_INFO_ID_FAULT_META, "faultRec.meta"},
+               /**< MobiCore last fault threadid */
+               { MC_EXT_INFO_ID_FAULT_THREAD, "faultRec.thread"},
+               /**< MobiCore last fault instruction pointer */
+               { MC_EXT_INFO_ID_FAULT_IP, "faultRec.ip"},
+               /**< MobiCore last fault stack pointer */
+               { MC_EXT_INFO_ID_FAULT_SP, "faultRec.sp"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_DFSR, "faultRec.arch.dfsr"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_ADFSR, "faultRec.arch.adfsr"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_DFAR, "faultRec.arch.dfar"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_IFSR, "faultRec.arch.ifsr"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_AIFSR, "faultRec.arch.aifsr"},
+               /**< MobiCore last fault ARM arch information */
+               { MC_EXT_INFO_ID_FAULT_ARCH_IFAR, "faultRec.arch.ifar"},
+               /**< MobiCore configured by Daemon via fc_init flag */
+               { MC_EXT_INFO_ID_MC_CONFIGURED, "mcData.flags"},
+               /**< MobiCore exception handler last partner */
+               { MC_EXT_INFO_ID_MC_EXC_PARTNER, "mcExcep.partner"},
+               /**< MobiCore exception handler last peer */
+               { MC_EXT_INFO_ID_MC_EXC_IPCPEER, "mcExcep.peer"},
+               /**< MobiCore exception handler last IPC message */
+               { MC_EXT_INFO_ID_MC_EXC_IPCMSG, "mcExcep.cause"},
+               /**< MobiCore exception handler last IPC data */
+               {MC_EXT_INFO_ID_MC_EXC_IPCDATA, "mcExcep.meta"},
+       };
+
+       char uuid_str[33];
+       int ret = 0;
+       size_t i;
+
+       /* -EBUSY suppresses kasnprintf below but keeps the log output */
+       if (l_ctx.dump.off)
+               ret = -EBUSY;
+
+       mc_dev_info("TEE HALTED");
+       if (l_ctx.tee_version) {
+               mc_dev_info("TEE version: %s", l_ctx.tee_version);
+               if (ret >= 0)
+                       ret = kasnprintf(&l_ctx.dump, "TEE version: %s\n",
+                                        l_ctx.tee_version);
+       }
+
+       mc_dev_info("Status dump:");
+       for (i = 0; i < (size_t)ARRAY_SIZE(status_map); i++) {
+               u32 info;
+
+               if (fc_info(status_map[i].index, NULL, &info))
+                       return;
+
+               mc_dev_info("  %-20s= 0x%08x", status_map[i].msg, info);
+               if (ret >= 0)
+                       ret = kasnprintf(&l_ctx.dump, "%-20s= 0x%08x\n",
+                                        status_map[i].msg, info);
+       }
+
+       /* construct UUID string */
+       for (i = 0; i < 4; i++) {
+               u32 info;
+               size_t j;
+
+               if (fc_info(MC_EXT_INFO_ID_MC_EXC_UUID + i, NULL, &info))
+                       return;
+
+               for (j = 0; j < sizeof(info); j++) {
+                       snprintf(&uuid_str[(i * sizeof(info) + j) * 2], 3,
+                                "%02x", (info >> (j * 8)) & 0xff);
+               }
+       }
+
+       mc_dev_info("  %-20s= 0x%s", "mcExcep.uuid", uuid_str);
+       if (ret >= 0)
+               ret = kasnprintf(&l_ctx.dump, "%-20s= 0x%s\n", "mcExcep.uuid",
+                                uuid_str);
+
+       /* On allocation/format failure, discard the partial dump */
+       if (ret < 0) {
+               kfree(l_ctx.dump.buf);
+               l_ctx.dump.off = 0;
+               return;
+       }
+
+       debugfs_create_file("crashdump", 0400, g_ctx.debug_dir, NULL,
+                           &debug_crashdump_ops);
+       debugfs_create_file("last_smc_commands", 0400, g_ctx.debug_dir, NULL,
+                           &debug_smclog_ops);
+}
+
+/* Dump TEE status then notify registered listeners that the TEE stopped */
+static void nq_handle_tee_crash(void)
+{
+       /*
+        * Do not change the call order: the debugfs nq status file needs
+        * to be created before requesting the Daemon to read it.
+        */
+       nq_dump_status();
+       blocking_notifier_call_chain(&l_ctx.tee_stop_notifiers, 0, NULL);
+}
+
+/* Write the sleep request flag into the shared MCP buffer, under its lock */
+static inline void set_sleep_mode_rq(u16 sleep_req)
+{
+       mutex_lock(&l_ctx.buffer_mutex);
+       l_ctx.mcp_buffer->flags.sleep_mode.sleep_req = sleep_req;
+       mutex_unlock(&l_ctx.buffer_mutex);
+}
+
+/* True when the SWd has acknowledged it is ready to sleep */
+static inline bool nq_suspended(void)
+{
+       struct mcp_flags *flags = &l_ctx.mcp_buffer->flags;
+       bool ret;
+
+       mutex_lock(&l_ctx.buffer_mutex);
+       ret = flags->sleep_mode.ready_to_sleep & MC_STATE_READY_TO_SLEEP;
+       if (!ret) {
+               mc_dev_devel("IDLE=%d", flags->schedule);
+               mc_dev_devel("Request Sleep=%d", flags->sleep_mode.sleep_req);
+               mc_dev_devel("Sleep Ready=%d",
+                            flags->sleep_mode.ready_to_sleep);
+       }
+
+       mutex_unlock(&l_ctx.buffer_mutex);
+       return ret;
+}
+
+/*
+ * Get the requested SWd sleep timeout value (ms)
+ * - if the timeout is -1, wait indefinitely
+ * - if the timeout is 0, re-schedule immediately (timeouts in us in the SWd)
+ * - otherwise sleep for the required time
+ * returns true if sleep is required, false otherwise
+ */
+static inline bool nq_get_idle_timeout(s32 *timeout)
+{
+       u32 schedule;
+       bool ret;
+
+       mutex_lock(&l_ctx.buffer_mutex);
+       schedule = l_ctx.mcp_buffer->flags.schedule;
+       if (schedule == MC_FLAG_SCHEDULE_IDLE) {
+               /* *timeout is only valid when true is returned */
+               *timeout = l_ctx.mcp_buffer->flags.timeout_ms;
+               ret = true;
+       } else {
+               ret = false;
+       }
+
+       mutex_unlock(&l_ctx.buffer_mutex);
+       return ret;
+}
+
+/* Accessor: shared MCP message buffer */
+union mcp_message *nq_get_mcp_buffer(void)
+{
+       return &l_ctx.mcp_buffer->message;
+}
+
+/* Accessor: shared interworld (GP) session buffer */
+struct interworld_session *nq_get_iwp_buffer(void)
+{
+       return l_ctx.iwp_buffer;
+}
+
+/* Record the TEE version string pointer used in crash dumps */
+void nq_set_version_ptr(char *version)
+{
+       l_ctx.tee_version = version;
+}
+
+/* Register the notification callback for either the IWP or the MCP path */
+void nq_register_notif_handler(void (*handler)(u32 id, u32 payload), bool iwp)
+{
+       if (iwp)
+               l_ctx.iwp_notif_handler = handler;
+       else
+               l_ctx.mcp_notif_handler = handler;
+}
+
+/* Subscribe to TEE stop (crash) events */
+int nq_register_tee_stop_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&l_ctx.tee_stop_notifiers, nb);
+}
+
+/* Unsubscribe from TEE stop (crash) events */
+int nq_unregister_tee_stop_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&l_ctx.tee_stop_notifiers,
+                                                 nb);
+}
+
+/*
+ * Copy the captured crash dump to user space.
+ * Returns the number of bytes copied, 0 when no dump exists, or -EFAULT.
+ */
+ssize_t nq_get_stop_message(char __user *buffer, size_t size)
+{
+       size_t max_len = l_ctx.dump.size - l_ctx.dump.off;
+       char *buf = l_ctx.dump.buf;
+       int ret;
+
+       if (!l_ctx.dump.off || !max_len)
+               return 0;
+
+       if (size > max_len)
+               size = max_len;
+
+       ret = copy_to_user(buffer, buf, size);
+       if (ret)
+               return -EFAULT;
+
+       return size;
+}
+
+/* Mark the TEE as hung and make the scheduler thread exit its loop */
+void nq_signal_tee_hung(void)
+{
+       mc_dev_devel("force stop the notification queue");
+       /* Stop the tee_scheduler thread */
+       l_ctx.tee_hung = true;
+       l_ctx.tee_scheduler_run = false;
+       complete(&l_ctx.idle_complete);
+       nq_scheduler_command(NONE);
+}
+
+/*
+ * Send a SUSPEND/RESUME command to the scheduler thread and wait for it
+ * to confirm the resulting state. Returns 0 when the expected state was
+ * reached, -EPERM otherwise, -EFAULT if the scheduler is not running.
+ */
+static int nq_scheduler_pm_command(enum sched_command command)
+{
+       int ret = -EPERM;
+
+       if (IS_ERR_OR_NULL(l_ctx.tee_scheduler_thread))
+               return -EFAULT;
+
+       mutex_lock(&l_ctx.sleep_mutex);
+
+       /* Send request */
+       nq_scheduler_command(command);
+
+       /* Wait for scheduler to reply */
+       wait_for_completion(&l_ctx.sleep_complete);
+       mutex_lock(&l_ctx.request_mutex);
+       if (command == SUSPEND) {
+               if (l_ctx.suspended)
+                       ret = 0;
+       } else {
+               if (!l_ctx.suspended)
+                       ret = 0;
+       }
+
+       mutex_unlock(&l_ctx.request_mutex);
+       mutex_unlock(&l_ctx.sleep_mutex);
+       return ret;
+}
+
+/*
+ * Boot the TEE: register the MCI shared buffers via the INIT fastcall,
+ * fill in the init values (IRQ, time, IWP buffer), issue the first N-SIQ
+ * and poll until the TEE reports MC_STATUS_INITIALIZED.
+ * Returns 0 on success or a negative error code.
+ */
+static int nq_boot_tee(void)
+{
+       size_t q_len = ALIGN(2 * (sizeof(struct notification_queue_header) +
+               NQ_NUM_ELEMS * sizeof(struct notification)), 4);
+       struct irq_data *irq_d = irq_get_irq_data(l_ctx.irq);
+       int ret;
+
+       /* Call the INIT fastcall to setup shared buffers */
+       ret = fc_init(virt_to_phys(l_ctx.mci),
+                     (uintptr_t)l_ctx.mcp_buffer - (uintptr_t)l_ctx.mci, q_len,
+                     sizeof(*l_ctx.mcp_buffer));
+       logging_run();
+       if (ret)
+               return ret;
+
+       /* Set initialization values */
+#if defined(MC_INTR_SSIQ_SWD)
+       l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_IRQ;
+       l_ctx.mcp_buffer->message.init_values.irq = MC_INTR_SSIQ_SWD;
+#endif
+       l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_TIME;
+       if (irq_d)
+               l_ctx.mcp_buffer->message.init_values.irq = irq_d->hwirq;
+       l_ctx.mcp_buffer->message.init_values.time_ofs =
+               (u32)((uintptr_t)l_ctx.time - (uintptr_t)l_ctx.mci);
+       l_ctx.mcp_buffer->message.init_values.time_len =
+                       sizeof(*l_ctx.time);
+
+       l_ctx.mcp_buffer->message.init_values.flags |= MC_IV_FLAG_IWP;
+       l_ctx.mcp_buffer->message.init_values.iws_buf_ofs =
+               (u64)((uintptr_t)l_ctx.iwp_buffer - (uintptr_t)l_ctx.mci);
+       l_ctx.mcp_buffer->message.init_values.iws_buf_size =
+               MAX_IW_SESSION * sizeof(struct interworld_session);
+
+       /* First empty N-SIQ to setup of the MCI structure */
+       ret = fc_nsiq(0, 0);
+       logging_run();
+       if (ret)
+               return ret;
+
+       /*
+        * Wait until the TEE state switches to MC_STATUS_INITIALIZED
+        * It is assumed that it always switches state at some point
+        */
+       do {
+               u32 status = 0;
+               u32 timeslice;
+
+               ret = fc_info(MC_EXT_INFO_ID_MCI_VERSION, &status, NULL);
+               logging_run();
+               if (ret)
+                       return ret;
+
+               switch (status) {
+               case MC_STATUS_NOT_INITIALIZED:
+                       /* Switch to the TEE to give it more CPU time. */
+                       /* Positive EAGAIN is the internal "keep polling" code */
+                       ret = EAGAIN;
+                       for (timeslice = 0; timeslice < 10; timeslice++) {
+                               int tmp_ret = fc_yield(timeslice);
+
+                               logging_run();
+                               if (tmp_ret)
+                                       return tmp_ret;
+                       }
+
+                       /* No need to loop like mad */
+                       if (ret == EAGAIN)
+                               usleep_range(100, 500);
+
+                       break;
+               case MC_STATUS_HALT:
+                       ret = -ENODEV;
+                       nq_handle_tee_crash();
+                       mc_dev_err(ret, "halt during init, state 0x%x", status);
+                       return ret;
+               case MC_STATUS_INITIALIZED:
+                       /* ret is 0 here (from fc_info), so the loop exits */
+                       mc_dev_devel("ready");
+                       break;
+               default:
+                       /* MC_STATUS_BAD_INIT or anything else */
+                       ret = -EIO;
+                       mc_dev_err(ret, "MCI init failed, state 0x%x", status);
+                       return ret;
+               }
+       } while (ret == EAGAIN);
+
+       return ret;
+}
+
+/*
+ * Put the scheduler to sleep for timeout_ms (negative = indefinitely),
+ * in DEFAULT_TIMEOUT_MS slices so a wake-up via idle_complete is seen.
+ * Returns true if the full timeout elapsed, false when woken early
+ * (always false for an infinite timeout).
+ */
+static inline bool tee_sleep(s32 timeout_ms)
+{
+       bool infinite_timeout = timeout_ms < 0;
+
+       /* TEE is going to sleep */
+       mc_clock_disable();
+       do {
+               s32 local_timeout_ms;
+               unsigned long jiffies;
+
+               if (infinite_timeout) {
+                       local_timeout_ms = DEFAULT_TIMEOUT_MS;
+               } else {
+                       local_timeout_ms = timeout_ms;
+                       if (local_timeout_ms > DEFAULT_TIMEOUT_MS)
+                               local_timeout_ms = DEFAULT_TIMEOUT_MS;
+               }
+
+               jiffies = msecs_to_jiffies(local_timeout_ms);
+               if (wait_for_completion_timeout(&l_ctx.idle_complete, jiffies))
+                       break;
+
+               if (!infinite_timeout)
+                       timeout_ms -= local_timeout_ms;
+       } while (timeout_ms);
+
+       /* TEE is getting back to work */
+       mc_clock_enable();
+       return timeout_ms == 0;
+}
+
+/*
+ * Ask the TEE to migrate to the CPU selected in l_ctx.next_cpu.
+ * Returns 0 on success, -EINVAL for an invalid or offline CPU, or the
+ * fastcall error code.
+ */
+static inline int nq_switch_core(void)
+{
+       int cpu = l_ctx.next_cpu;
+       int core_id;
+       int ret;
+
+       /*
+        * Also bound the index by the cpu_ids[] table: nr_cpu_ids can exceed
+        * the number of platform-provided entries, which would make
+        * cpu_ids[cpu] an out-of-bounds read.
+        */
+       if (cpu < 0 || cpu >= nr_cpu_ids ||
+           cpu >= (int)ARRAY_SIZE(cpu_ids) || !cpu_online(cpu))
+               return -EINVAL;
+
+       core_id = cpu_ids[cpu];
+       ret = fc_switch_core(core_id);
+       logging_run();
+       if (ret) {
+               mc_dev_err(ret, "failed to switch core from %d to %d",
+                          l_ctx.active_cpu, cpu);
+               return ret;
+       }
+
+       mc_dev_devel("switched core from %d to %d", l_ctx.active_cpu, cpu);
+       l_ctx.active_cpu = cpu;
+       return ret;
+}
+
+/*
+ * Main TEE scheduler loop, run in its own kthread (started by nq_start()).
+ *
+ * This thread, and only this thread, schedules the SWd. Hence, reading the
+ * idle status and its associated timeout is safe from race conditions.
+ *
+ * Responsibilities visible below:
+ *  - register the trace/log buffer with the SWd and flush it after each run
+ *  - boot the TEE (nq_boot_tee()) and publish the result via boot_complete
+ *  - honour NONE/YIELD/NSIQ/SUSPEND/RESUME requests posted under
+ *    request_mutex, and perform pending core switches
+ *  - detect a halted TEE (MC_STATE_FLAG_TEE_HALT_MASK) and run the crash
+ *    handler before exiting
+ */
+static int tee_scheduler(void *arg)
+{
+       int timeslice = 0;      /* Actually scheduling period */
+       int ret = 0;
+
+       /* Enable TEE clock */
+       mc_clock_enable();
+
+       /* Logging: hand the pre-allocated trace buffer to the SWd */
+       if (l_ctx.log_buffer_size) {
+               ret = fc_trace_init(l_ctx.log_buffer, l_ctx.log_buffer_size);
+               if (!ret) {
+                       logging_run();
+                       l_ctx.log_buffer_busy = true;
+                       mc_dev_info("registered log buffer of size %d",
+                                   l_ctx.log_buffer_size);
+               } else {
+                       mc_dev_err(ret, "failed to register log buffer");
+                       /* Ignore error: logging is not vital */
+                       ret = 0;
+               }
+       } else {
+               mc_dev_info("no log buffer to register");
+       }
+
+       /* Bootup: result is reported to nq_start() via boot_complete */
+       l_ctx.boot_ret = nq_boot_tee();
+       complete(&l_ctx.boot_complete);
+       if (l_ctx.boot_ret) {
+               mc_clock_disable();
+               return l_ctx.boot_ret;
+       }
+
+       /* Run */
+       while (1) {
+               s32 timeout_ms = -1;
+               bool pm_request = false;
+               u8 tee_flags;
+
+               if (l_ctx.suspended || nq_get_idle_timeout(&timeout_ms)) {
+                       /* If timeout is 0 we keep scheduling the SWd */
+                       if (!timeout_ms)
+                               nq_scheduler_command(NSIQ);
+                       else if (tee_sleep(timeout_ms))
+                               /* Timed out, force SWd schedule */
+                               nq_scheduler_command(NSIQ);
+               }
+
+               /*
+                * Potential exit causes:
+                * 1) nq_stop is called: just stop the thread (no crash dump)
+                * 2) nq_signal_tee_hung: breaks the loop and handle the hang as
+                *    a crash
+                * 3) The thread detects a TEE crash and breaks the loop
+                */
+               if (!l_ctx.tee_scheduler_run)
+                       break;
+
+               /* Get requested command if any */
+               mutex_lock(&l_ctx.request_mutex);
+               switch (l_ctx.request) {
+               case NONE:
+                       break;
+               case YIELD:
+                       /* Yield forced: increment timeslice */
+                       timeslice++;
+                       break;
+               case NSIQ:
+                       timeslice = 0;
+                       break;
+               case SUSPEND:
+                       /* Force N_SIQ */
+                       timeslice = 0;
+                       set_sleep_mode_rq(MC_FLAG_REQ_TO_SLEEP);
+                       pm_request = true;
+                       break;
+               case RESUME:
+                       /* Force N_SIQ */
+                       timeslice = 0;
+                       set_sleep_mode_rq(MC_FLAG_NO_SLEEP_REQ);
+                       pm_request = true;
+                       break;
+               }
+
+               /* Switch core; on success, follow the SWd to the new CPU */
+               if (l_ctx.next_cpu != l_ctx.active_cpu && !nq_switch_core()) {
+                       cpumask_t cpu_mask;
+
+                       cpumask_clear(&cpu_mask);
+                       cpumask_set_cpu(l_ctx.active_cpu, &cpu_mask);
+                       nq_set_cpus_allowed(l_ctx.tee_scheduler_thread,
+                                           cpu_mask);
+               }
+
+               l_ctx.request = NONE;
+               nq_update_time();
+               mutex_unlock(&l_ctx.request_mutex);
+
+               /* Reset timeout so we don't loop if SWd halted */
+               mutex_lock(&l_ctx.buffer_mutex);
+               l_ctx.mcp_buffer->flags.timeout_ms = -1;
+               mutex_unlock(&l_ctx.buffer_mutex);
+
+               if (timeslice--) {
+                       /* Resume SWd from where it was */
+                       fc_yield(timeslice);
+               } else {
+                       u32 session_id = 0;
+                       u32 payload = 0;
+
+                       retrieve_last_session_payload(&session_id, &payload);
+                       timeslice = SCHEDULING_FREQ;
+
+                       /* Call SWd scheduler */
+                       fc_nsiq(session_id, payload);
+               }
+
+               /* Always flush log buffer after the SWd has run */
+               logging_run();
+
+               /* Check crash */
+               mutex_lock(&l_ctx.buffer_mutex);
+               tee_flags = l_ctx.mcp_buffer->flags.tee_flags;
+               mutex_unlock(&l_ctx.buffer_mutex);
+               if (tee_flags & MC_STATE_FLAG_TEE_HALT_MASK) {
+                       ret = -EHOSTUNREACH;
+                       mc_dev_err(ret, "TEE halted, exiting");
+                       break;
+               }
+
+               /* Should have suspended by now if requested */
+               mutex_lock(&l_ctx.request_mutex);
+               if (pm_request) {
+                       l_ctx.suspended = nq_suspended();
+                       complete(&l_ctx.sleep_complete);
+               }
+
+               mutex_unlock(&l_ctx.request_mutex);
+
+               /* Flush pending notifications if possible */
+               mutex_lock(&l_ctx.notifications_mutex);
+               if (nq_notifications_flush())
+                       complete(&l_ctx.idle_complete);
+
+               mutex_unlock(&l_ctx.notifications_mutex);
+       }
+
+       mc_dev_devel("loop exit, ret is %d", ret);
+       if (ret || l_ctx.tee_hung) {
+               /* There is an error, the tee must have crashed */
+               nq_handle_tee_crash();
+       }
+
+       /* Logging: return ownership of the trace buffer to the NWd */
+       ret = fc_trace_deinit();
+       if (!ret)
+               l_ctx.log_buffer_busy = false;
+       else
+               mc_dev_err(ret, "failed to unregister log buffer");
+
+       mc_clock_disable();
+       return ret;
+}
+
+/*
+ * Request the scheduler thread to put the TEE to sleep.
+ * Returns the nq_scheduler_pm_command() result.
+ */
+int nq_suspend(void)
+{
+       return nq_scheduler_pm_command(SUSPEND);
+}
+
+/*
+ * Request the scheduler thread to wake the TEE up again.
+ * Returns the nq_scheduler_pm_command() result.
+ */
+int nq_resume(void)
+{
+       return nq_scheduler_pm_command(RESUME);
+}
+
+/*
+ * Start the TEE: register the S-SIQ interrupt, start the interrupt
+ * bottom-half thread and the scheduler thread, then wait for the TEE
+ * boot result posted by tee_scheduler().
+ *
+ * Returns 0 on success or a negative errno.  On failure, resources
+ * acquired here (IRQ, bottom-half thread) are released again — the
+ * original code leaked them on the error paths.
+ */
+int nq_start(void)
+{
+       int ret;
+
+       /* Make sure we have the interrupt before going on */
+#if defined(CONFIG_OF)
+       l_ctx.irq = irq_of_parse_and_map(g_ctx.mcd->of_node, 0);
+#endif
+#if defined(MC_INTR_SSIQ)
+       if (l_ctx.irq <= 0)
+               l_ctx.irq = MC_INTR_SSIQ;
+#endif
+
+       if (l_ctx.irq <= 0) {
+               ret = -EINVAL;
+               mc_dev_err(ret, "No IRQ number, aborting");
+               return ret;
+       }
+
+       ret = request_irq(l_ctx.irq, irq_handler, IRQF_TRIGGER_RISING,
+                         "trustonic", NULL);
+       if (ret)
+               return ret;
+
+       /*
+        * Initialize the time structure for SWd
+        * At this stage, we don't know if the SWd needs to get the REE time and
+        * we set it anyway.
+        */
+       nq_update_time();
+
+       /* Setup S-SIQ interrupt handler and its bottom-half */
+       l_ctx.irq_bh_thread_run = true;
+       l_ctx.irq_bh_thread = kthread_run(irq_bh_worker, NULL, "tee_irq_bh");
+       if (IS_ERR(l_ctx.irq_bh_thread)) {
+               ret = PTR_ERR(l_ctx.irq_bh_thread);
+               mc_dev_err(ret, "irq_bh_worker thread creation failed");
+               /* Fix: do not leak the IRQ on failure */
+               goto err_free_irq;
+       }
+
+       /* Scheduler */
+       l_ctx.tee_scheduler_run = true;
+       l_ctx.tee_scheduler_thread = kthread_create(tee_scheduler, NULL,
+                                                   "tee_scheduler");
+       if (IS_ERR(l_ctx.tee_scheduler_thread)) {
+               ret = PTR_ERR(l_ctx.tee_scheduler_thread);
+               mc_dev_err(ret, "tee_scheduler thread creation failed");
+               /* Fix: stop the bottom-half thread and free the IRQ */
+               goto err_stop_bh;
+       }
+
+       /* The scheduler/fastcall thread MUST run on CPU 0 at startup */
+       nq_set_cpus_allowed(l_ctx.tee_scheduler_thread, CPU_MASK_CPU0);
+       wake_up_process(l_ctx.tee_scheduler_thread);
+
+       wait_for_completion(&l_ctx.boot_complete);
+       if (l_ctx.boot_ret) {
+               /* The scheduler thread exits by itself on boot failure */
+               ret = l_ctx.boot_ret;
+               goto err_stop_bh;
+       }
+
+       complete(&l_ctx.idle_complete);
+       return 0;
+
+err_stop_bh:
+       l_ctx.irq_bh_thread_run = false;
+       complete(&l_ctx.irq_bh_complete);
+       kthread_stop(l_ctx.irq_bh_thread);
+err_free_irq:
+       free_irq(l_ctx.irq, NULL);
+       return ret;
+}
+
+/*
+ * Stop the TEE: counterpart of nq_start().
+ *
+ * Each thread's run flag is cleared and its completion signalled first,
+ * so that the thread wakes from its wait and notices it must exit before
+ * kthread_stop() reaps it.  Finally the S-SIQ interrupt is released.
+ */
+void nq_stop(void)
+{
+       /* Scheduler */
+       l_ctx.tee_scheduler_run = false;
+       complete(&l_ctx.idle_complete);
+       kthread_stop(l_ctx.tee_scheduler_thread);
+
+       /* NQ */
+       l_ctx.irq_bh_thread_run = false;
+       complete(&l_ctx.irq_bh_complete);
+       kthread_stop(l_ctx.irq_bh_thread);
+       free_irq(l_ctx.irq, NULL);
+}
+
+/*
+ * One-time initialisation of the notification queue module.
+ *
+ * Registers a CPU hotplug callback, initialises the TEE clock and the
+ * log buffer, then allocates one contiguous MCI region and carves it
+ * into: TX/RX notification queues, the MCP buffer, the interworld
+ * session buffer and the SWd time structure.
+ *
+ * Returns 0 on success, negative errno on failure; everything acquired
+ * before the failure point is released via the goto cleanup chain.
+ */
+int nq_init(void)
+{
+       size_t q_len, mci_len;
+       unsigned long mci;
+       int ret;
+
+       if (nr_cpu_ids) {
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+               ret = register_cpu_notifier(&cpu_notifer);
+#else
+               ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                               "tee/trustonic:online",
+                                               NULL, nq_cpu_down_prep);
+#endif
+               /* ExySp : Kinibi 410 */
+               if (ret < 0) {
+                       mc_dev_err(ret, "cpu online callback setup failed");
+                       goto err_register;
+               }
+       }
+
+       ret = mc_clock_init();
+       if (ret)
+               goto err_clock;
+
+       ret = logging_init(&l_ctx.log_buffer, &l_ctx.log_buffer_size);
+       if (ret)
+               goto err_logging;
+
+       /* Setup crash handler function list */
+       BLOCKING_INIT_NOTIFIER_HEAD(&l_ctx.tee_stop_notifiers);
+
+       mutex_init(&l_ctx.buffer_mutex);
+       init_completion(&l_ctx.irq_bh_complete);
+       /* Setup notification queue mutex */
+       mutex_init(&l_ctx.notifications_mutex);
+       INIT_LIST_HEAD(&l_ctx.notifications);
+
+       /* NQ_NUM_ELEMS must be power of 2 */
+       q_len = ALIGN(2 * (sizeof(struct notification_queue_header) +
+                          NQ_NUM_ELEMS * sizeof(struct notification)), 4);
+
+       mci_len = q_len +
+               sizeof(*l_ctx.time) +
+               sizeof(*l_ctx.mcp_buffer) +
+               MAX_IW_SESSION * sizeof(struct interworld_session);
+
+       l_ctx.order = get_order(mci_len);
+
+       mci = __get_free_pages(GFP_USER | __GFP_ZERO, l_ctx.order);
+       if (!mci) {
+               /* Fix: used to fall through with ret == 0, reporting success */
+               ret = -ENOMEM;
+               goto err_mci;
+       }
+
+       /*
+        * Fix: keep the base address of the allocation — nq_exit() frees
+        * l_ctx.mci, which was never assigned before.
+        * NOTE(review): assumes l_ctx.mci is a pointer field (nq_exit()
+        * casts it to unsigned long) — confirm against the l_ctx definition.
+        */
+       l_ctx.mci = (void *)mci;
+
+       l_ctx.nq.tx = (struct notification_queue *)mci;
+       l_ctx.nq.tx->hdr.queue_size = NQ_NUM_ELEMS;
+       mci += sizeof(struct notification_queue_header) +
+           l_ctx.nq.tx->hdr.queue_size * sizeof(struct notification);
+
+       l_ctx.nq.rx = (struct notification_queue *)mci;
+       l_ctx.nq.rx->hdr.queue_size = NQ_NUM_ELEMS;
+       mci += sizeof(struct notification_queue_header) +
+           l_ctx.nq.rx->hdr.queue_size * sizeof(struct notification);
+
+       l_ctx.mcp_buffer = (void *)ALIGN(mci, 8);
+       mci += sizeof(struct mcp_buffer);
+
+       /* interworld_buffer contains:
+        *   MAX_IW_SESSION session, and for each session S(i), we could have
+        *   D(i) extra data, NB: D(i) could be different from D(j)
+        *
+        * v0: D(i) = 0
+        */
+       /* mci should be already 8 bytes aligned */
+       l_ctx.iwp_buffer = (void *)ALIGN(mci, 8);
+       mci += MAX_IW_SESSION * sizeof(struct interworld_session);
+
+       l_ctx.time = (void *)ALIGN(mci, 8);
+
+       /* Scheduler */
+       init_completion(&l_ctx.boot_complete);
+       init_completion(&l_ctx.idle_complete);
+       init_completion(&l_ctx.sleep_complete);
+       mutex_init(&l_ctx.sleep_mutex);
+       mutex_init(&l_ctx.request_mutex);
+       return 0;
+
+err_mci:
+       logging_exit(l_ctx.log_buffer_busy);
+err_logging:
+       mc_clock_exit();
+err_clock:
+       if (nr_cpu_ids)
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+               unregister_cpu_notifier(&cpu_notifer);
+#else
+               cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
+err_register:
+       return ret;
+}
+
+/*
+ * Release everything acquired by nq_init(): the crash dump buffer (if one
+ * was captured), the MCI pages, the log buffer, the CPU hotplug callback
+ * and the TEE clock.
+ *
+ * NOTE(review): l_ctx.mci is expected to hold the base of the MCI
+ * allocation made in nq_init() — confirm it is actually assigned there.
+ */
+void nq_exit(void)
+{
+       if (l_ctx.dump.off)
+               kfree(l_ctx.dump.buf);
+
+       free_pages((unsigned long)l_ctx.mci, l_ctx.order);
+       logging_exit(l_ctx.log_buffer_busy);
+       if (nr_cpu_ids)
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+               unregister_cpu_notifier(&cpu_notifer);
+#else
+               cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
+       mc_clock_exit();
+}
+
+/* Return the CPU the TEE currently runs on (last successful core switch) */
+int mc_active_core(void)
+{
+       return l_ctx.active_cpu;
+}
+
+/*
+ * Request that the TEE be moved to the given CPU.
+ *
+ * Returns -EINVAL for an out-of-range CPU, -EPERM if the CPU is offline,
+ * 0 otherwise.  The switch itself is performed asynchronously by the
+ * tee_scheduler thread (see nq_switch_core()).
+ */
+int mc_switch_core(int cpu)
+{
+       /*
+        * Fix: reject negative values explicitly instead of relying on the
+        * signed/unsigned promotion against nr_cpu_ids; this also matches
+        * the check in nq_switch_core().
+        */
+       if (cpu < 0 || cpu >= nr_cpu_ids)
+               return -EINVAL;
+
+       if (!cpu_online(cpu))
+               return -EPERM;
+
+       l_ctx.next_cpu = cpu;
+       /* Ping the tee_scheduler thread to update */
+       nq_scheduler_command(YIELD);
+
+       return 0;
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/nq.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/nq.h
new file mode 100755 (executable)
index 0000000..7fbbf28
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_NQ_H_
+#define _MC_NQ_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+/** Max number of interworld sessions allocated in the MCI buffer */
+#define MAX_IW_SESSION 256
+
+/* Lifecycle states of a notification/session (see struct nq_session) */
+enum nq_notif_state {
+       NQ_NOTIF_IDLE,          /* Nothing happened yet */
+       NQ_NOTIF_QUEUED,        /* Notification in overflow queue */
+       NQ_NOTIF_SENT,          /* Notification in send queue */
+       NQ_NOTIF_RECEIVED,      /* Notification received */
+       NQ_NOTIF_CONSUMED,      /* Notification reported to CA */
+       NQ_NOTIF_DEAD,          /* Error reported to CA */
+};
+
+/*
+ * Per-session notification bookkeeping for the notification queue.
+ * FIXME to be renamed
+ */
+struct nq_session {
+       /* Notification id */
+       u32                     id;
+       /* Notification payload */
+       u32                     payload;
+       /* Notifications list */
+       struct list_head        list;
+       /* Notification debug mutex */
+       struct mutex            mutex;
+       /* Current notification/session state */
+       enum nq_notif_state     state;
+       /* Time at notification state change */
+       u64                     cpu_clk;
+       /* This TA is of Global Platform type, set by upper layer */
+       bool                    is_gp;
+};
+
+/* Notification queue channel */
+void nq_session_init(struct nq_session *session, bool is_gp);
+void nq_session_exit(struct nq_session *session);
+void nq_session_state_update(struct nq_session *session,
+                            enum nq_notif_state state);
+int nq_session_notify(struct nq_session *session, u32 id, u32 payload);
+const char *nq_session_state(const struct nq_session *session, u64 *cpu_clk);
+
+/* Services */
+union mcp_message *nq_get_mcp_buffer(void);
+struct interworld_session *nq_get_iwp_buffer(void);
+void nq_set_version_ptr(char *version);
+void nq_register_notif_handler(void (*handler)(u32 id, u32 payload), bool iwp);
+int nq_register_tee_stop_notifier(struct notifier_block *nb);
+int nq_unregister_tee_stop_notifier(struct notifier_block *nb);
+ssize_t nq_get_stop_message(char __user *buffer, size_t size);
+void nq_signal_tee_hung(void);
+
+/* SWd suspend/resume */
+int nq_suspend(void);
+int nq_resume(void);
+
+/* Start/stop TEE */
+int nq_start(void);
+void nq_stop(void);
+
+/* Initialisation/cleanup */
+int nq_init(void);
+void nq_exit(void);
+
+#endif /* _MC_NQ_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/platform.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/platform.h
new file mode 100755 (executable)
index 0000000..761325c
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Header file of MobiCore Driver Kernel Module Platform
+ * specific structures
+ *
+ * Internal structures of the McDrvModule
+ *
+ * Header file the MobiCore Driver Kernel Module,
+ * its internal structures and defines.
+ */
+#ifndef _MC_DRV_PLATFORM_H_
+#define _MC_DRV_PLATFORM_H_
+
+#define IRQ_SPI(x)      (x + 32)
+
+/* MobiCore Interrupt. */
+#if defined(CONFIG_SOC_EXYNOS3250) || defined(CONFIG_SOC_EXYNOS3472)
+#define MC_INTR_SSIQ   254
+#elif defined(CONFIG_SOC_EXYNOS3475) || defined(CONFIG_SOC_EXYNOS5430) || \
+       defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS7870) || \
+       defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS7880) || defined(CONFIG_SOC_EXYNOS8895)
+#define MC_INTR_SSIQ   255
+#elif defined(CONFIG_SOC_EXYNOS7420) || defined(CONFIG_SOC_EXYNOS7580)
+#define MC_INTR_SSIQ   246
+#endif
+
+/* Enable Runtime Power Management */
+#if defined(CONFIG_SOC_EXYNOS3472)
+#ifdef CONFIG_PM_RUNTIME
+#define MC_PM_RUNTIME
+#endif
+#endif /* CONFIG_SOC_EXYNOS3472 */
+
+#if !defined(CONFIG_SOC_EXYNOS3472)
+
+#define TBASE_CORE_SWITCHER
+
+#if defined(CONFIG_SOC_EXYNOS3250)
+#define COUNT_OF_CPUS 2
+#elif defined(CONFIG_SOC_EXYNOS3475)
+#define COUNT_OF_CPUS 4
+#else
+#define COUNT_OF_CPUS 8
+#endif
+
+/* Values of MPIDR regs */
+#if defined(CONFIG_SOC_EXYNOS3250) || defined(CONFIG_SOC_EXYNOS3475)
+#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003}
+#elif defined(CONFIG_SOC_EXYNOS7580) || defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS7880)
+#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003, 0x0100, 0x0101, 0x0102, 0x0103}
+#elif defined(CONFIG_SOC_EXYNOS9810)
+/* On Cortex A55, bit 24 is used to differentiate
+ * between different MPIDR formats, so the whole MPIDR
+ * must be transmitted
+ */
+#define CPU_IDS {0x81000000, 0x81000100, 0x81000200, 0x81000300, 0x80000100,\
+               0x80000101, 0x80000102, 0x80000103}
+#elif defined(CONFIG_SOC_EXYNOS7885)
+#define CPU_IDS {0x0100, 0x0101, 0x0102, 0x0103, 0x0200, 0x0201, 0x0000, 0x0001}
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003, 0x0100, 0x0101, 0x0102, 0x0103}
+#elif defined(CONFIG_SOC_EXYNOS9820)
+#define CPU_IDS {0x81000000, 0x81000100, 0x81000200, 0x81000300, \
+               0x81000400, 0x81000500, 0x80000100, 0x80000101}
+#else
+#define CPU_IDS {0x0100, 0x0101, 0x0102, 0x0103, 0x0000, 0x0001, 0x0002, 0x0003}
+#endif
+#endif /* !CONFIG_SOC_EXYNOS3472 */
+
+/* Force usage of xenbus_map_ring_valloc as of Linux v4.1 */
+#define MC_XENBUS_MAP_RING_VALLOC_4_1
+
+/* Enable Fastcall worker thread */
+#define MC_FASTCALL_WORKER_THREAD
+
+/* Set Parameters for Secure OS Boosting */
+#define DEFAULT_LITTLE_CORE            0
+#define NONBOOT_LITTLE_CORE            1
+#define DEFAULT_BIG_CORE               4
+#define MIGRATE_TARGET_CORE     DEFAULT_BIG_CORE
+
+#define MC_INTR_LOCAL_TIMER            (IRQ_SPI(238) + DEFAULT_BIG_CORE)
+
+#define LOCAL_TIMER_PERIOD             50
+
+#define DEFAULT_SECOS_BOOST_TIME       5000
+#define MAX_SECOS_BOOST_TIME           600000  /* 600 sec */
+
+#define DUMP_TBASE_HALT_STATUS
+
+#endif /* _MC_DRV_PLATFORM_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api.h
new file mode 100755 (executable)
index 0000000..cea4980
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This header file corresponds to V1.0 of the GlobalPlatform
+ * TEE Client API Specification
+ */
+#ifndef __TEE_CLIENT_API_H__
+#define __TEE_CLIENT_API_H__
+
+#include "tee_client_types.h"
+#include "tee_client_error.h"
+
+#include "tee_client_api_imp.h"
+
+#define TEEC_EXPORT
+
+/*
+ * The header tee_client_api_imp.h must define implementation-dependent types,
+ * constants and macros.
+ *
+ * The implementation-dependent types are:
+ *   - teec_context_imp
+ *   - teec_session_imp
+ *   - teec_shared_memory_imp
+ *   - teec_operation_imp
+ *
+ * The implementation-dependent constants are:
+ *   - TEEC_CONFIG_SHAREDMEM_MAX_SIZE
+ * The implementation-dependent macros are:
+ *   - TEEC_PARAM_TYPES
+ */
+
+/* Pair of integer values exchanged with the TA (GP TEEC_Value) */
+struct teec_value {
+       u32 a;
+       u32 b;
+};
+
+/* Type definitions */
+
+/* Logical connection between the client and the TEE (GP TEEC_Context) */
+struct teec_context {
+       struct teec_context_imp imp;
+};
+
+/* Open session with a Trusted Application (GP TEEC_Session) */
+struct teec_session {
+       struct teec_session_imp imp;
+};
+
+/* Block of memory shared with the TEE (GP TEEC_SharedMemory) */
+struct teec_shared_memory {
+       void                          *buffer;  /* Start of the buffer */
+       size_t                        size;     /* Size in bytes */
+       u32                           flags;    /* TEEC_MEM_INPUT/OUTPUT */
+       struct teec_shared_memory_imp imp;
+};
+
+/* Temporary memory reference parameter (GP TEEC_TempMemoryReference) */
+struct teec_temp_memory_reference {
+       void   *buffer;
+       size_t size;
+};
+
+/*
+ * Reference into a registered/allocated shared memory block
+ * (GP TEEC_RegisteredMemoryReference)
+ */
+struct teec_registered_memory_reference {
+       struct teec_shared_memory *parent;
+       size_t                    size;
+       size_t                    offset;
+};
+
+/*
+ * One operation parameter; which member is valid depends on the TEEC_*
+ * type encoded in teec_operation.param_types (GP TEEC_Parameter)
+ */
+union teec_parameter {
+       struct teec_temp_memory_reference       tmpref;
+       struct teec_registered_memory_reference memref;
+       struct teec_value                       value;
+};
+
+/* Payload of open_session/invoke_command (GP TEEC_Operation) */
+struct teec_operation {
+       u32                       started;      /* Per GP spec, 0 means the operation may be cancelled */
+       u32                       param_types;  /* Built with TEEC_PARAM_TYPES() */
+       union teec_parameter      params[4];
+       struct teec_operation_imp imp;
+};
+
+#define TEEC_ORIGIN_API                     0x00000001
+#define TEEC_ORIGIN_COMMS                   0x00000002
+#define TEEC_ORIGIN_TEE                     0x00000003
+#define TEEC_ORIGIN_TRUSTED_APP             0x00000004
+
+#define TEEC_MEM_INPUT                      0x00000001
+#define TEEC_MEM_OUTPUT                     0x00000002
+
+#define TEEC_NONE                           0x0
+#define TEEC_VALUE_INPUT                    0x1
+#define TEEC_VALUE_OUTPUT                   0x2
+#define TEEC_VALUE_INOUT                    0x3
+#define TEEC_MEMREF_TEMP_INPUT              0x5
+#define TEEC_MEMREF_TEMP_OUTPUT             0x6
+#define TEEC_MEMREF_TEMP_INOUT              0x7
+#define TEEC_MEMREF_WHOLE                   0xC
+#define TEEC_MEMREF_PARTIAL_INPUT           0xD
+#define TEEC_MEMREF_PARTIAL_OUTPUT          0xE
+#define TEEC_MEMREF_PARTIAL_INOUT           0xF
+
+#define TEEC_LOGIN_PUBLIC                   0x00000000
+#define TEEC_LOGIN_USER                     0x00000001
+#define TEEC_LOGIN_GROUP                    0x00000002
+#define TEEC_LOGIN_APPLICATION              0x00000004
+#define TEEC_LOGIN_USER_APPLICATION         0x00000005
+#define TEEC_LOGIN_GROUP_APPLICATION        0x00000006
+
+#define TEEC_TIMEOUT_INFINITE               0xFFFFFFFF
+
+#pragma GCC visibility push(default)
+
+TEEC_EXPORT u32
+teec_initialize_context(const char *name, struct teec_context *context);
+
+TEEC_EXPORT void
+teec_finalize_context(struct teec_context *context);
+
+TEEC_EXPORT u32
+teec_register_shared_memory(struct teec_context *context,
+                           struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT u32
+teec_allocate_shared_memory(struct teec_context *context,
+                           struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT void
+teec_release_shared_memory(struct teec_shared_memory *shared_mem);
+
+TEEC_EXPORT u32
+teec_open_session(struct teec_context *context,
+                 struct teec_session *session,
+                 const struct teec_uuid *destination,
+                 u32 connection_method, /* Should be 0 */
+                 const void *connection_data,
+                 struct teec_operation *operation,
+                 u32 *return_origin);
+
+TEEC_EXPORT void
+teec_close_session(struct teec_session *session);
+
+TEEC_EXPORT u32
+teec_invoke_command(struct teec_session *session,
+                   u32 command_id,
+                   struct teec_operation *operation,
+                   u32 *return_origin);
+
+TEEC_EXPORT void
+teec_request_cancellation(struct teec_operation *operation);
+
+#pragma GCC visibility pop
+
+#endif /* __TEE_CLIENT_API_H__ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api_imp.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_api_imp.h
new file mode 100755 (executable)
index 0000000..41c9e95
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This header file defines the implementation-dependent types,
+ * constants and macros for all the Kinibi implementations of the TEE Client API
+ */
+#ifndef __TEE_CLIENT_API_IMP_H__
+#define __TEE_CLIENT_API_IMP_H__
+
+#define TEEC_MEM_INOUT (TEEC_MEM_INPUT | TEEC_MEM_OUTPUT)
+
+struct tee_client;
+
+/* Kinibi implementation part of a context: the underlying driver client */
+struct teec_context_imp {
+       struct tee_client *client;
+};
+
+/* Kinibi implementation part of a session */
+struct teec_session_imp {
+       u32                     session_id;     /* Session identifier */
+       struct teec_context_imp context;        /* Owning context */
+       bool                    active;         /* Session currently usable */
+};
+
+/* Kinibi implementation part of a shared memory block */
+struct teec_shared_memory_imp {
+       struct tee_client *client;
+       /* true when the buffer was allocated by the implementation
+        * (teec_allocate_shared_memory) rather than registered by the caller
+        */
+       bool implementation_allocated;
+};
+
+/* Kinibi implementation part of an operation: the session it runs against */
+struct teec_operation_imp {
+       struct teec_session_imp *session;
+};
+
+/*
+ * There is no natural, compile-time limit on the shared memory, but a specific
+ * implementation may introduce a limit (in particular on TrustZone)
+ */
+#define TEEC_CONFIG_SHAREDMEM_MAX_SIZE ((size_t)0xFFFFFFFF)
+
+#define TEEC_PARAM_TYPES(entry0_type, entry1_type, entry2_type, entry3_type) \
+       ((entry0_type) | ((entry1_type) << 4) | \
+       ((entry2_type) << 8) | ((entry3_type) << 12))
+
+#endif /* __TEE_CLIENT_API_IMP_H__ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_error.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_error.h
new file mode 100755 (executable)
index 0000000..dcffbe0
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TEE_CLIENT_ERROR_H__
+#define __TEE_CLIENT_ERROR_H__
+
+#define TEEC_SUCCESS                      ((uint32_t)0x00000000)
+
+/**
+ * Generic error code : Generic error
+ **/
+#define TEEC_ERROR_GENERIC                ((uint32_t)0xFFFF0000)
+
+/**
+ * Generic error code : The underlying security system denies the access to the
+ * object
+ **/
+#define TEEC_ERROR_ACCESS_DENIED          ((uint32_t)0xFFFF0001)
+
+/**
+ * Generic error code : The pending operation is cancelled.
+ **/
+#define TEEC_ERROR_CANCEL                 ((uint32_t)0xFFFF0002)
+
+/**
+ * Generic error code : The underlying system detects a conflict
+ **/
+#define TEEC_ERROR_ACCESS_CONFLICT        ((uint32_t)0xFFFF0003)
+
+/**
+ * Generic error code : Too much data for the operation or some data remain
+ * unprocessed by the operation.
+ **/
+#define TEEC_ERROR_EXCESS_DATA            ((uint32_t)0xFFFF0004)
+
+/**
+ * Generic error code : Error of data format
+ **/
+#define TEEC_ERROR_BAD_FORMAT             ((uint32_t)0xFFFF0005)
+
+/**
+ * Generic error code : The specified parameters are invalid
+ **/
+#define TEEC_ERROR_BAD_PARAMETERS         ((uint32_t)0xFFFF0006)
+
+/**
+ * Generic error code : Illegal state for the operation.
+ **/
+#define TEEC_ERROR_BAD_STATE              ((uint32_t)0xFFFF0007)
+
+/**
+ * Generic error code : The item is not found
+ **/
+#define TEEC_ERROR_ITEM_NOT_FOUND         ((uint32_t)0xFFFF0008)
+
+/**
+ * Generic error code : The specified operation is not implemented
+ **/
+#define TEEC_ERROR_NOT_IMPLEMENTED        ((uint32_t)0xFFFF0009)
+
+/**
+ * Generic error code : The specified operation is not supported
+ **/
+#define TEEC_ERROR_NOT_SUPPORTED          ((uint32_t)0xFFFF000A)
+
+/**
+ * Generic error code : Insufficient data is available for the operation.
+ **/
+#define TEEC_ERROR_NO_DATA                ((uint32_t)0xFFFF000B)
+
+/**
+ * Generic error code : Not enough memory to perform the operation
+ **/
+#define TEEC_ERROR_OUT_OF_MEMORY          ((uint32_t)0xFFFF000C)
+
+/**
+ * Generic error code : The service is currently unable to handle the request;
+ * try later
+ **/
+#define TEEC_ERROR_BUSY                   ((uint32_t)0xFFFF000D)
+
+/**
+ * Generic communication error
+ **/
+#define TEEC_ERROR_COMMUNICATION          ((uint32_t)0xFFFF000E)
+
+/**
+ * Generic error code : security violation
+ **/
+#define TEEC_ERROR_SECURITY               ((uint32_t)0xFFFF000F)
+
+/**
+ * Generic error code : the buffer is too short
+ **/
+#define TEEC_ERROR_SHORT_BUFFER           ((uint32_t)0xFFFF0010)
+
+/**
+ * Error of communication: The target of the connection is dead
+ **/
+#define TEEC_ERROR_TARGET_DEAD            ((uint32_t)0xFFFF3024)
+
+/**
+ * File system error code: not enough space to complete the operation.
+ **/
+#define TEEC_ERROR_STORAGE_NO_SPACE       ((uint32_t)0xFFFF3041)
+
+#endif /* __TEE_CLIENT_ERROR_H__ */
+
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_types.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/GP/tee_client_types.h
new file mode 100755 (executable)
index 0000000..812b97a
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TEE_CLIENT_TYPES_H__
+#define __TEE_CLIENT_TYPES_H__
+
+/* Definition of a UUID (from RFC 4122 http://www.ietf.org/rfc/rfc4122.txt) */
+struct teec_uuid {
+       u32 time_low;                   /* RFC 4122 time_low */
+       u16 time_mid;                   /* RFC 4122 time_mid */
+       u16 time_hi_and_version;        /* RFC 4122 time_hi_and_version */
+       u8  clock_seq_and_node[8];      /* Clock sequence and node fields */
+};
+
+/* Type definition for a TEE Identity */
+struct tee_identity {
+       u32 login;              /* One of the TEEC_LOGIN_* methods */
+       struct teec_uuid uuid;  /* Client UUID */
+};
+
+#endif /* __TEE_CLIENT_TYPES_H__ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_admin.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_admin.h
new file mode 100755 (executable)
index 0000000..1e4d4db
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MC_ADMIN_IOCTL_H__
+#define __MC_ADMIN_IOCTL_H__
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MC_ADMIN_DEVNODE "mobicore"
+
+/* Driver/daemon commands */
+enum {
+       /* Command 0 is reserved */
+       MC_DRV_GET_ROOT_CONTAINER = 1,  /* fetch root container */
+       MC_DRV_GET_SP_CONTAINER = 2,    /* fetch service provider container */
+       MC_DRV_GET_TRUSTLET_CONTAINER = 3,      /* fetch trustlet container */
+       MC_DRV_GET_TRUSTLET = 4,        /* fetch trustlet binary */
+       MC_DRV_SIGNAL_CRASH = 5,        /* report a crash to the daemon */
+};
+
+/* MobiCore IOCTL magic number */
+#define MC_IOC_MAGIC    'M'
+
+struct mc_admin_request {
+       __u32            request_id;    /* Unique request identifier */
+       __u32            command;       /* Command to daemon */
+       struct mc_uuid_t uuid;          /* UUID of trustlet, if relevant */
+       __u32            is_gp;         /* Whether trustlet is GP */
+       __u32            spid;          /* SPID of trustlet, if relevant */
+};
+
+struct mc_admin_response {
+       __u32           request_id;     /* Unique request identifier */
+       __u32           error_no;       /* Errno from daemon */
+       __u32           spid;           /* SPID of trustlet, if relevant */
+       __u32           service_type;   /* Type of trustlet being returned */
+       __u32           length;         /* Length of data to get */
+       /* Any data follows */
+};
+
+struct mc_admin_driver_info {
+       /* Admin interface version and initial command identifier */
+       __u32           drv_version;    /* driver (admin interface) version */
+       __u32           initial_cmd_id; /* initial command/request ID */
+};
+
+struct mc_admin_load_info {
+       __u32            spid;          /* SPID of trustlet, if relevant */
+       __u64            address;       /* Address of the data */
+       __u32            length;        /* Length of data to get */
+       struct mc_uuid_t uuid;          /* UUID of trustlet, if relevant */
+};
+
+#define MC_ADMIN_IO_GET_DRIVER_REQUEST \
+       _IOR(MC_IOC_MAGIC, 0, struct mc_admin_request)
+#define MC_ADMIN_IO_GET_INFO \
+       _IOR(MC_IOC_MAGIC, 1, struct mc_admin_driver_info)
+#define MC_ADMIN_IO_LOAD_DRIVER \
+       _IOW(MC_IOC_MAGIC, 2, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_TOKEN \
+       _IOW(MC_IOC_MAGIC, 3, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_CHECK \
+       _IOW(MC_IOC_MAGIC, 4, struct mc_admin_load_info)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MC_ADMIN_IOCTL_H__ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_linux_api.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_linux_api.h
new file mode 100755 (executable)
index 0000000..65156e4
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_LINUX_API_H_
+#define _MC_LINUX_API_H_
+
+#include <linux/types.h>
+
+/*
+ * Switch TEE active core to core_num, defined as linux
+ * core id
+ */
+int mc_switch_core(int core_num);
+
+/*
+ * Return TEE active core as Linux core id
+ */
+int mc_active_core(void);
+
+#endif /* _MC_LINUX_API_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_user.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mc_user.h
new file mode 100755 (executable)
index 0000000..666227b
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_USER_H_
+#define _MC_USER_H_
+
+#define MCDRVMODULEAPI_VERSION_MAJOR 7
+#define MCDRVMODULEAPI_VERSION_MINOR 0
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+#define BIT(n)                         (1 << (n))
+#endif /* __KERNEL__ */
+
+#define MC_USER_DEVNODE                        "mobicore-user"
+
+/** Maximum length of MobiCore product ID string. */
+#define MC_PRODUCT_ID_LEN              64
+
+/** Number of buffers that can be mapped at once */
+#define MC_MAP_MAX                     4
+
+/* Max length for buffers */
+#define MC_MAX_TCI_LEN                 0x100000
+#define BUFFER_LENGTH_MAX              0x40000000
+
+/* Flags for buffers to map (aligned on GP) */
+#define MC_IO_MAP_INPUT                        BIT(0)
+#define MC_IO_MAP_OUTPUT               BIT(1)
+#define MC_IO_MAP_INPUT_OUTPUT         (MC_IO_MAP_INPUT | MC_IO_MAP_OUTPUT)
+
+/*
+ * Universally Unique Identifier (UUID) according to ISO/IEC 11578.
+ */
+struct mc_uuid_t {
+       __u8            value[16];      /* Value of the UUID */
+};
+
+/*
+ * GP TA login types (values mirror GP TEEC_LOGIN_* constants).
+ */
+enum mc_login_type {
+       LOGIN_PUBLIC = 0,               /* no identity data required */
+       LOGIN_USER,                     /* identify by user ID */
+       LOGIN_GROUP,                    /* identify by group ID */
+       LOGIN_APPLICATION = 4,          /* identify by application (3 unused) */
+       LOGIN_USER_APPLICATION,         /* user ID + application */
+       LOGIN_GROUP_APPLICATION,        /* group ID + application */
+};
+
+/*
+ * GP TA identity structure.
+ */
+struct mc_identity {
+       enum mc_login_type      login_type;     /* selects union member used */
+       union {
+               __u8            login_data[16]; /* raw login data */
+               gid_t           gid;            /* Requested group id */
+               struct {
+                       uid_t   euid;           /* effective user id */
+                       uid_t   ruid;           /* real user id */
+               } uid;
+       };
+};
+
+/*
+ * Data exchange structure of the MC_IO_OPEN_SESSION ioctl command.
+ */
+struct mc_ioctl_open_session {
+       struct mc_uuid_t uuid;          /* trustlet uuid */
+       __u32           is_gp_uuid;     /* uuid is for GP TA */
+       __u32           sid;            /* session id (out) */
+       __u64           tci;            /* tci buffer pointer */
+       __u32           tcilen;         /* tci length */
+       struct mc_identity identity;    /* GP TA identity */
+};
+
+/*
+ * Data exchange structure of the MC_IO_OPEN_TRUSTLET ioctl command.
+ */
+struct mc_ioctl_open_trustlet {
+       __u32           sid;            /* session id (out) */
+       __u32           spid;           /* trustlet spid */
+       __u64           buffer;         /* trustlet binary pointer */
+       __u32           tlen;           /* binary length  */
+       __u64           tci;            /* tci buffer pointer */
+       __u32           tcilen;         /* tci length */
+};
+
+/*
+ * Data exchange structure of the MC_IO_WAIT ioctl command.
+ */
+struct mc_ioctl_wait {
+       __u32           sid;            /* session id (in) */
+       __s32           timeout;        /* notification timeout */
+       __u32           partial;        /* for proxy server to retry silently */
+};
+
+/*
+ * Data exchange structure of the MC_IO_ALLOC ioctl command.
+ */
+struct mc_ioctl_alloc {
+       __u32           len;            /* buffer length  */
+       __u32           handle;         /* user handle for the buffer (out) */
+};
+
+/*
+ * Buffer mapping incoming and outgoing information.
+ */
+struct mc_ioctl_buffer {
+       __u64           va;             /* user space address of buffer */
+       __u32           len;            /* buffer length  */
+       __u64           sva;            /* SWd virt address of buffer (out) */
+       __u32           flags;          /* buffer flags  */
+};
+
+/*
+ * Data exchange structure of the MC_IO_MAP and MC_IO_UNMAP ioctl commands.
+ */
+struct mc_ioctl_map {
+       __u32           sid;            /* session id */
+       struct mc_ioctl_buffer buf;     /* buffers info */
+};
+
+/*
+ * Data exchange structure of the MC_IO_ERR ioctl command.
+ */
+struct mc_ioctl_geterr {
+       __u32           sid;            /* session id */
+       __s32           value;          /* error value (out) */
+};
+
+/*
+ * Global MobiCore Version Information.
+ */
+struct mc_version_info {
+       char product_id[MC_PRODUCT_ID_LEN]; /* Product ID string */
+       __u32   version_mci;            /* Mobicore Control Interface */
+       __u32   version_so;             /* Secure Objects */
+       __u32   version_mclf;           /* MobiCore Load Format */
+       __u32   version_container;      /* MobiCore Container Format */
+       __u32   version_mc_config;      /* MobiCore Config. Block Format */
+       __u32   version_tl_api;         /* MobiCore Trustlet API */
+       __u32   version_dr_api;         /* MobiCore Driver API */
+       __u32   version_nwd;            /* This Driver */
+};
+
+/*
+ * GP TA operation structure.
+ */
+struct gp_value {
+       __u32                   a;      /* first integer value */
+       __u32                   b;      /* second integer value */
+};
+
+struct gp_temp_memref {
+       __u64                   buffer; /* user address of the buffer */
+       __u64                   size;   /* buffer size in bytes */
+};
+
+struct gp_shared_memory {
+       __u64                   buffer; /* user address of the buffer */
+       __u64                   size;   /* buffer size in bytes */
+       __u32                   flags;  /* direction flags (in/out) */
+};
+
+struct gp_regd_memref {
+       struct gp_shared_memory parent; /* registered shared memory */
+       __u64                   size;   /* size of the used region */
+       __u64                   offset; /* offset within parent buffer */
+};
+
+union gp_param {
+       struct gp_temp_memref   tmpref; /* temporary memory reference */
+       struct gp_regd_memref   memref; /* registered memory reference */
+       struct gp_value         value;  /* pair of integer values */
+};
+
+struct gp_operation {
+       __u32                   started;        /* 0 allows cancellation (GP) */
+       __u32                   param_types;    /* encodes types of params[] */
+       union gp_param          params[4];      /* up to 4 GP parameters */
+};
+
+struct gp_return {
+       __u32                   origin; /* origin of the return code */
+       __u32                   value;  /* GP return/error code */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_INITIALIZE_CONTEXT ioctl command.
+ */
+struct mc_ioctl_gp_initialize_context {
+       struct gp_return        ret;            /* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_REGISTER_SHARED_MEM ioctl command.
+ */
+struct mc_ioctl_gp_register_shared_mem {
+       struct gp_shared_memory memref;
+       struct gp_return        ret;            /* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_RELEASE_SHARED_MEM ioctl command.
+ */
+struct mc_ioctl_gp_release_shared_mem {
+       struct gp_shared_memory memref;
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_OPEN_SESSION ioctl command.
+ */
+struct mc_ioctl_gp_open_session {
+       struct mc_uuid_t        uuid;           /* trustlet uuid */
+       struct mc_identity      identity;       /* GP TA identity */
+       struct gp_operation     operation;      /* set of parameters */
+       struct gp_return        ret;            /* return origin/value (out) */
+       __u32                   session_id;     /* session id (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_CLOSE_SESSION ioctl command.
+ */
+struct mc_ioctl_gp_close_session {
+       __u32                   session_id;     /* session id */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_INVOKE_COMMAND ioctl command.
+ */
+struct mc_ioctl_gp_invoke_command {
+       struct gp_operation     operation;      /* set of parameters */
+       __u32                   session_id;     /* session id */
+       __u32                   command_id;     /* ID of the command */
+       struct gp_return        ret;            /* return origin/value (out) */
+};
+
+/*
+ * Data exchange structure of the MC_IO_GP_CANCEL ioctl command.
+ */
+struct mc_ioctl_gp_request_cancellation {
+       struct gp_operation     operation;      /* set of parameters */
+};
+
+/*
+ * defines for the ioctl mobicore driver module function call from user space.
+ */
+/* MobiCore IOCTL magic number */
+#define MC_IOC_MAGIC   'M'
+
+/*
+ * Implement corresponding functions from user api
+ */
+#define MC_IO_OPEN_SESSION \
+       _IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_open_session)
+#define MC_IO_OPEN_TRUSTLET \
+       _IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_open_trustlet)
+#define MC_IO_CLOSE_SESSION \
+       _IO(MC_IOC_MAGIC, 2)
+#define MC_IO_NOTIFY \
+       _IO(MC_IOC_MAGIC, 3)
+#define MC_IO_WAIT \
+       _IOW(MC_IOC_MAGIC, 4, struct mc_ioctl_wait)
+#define MC_IO_MAP \
+       _IOWR(MC_IOC_MAGIC, 5, struct mc_ioctl_map)
+#define MC_IO_UNMAP \
+       _IOW(MC_IOC_MAGIC, 6, struct mc_ioctl_map)
+#define MC_IO_ERR \
+       _IOWR(MC_IOC_MAGIC, 7, struct mc_ioctl_geterr)
+#define MC_IO_HAS_SESSIONS \
+       _IO(MC_IOC_MAGIC, 8)
+#define MC_IO_VERSION \
+       _IOR(MC_IOC_MAGIC, 9, struct mc_version_info)
+#define MC_IO_GP_INITIALIZE_CONTEXT \
+       _IOW(MC_IOC_MAGIC, 20, struct mc_ioctl_gp_initialize_context)
+#define MC_IO_GP_REGISTER_SHARED_MEM \
+       _IOWR(MC_IOC_MAGIC, 21, struct mc_ioctl_gp_register_shared_mem)
+#define MC_IO_GP_RELEASE_SHARED_MEM \
+       _IOW(MC_IOC_MAGIC, 23, struct mc_ioctl_gp_release_shared_mem)
+#define MC_IO_GP_OPEN_SESSION \
+       _IOWR(MC_IOC_MAGIC, 24, struct mc_ioctl_gp_open_session)
+#define MC_IO_GP_CLOSE_SESSION \
+       _IOW(MC_IOC_MAGIC, 25, struct mc_ioctl_gp_close_session)
+#define MC_IO_GP_INVOKE_COMMAND \
+       _IOWR(MC_IOC_MAGIC, 26, struct mc_ioctl_gp_invoke_command)
+#define MC_IO_GP_REQUEST_CANCELLATION \
+       _IOW(MC_IOC_MAGIC, 27, struct mc_ioctl_gp_request_cancellation)
+
+#endif /* _MC_USER_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mobicore_driver_api.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/public/mobicore_driver_api.h
new file mode 100755 (executable)
index 0000000..335d0b9
--- /dev/null
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MOBICORE_DRIVER_API_H_
+#define _MOBICORE_DRIVER_API_H_
+
+#include "mc_user.h"
+
+#define __MC_CLIENT_LIB_API
+
+/*
+ * Return values of MobiCore driver functions.
+ */
+enum mc_result {
+       /* Function call succeeded. */
+       MC_DRV_OK                               = 0,
+       /* No notification available. */
+       MC_DRV_NO_NOTIFICATION                  = 1,
+       /* Error during notification on communication level. */
+       MC_DRV_ERR_NOTIFICATION                 = 2,
+       /* Function not implemented. */
+       MC_DRV_ERR_NOT_IMPLEMENTED              = 3,
+       /* No more resources available. */
+       MC_DRV_ERR_OUT_OF_RESOURCES             = 4,
+       /* Driver initialization failed. */
+       MC_DRV_ERR_INIT                         = 5,
+       /* Unknown error. */
+       MC_DRV_ERR_UNKNOWN                      = 6,
+       /* The specified device is unknown. */
+       MC_DRV_ERR_UNKNOWN_DEVICE               = 7,
+       /* The specified session is unknown.*/
+       MC_DRV_ERR_UNKNOWN_SESSION              = 8,
+       /* The specified operation is not allowed. */
+       MC_DRV_ERR_INVALID_OPERATION            = 9,
+       /* The response header from the MC is invalid. */
+       MC_DRV_ERR_INVALID_RESPONSE             = 10,
+       /* Function call timed out. */
+       MC_DRV_ERR_TIMEOUT                      = 11,
+       /* Can not allocate additional memory. */
+       MC_DRV_ERR_NO_FREE_MEMORY               = 12,
+       /* Free memory failed. */
+       MC_DRV_ERR_FREE_MEMORY_FAILED           = 13,
+       /* Still some open sessions pending. */
+       MC_DRV_ERR_SESSION_PENDING              = 14,
+       /* MC daemon not reachable */
+       MC_DRV_ERR_DAEMON_UNREACHABLE           = 15,
+       /* The device file of the kernel module could not be opened. */
+       MC_DRV_ERR_INVALID_DEVICE_FILE          = 16,
+       /* Invalid parameter. */
+       MC_DRV_ERR_INVALID_PARAMETER            = 17,
+       /* Unspecified error from Kernel Module*/
+       MC_DRV_ERR_KERNEL_MODULE                = 18,
+       /* Error during mapping of additional bulk memory to session. */
+       MC_DRV_ERR_BULK_MAPPING                 = 19,
+       /* Error during unmapping of additional bulk memory to session. */
+       MC_DRV_ERR_BULK_UNMAPPING               = 20,
+       /* Notification received, exit code available. */
+       MC_DRV_INFO_NOTIFICATION                = 21,
+       /* Set up of NWd connection failed. */
+       MC_DRV_ERR_NQ_FAILED                    = 22,
+       /* Wrong daemon version. */
+       MC_DRV_ERR_DAEMON_VERSION               = 23,
+       /* Wrong container version. */
+       MC_DRV_ERR_CONTAINER_VERSION            = 24,
+       /* System Trustlet public key is wrong. */
+       MC_DRV_ERR_WRONG_PUBLIC_KEY             = 25,
+       /* Wrong container type(s). */
+       MC_DRV_ERR_CONTAINER_TYPE_MISMATCH      = 26,
+       /* Container is locked (or not activated). */
+       MC_DRV_ERR_CONTAINER_LOCKED             = 27,
+       /* SPID is not registered with root container. */
+       MC_DRV_ERR_SP_NO_CHILD                  = 28,
+       /* UUID is not registered with sp container. */
+       MC_DRV_ERR_TL_NO_CHILD                  = 29,
+       /* Unwrapping of root container failed. */
+       MC_DRV_ERR_UNWRAP_ROOT_FAILED           = 30,
+       /* Unwrapping of service provider container failed. */
+       MC_DRV_ERR_UNWRAP_SP_FAILED             = 31,
+       /* Unwrapping of Trustlet container failed. */
+       MC_DRV_ERR_UNWRAP_TRUSTLET_FAILED       = 32,
+       /* No device associated with connection. */
+       MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN       = 33,
+       /* TA blob attestation is incorrect. */
+       MC_DRV_ERR_TA_ATTESTATION_ERROR         = 34,
+       /* Interrupted system call. */
+       MC_DRV_ERR_INTERRUPTED_BY_SIGNAL        = 35,
+       /* Service is blocked and opensession is thus not allowed. */
+       MC_DRV_ERR_SERVICE_BLOCKED              = 36,
+       /* Service is locked and opensession is thus not allowed. */
+       MC_DRV_ERR_SERVICE_LOCKED               = 37,
+       /* Service was killed by the TEE (due to an administrative command). */
+       MC_DRV_ERR_SERVICE_KILLED               = 38,
+       /* All permitted instances to the service are used */
+       MC_DRV_ERR_NO_FREE_INSTANCES            = 39,
+       /* TA blob header is incorrect. */
+       MC_DRV_ERR_TA_HEADER_ERROR              = 40,
+};
+
+/*
+ * Structure of Session Handle, includes the Session ID and the Device ID the
+ * Session belongs to.
+ * The session handle will be used for session-based MobiCore communication.
+ * It will be passed to calls which address a communication end point in the
+ * MobiCore environment.
+ */
+struct mc_session_handle {
+       u32     session_id;             /* MobiCore session ID */
+       u32     device_id;              /* Device ID the session belongs to */
+};
+
+/*
+ * Information structure about additional mapped Bulk buffer between the
+ * Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
+ * initialized from a Trustlet Connector by calling mc_map().
+ * In order to use the memory within a Trustlet the Trustlet Connector has to
+ * inform the Trustlet with the content of this structure via the TCI.
+ */
+struct mc_bulk_map {
+       /*
+        * The virtual address of the Bulk buffer regarding the address space
+        * of the Trustlet, already includes a possible offset!
+        */
+       u32     secure_virt_addr;
+       u32     secure_virt_len;        /* Length of the mapped Bulk buffer */
+};
+
+/* The default device ID */
+#define MC_DEVICE_ID_DEFAULT   0
+/* Wait infinite for a response of the MC. */
+#define MC_INFINITE_TIMEOUT    ((s32)(-1))
+/* Do not wait for a response of the MC. */
+#define MC_NO_TIMEOUT          0
+/* TCI/DCI must not exceed 1MiB */
+#define MC_MAX_TCI_LEN         0x100000
+
+/**
+ * mc_open_device() - Open a new connection to a MobiCore device.
+ * @device_id:         Identifier for the MobiCore device to be used.
+ *                     MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Initializes all device specific resources required to communicate with a
+ * MobiCore instance located on the specified device in the system. If the
+ * device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_OPERATION:   device already opened
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device_id unknown
+ *     MC_DRV_ERR_INVALID_DEVICE_FILE: kernel module under /dev/mobicore
+ *                                     cannot be opened
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_device(
+       u32                             device_id);
+
+/**
+ * mc_close_device() - Close the connection to a MobiCore device.
+ * @device_id:         Identifier for the MobiCore device.
+ *
+ * When closing a device, active sessions have to be closed beforehand.
+ * Resources associated with the device will be released.
+ * The device may be opened again after it has been closed.
+ *
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id is invalid
+ *     MC_DRV_ERR_SESSION_PENDING:     a session is still open
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon occur
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_device(
+       u32                             device_id);
+
+/**
+ * mc_open_session() - Open a new session to a Trustlet.
+ * @session:           On success, the session data will be returned
+ * @uuid:              UUID of the Trustlet to be opened
+ * @tci:               TCI buffer for communicating with the Trustlet
+ * @tci_len:           Length of the TCI buffer. Maximum allowed value
+ *                     is MC_MAX_TCI_LEN
+ *
+ * The Trustlet with the given UUID has to be available in the flash filesystem.
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ *
+ * Waits till the MobiCore responses with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * Note that session.device_id has to be the device id of an opened device.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   session parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id is invalid
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon socket occur
+ *     MC_DRV_ERR_NQ_FAILED:           daemon returns an error
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_session(
+       struct mc_session_handle        *session,
+       const struct mc_uuid_t          *uuid,
+       u8                              *tci,
+       u32                             tci_len);
+
+/**
+ * mc_open_trustlet() - Open a new session to the provided Trustlet.
+ * @session:           On success, the session data will be returned
+ * @spid:              Service Provider ID (for SP trustlets otherwise ignored)
+ * @trustlet:          Memory buffer containing the Trusted Application binary
+ * @trustlet_len:      Trusted Application length
+ * @tci:               TCI buffer for communicating with the Trustlet
+ * @tci_len:           Length of the TCI buffer. Maximum allowed value
+ *                     is MC_MAX_TCI_LEN
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ *
+ * Waits till the MobiCore responses with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * Note that session.device_id has to be the device id of an opened device.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   session parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id is invalid
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon socket occur
+ *     MC_DRV_ERR_NQ_FAILED:           daemon returns an error
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_trustlet(
+       struct mc_session_handle        *session,
+       u32                             spid,
+       u8                              *trustlet,
+       u32                             trustlet_len,
+       u8                              *tci,
+       u32                             len);
+
+/**
+ * mc_close_session() - Close a Trustlet session.
+ * @session:           Session to be closed.
+ *
+ * Closes the specified MobiCore session. The call will block until the
+ * session has been closed.
+ *
+ * Device device_id has to be opened in advance.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   session parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon occur
+ *     MC_DRV_ERR_INVALID_DEVICE_FILE: daemon cannot open Trustlet file
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_session(
+       struct mc_session_handle        *session);
+
+/**
+ * mc_notify() - Notify a session.
+ * @session:           The session to be notified.
+ *
+ * Notifies the session end point about available message data.
+ * If the session parameter is correct, notify will always succeed.
+ * Corresponding errors can only be received by mc_wait_notification().
+ *
+ * A session has to be opened in advance.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   session parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_notify(
+       struct mc_session_handle        *session);
+
+/**
+ * mc_wait_notification() - Wait for a notification.
+ * @session:           The session the notification should correspond to.
+ * @timeout:           Time in milliseconds to wait
+ *                     (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
+ *                      MC_INFINITE_TIMEOUT : wait infinitely)
+ *
+ * Wait for a notification issued by the MobiCore for a specific session.
+ * The timeout parameter specifies the number of milliseconds the call will wait
+ * for a notification.
+ *
+ * If the caller passes 0 as timeout value the call will immediately return.
+ * If timeout value is below 0 the call will block until a notification for the
+ * session has been received.
+ *
+ * If timeout is below 0, call will block.
+ *
+ * Caller has to trust the other side to send a notification to wake him up
+ * again.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_TIMEOUT:             no notification arrived in time
+ *     MC_DRV_INFO_NOTIFICATION:       a problem with the session was
+ *                                     encountered. Get more details with
+ *                                     mc_get_session_error_code()
+ *     MC_DRV_ERR_NOTIFICATION:        a problem with the socket occurred
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
+       struct mc_session_handle        *session,
+       s32                             timeout);
+
+/**
+ * mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
+ * @device_id:         The ID of an opened device to retrieve the WSM from.
+ * @align:             The alignment (number of pages) of the memory block
+ *                     (e.g. 0x00000001 for 4kb).
+ * @len:               Length of the block in bytes.
+ * @wsm:               Virtual address of the world shared memory block.
+ * @wsm_flags:         Platform specific flags describing the memory to
+ *                     be allocated.
+ *
+ * The MC driver allocates a contiguous block of memory which can be used as
+ * WSM.
+ * This implicates that the allocated memory is aligned according to the
+ * alignment parameter.
+ *
+ * Always returns a buffer of size WSM_SIZE aligned to 4K.
+ *
+ * Align and wsm_flags are currently ignored
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id is invalid
+ *     MC_DRV_ERR_NO_FREE_MEMORY:      no more contiguous memory is
+ *                                     available in this size or for this
+ *                                     process
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
+       u32                             device_id,
+       u32                             align,
+       u32                             len,
+       u8                              **wsm,
+       u32                             wsm_flags);
+
+/**
+ * mc_free_wsm() - Free a block of world shared memory (WSM).
+ * @device_id:         The ID to which the given address belongs
+ * @wsm:               Address of WSM block to be freed
+ *
+ * The MC driver will free a block of world shared memory (WSM) previously
+ * allocated with mc_malloc_wsm(). The caller has to assure that the address
+ * handed over to the driver is a valid WSM address.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      when device id is invalid
+ *     MC_DRV_ERR_FREE_MEMORY_FAILED:  on failure
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(
+       u32                             device_id,
+       u8                              *wsm);
+
+/**
+ *mc_map() -   Map additional bulk buffer between a Trustlet Connector (TLC)
+ *             and the Trustlet (TL) for a session
+ * @session:           Session handle with information of the device_id and
+ *                     the session_id. The given buffer is mapped to the
+ *                     session specified in the sessionHandle
+ * @buf:               Virtual address of a memory portion (relative to TLC)
+ *                     to be shared with the Trustlet, already includes a
+ *                     possible offset!
+ * @len:               length of buffer block in bytes.
+ * @map_info:          Information structure about the mapped Bulk buffer
+ *                     between the TLC (NWd) and the TL (SWd).
+ *
+ * Memory allocated in user space of the TLC can be mapped as additional
+ * communication channel (besides TCI) to the Trustlet. Limitation of the
+ * Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
+ * chunk size of 1 MiB each.
+ *
+ * It is up to the application layer (TLC) to inform the Trustlet
+ * about the additional mapped bulk memory.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon occur
+ *     MC_DRV_ERR_BULK_MAPPING:        buf is already used as a bulk buffer or
+ *                                     when registering the buffer failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_map(
+       struct mc_session_handle        *session,
+       void                            *buf,
+       u32                             len,
+       struct mc_bulk_map              *map_info);
+
+/**
+ * mc_unmap() -        Remove additional mapped bulk buffer between Trustlet Connector
+ *             (TLC) and the Trustlet (TL) for a session
+ * @session:           Session handle with information of the device_id and
+ *                     the session_id. The given buffer is unmapped from the
+ *                     session specified in the sessionHandle.
+ * @buf:               Virtual address of a memory portion (relative to TLC)
+ *                     shared with the TL, already includes a possible offset!
+ * @map_info:          Information structure about the mapped Bulk buffer
+ *                     between the TLC (NWd) and the TL (SWd)
+ *
+ * The bulk buffer will immediately be unmapped from the session context.
+ *
+ * The application layer (TLC) must inform the TL about unmapping of the
+ * additional bulk memory before calling mc_unmap!
+ *
+ * The clientlib currently ignores the len field in map_info.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ *     MC_DRV_ERR_DAEMON_UNREACHABLE:  problems with daemon occur
+ *     MC_DRV_ERR_BULK_UNMAPPING:      buf was not registered earlier
+ *                                     or when unregistering failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_unmap(
+       struct mc_session_handle        *session,
+       void                            *buf,
+       struct mc_bulk_map              *map_info);
+
+/*
+ * mc_get_session_error_code() - Get additional error information of the last
+ *                              error that occurred on a session.
+ * @session:           Session handle with information of the device_id and
+ *                     the session_id
+ * @exit_code:         >0 Trustlet has terminated itself with this value,
+ *                     <0 Trustlet is dead because of an error within the
+ *                     MobiCore (e.g. Kernel exception). See also MCI
+ *                     definition.
+ *
+ * After the request the stored error code will be deleted.
+ *
+ * Return codes:
+ *     MC_DRV_OK:                      operation completed successfully
+ *     MC_DRV_ERR_INVALID_PARAMETER:   a parameter is invalid
+ *     MC_DRV_ERR_UNKNOWN_SESSION:     session id is invalid
+ *     MC_DRV_ERR_UNKNOWN_DEVICE:      device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
+       struct mc_session_handle        *session,
+       s32                             *exit_code);
+
+#endif /* _MOBICORE_DRIVER_API_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/session.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/session.c
new file mode 100755 (executable)
index 0000000..7ecfc5d
--- /dev/null
@@ -0,0 +1,992 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <crypto/hash.h>
+#include <linux/scatterlist.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <net/sock.h>          /* sockfd_lookup */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/clock.h> /* local_clock */
+#include <linux/sched/task.h>  /* put_task_struct */
+#endif
+
+#include "public/mc_user.h"
+#include "public/mc_admin.h"
+
+#if KERNEL_VERSION(3, 5, 0) <= LINUX_VERSION_CODE
+#include <linux/uidgid.h>
+#else
+/*
+ * Compatibility shims for kernels older than 3.5, which lack the
+ * kuid_t/kgid_t wrapper types and helpers from <linux/uidgid.h>:
+ * map them straight onto the plain uid_t/gid_t integers.
+ */
+#define kuid_t uid_t
+#define kgid_t gid_t
+#define KGIDT_INIT(value) ((kgid_t)value)
+
+/* Identity conversion: kuid_t is a plain uid_t on these kernels */
+static inline uid_t __kuid_val(kuid_t uid)
+{
+       return uid;
+}
+
+/* Identity conversion: kgid_t is a plain gid_t on these kernels */
+static inline gid_t __kgid_val(kgid_t gid)
+{
+       return gid;
+}
+
+/* GID equality, mirroring the >=3.5 uidgid.h helper */
+static inline bool gid_eq(kgid_t left, kgid_t right)
+{
+       return __kgid_val(left) == __kgid_val(right);
+}
+
+/* GID greater-than, used by the bsearch in has_group() */
+static inline bool gid_gt(kgid_t left, kgid_t right)
+{
+       return __kgid_val(left) > __kgid_val(right);
+}
+
+/* GID less-than, used by the bsearch in has_group() */
+static inline bool gid_lt(kgid_t left, kgid_t right)
+{
+       return __kgid_val(left) < __kgid_val(right);
+}
+#endif
+#include "main.h"
+#include "mmu.h"               /* tee_mmu_buffer, tee_mmu_debug_structs */
+#include "iwp.h"
+#include "mcp.h"
+#include "client.h"            /* *cbuf* */
+#include "session.h"
+#include "mci/mcimcp.h"                /* WSM_INVALID */
+
+#define SHA1_HASH_SIZE       20
+
+/*
+ * Initialise a WSM slot from a user/kernel buffer description.
+ * Builds an MMU table for @buf via client_mmu_create() (which may also
+ * attach a cbuf reference, returned in wsm->cbuf) and records va/len/flags
+ * for later lookup.
+ * Returns 0 on success, -EINVAL if the slot is already in use, or the
+ * client_mmu_create() error.
+ */
+static int wsm_create(struct tee_session *session, struct tee_wsm *wsm,
+                     const struct mc_ioctl_buffer *buf)
+{
+       if (wsm->in_use) {
+               mc_dev_err(-EINVAL, "wsm already in use");
+               return -EINVAL;
+       }
+
+       wsm->mmu = client_mmu_create(session->client, buf, &wsm->cbuf);
+       if (IS_ERR(wsm->mmu))
+               return PTR_ERR(wsm->mmu);
+
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_wsms);
+       wsm->va = buf->va;
+       wsm->len = buf->len;
+       wsm->flags = buf->flags;
+       wsm->in_use = true;
+       return 0;
+}
+
+/*
+ * Initialise a WSM slot from an already existing MMU mapping.
+ * Takes an extra reference on @mmu and copies length/flags from its
+ * buffer map; va is left 0 since there is no NWd address to look the
+ * buffer up by.
+ * Returns 0 on success or -EINVAL if the slot is already in use.
+ */
+static int wsm_wrap(struct tee_session *session, struct tee_wsm *wsm,
+                   struct tee_mmu *mmu)
+{
+       struct mcp_buffer_map map;
+
+       if (wsm->in_use) {
+               mc_dev_err(-EINVAL, "wsm already in use");
+               return -EINVAL;
+       }
+
+       wsm->mmu = mmu;
+       tee_mmu_get(wsm->mmu);
+
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_wsms);
+       tee_mmu_buffer(wsm->mmu, &map);
+       wsm->va = 0;
+       wsm->len = map.length;
+       wsm->flags = map.flags;
+       wsm->in_use = true;
+       return 0;
+}
+
+/*
+ * Free a WSM object, must be called under the session's wsms_lock.
+ * Drops the MMU table reference and the cbuf reference (if any) taken by
+ * wsm_create()/wsm_wrap(), then marks the slot free for re-use.
+ */
+static void wsm_free(struct tee_session *session, struct tee_wsm *wsm)
+{
+       if (!wsm->in_use) {
+               mc_dev_err(-EINVAL, "wsm not in use");
+               return;
+       }
+
+       mc_dev_devel("free wsm %p: mmu %p cbuf %p va %lx len %u sva %x",
+                    wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->sva);
+       /* Free MMU table */
+       tee_mmu_put(wsm->mmu);
+       if (wsm->cbuf)
+               tee_cbuf_put(wsm->cbuf);
+
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_wsms);
+       wsm->in_use = false;
+}
+
+#if KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE
+/*
+ * Compute the SHA-1 of the task's executable path, optionally followed by
+ * @data (used to bind a login identity to the calling application).
+ * @task:     task whose mm->exe_file path is hashed
+ * @hash:     output digest; caller must provide at least SHA1_HASH_SIZE bytes
+ * @data:     optional extra bytes hashed after the path (may be NULL)
+ * @data_len: length of @data
+ *
+ * Holds mm->mmap_sem for reading around the path lookup and hashing.
+ * Returns 0 on success, -ENOMEM/-ENOENT or a crypto allocation error.
+ */
+static int hash_path_and_data(struct task_struct *task, u8 *hash,
+                             const void *data, unsigned int data_len)
+{
+       struct mm_struct *mm = task->mm;
+       struct crypto_shash *tfm;
+       struct shash_desc *desc;
+       size_t desc_size;
+       char *buf;
+       char *path;
+       unsigned int path_len;
+       int ret = 0;
+
+       buf = (char *)__get_free_page(GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       if (!mm->exe_file) {
+               ret = -ENOENT;
+               goto end;
+       }
+
+       /* d_path() builds the string from the end of buf */
+       path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
+       if (IS_ERR(path)) {
+               ret = PTR_ERR(path);
+               goto end;
+       }
+
+       mc_dev_devel("process path =");
+       {
+               char *c;
+
+               for (c = path; *c; c++)
+                       mc_dev_devel("%c %d", *c, *c);
+       }
+
+       path_len = (unsigned int)strnlen(path, PAGE_SIZE);
+       mc_dev_devel("path_len = %u", path_len);
+       /* Compute hash of path */
+       tfm = crypto_alloc_shash("sha1", 0, 0);
+       if (IS_ERR(tfm)) {
+               ret = PTR_ERR(tfm);
+               mc_dev_err(ret, "cannot allocate shash");
+               goto end;
+       }
+
+       desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+       desc = kzalloc(desc_size, GFP_KERNEL);
+       if (!desc) {
+               ret = -ENOMEM;
+               goto err_desc;
+       }
+
+       /* NOTE(review): shash_desc::flags was removed in later kernels;
+        * this compiles only on the version range this #if covers.
+        */
+       desc->tfm = tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       /* NOTE(review): crypto_shash_* return values are ignored here; on
+        * failure @hash could be left stale — consider checking them.
+        */
+       crypto_shash_init(desc);
+       crypto_shash_update(desc, (u8 *)path, path_len);
+       if (data) {
+               mc_dev_devel("hashing additional data");
+               crypto_shash_update(desc, data, data_len);
+       }
+
+       crypto_shash_final(desc, hash);
+       shash_desc_zero(desc);
+       kfree(desc);
+err_desc:
+       crypto_free_shash(tfm);
+end:
+       up_read(&mm->mmap_sem);
+       free_page((unsigned long)buf);
+
+       return ret;
+}
+#else
+/*
+ * Pre-4.6 variant of hash_path_and_data() using the legacy crypto_hash
+ * API: SHA-1 over the task's executable path, optionally followed by
+ * @data. Same contract as the shash version above this #else.
+ */
+static int hash_path_and_data(struct task_struct *task, u8 *hash,
+                             const void *data, unsigned int data_len)
+{
+       struct mm_struct *mm = task->mm;
+       struct hash_desc desc;
+       struct scatterlist sg;
+       char *buf;
+       char *path;
+       unsigned int path_len;
+       int ret = 0;
+
+       buf = (char *)__get_free_page(GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       down_read(&mm->mmap_sem);
+       if (!mm->exe_file) {
+               ret = -ENOENT;
+               goto end;
+       }
+
+       path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
+       if (IS_ERR(path)) {
+               ret = PTR_ERR(path);
+               goto end;
+       }
+
+       mc_dev_devel("process path =");
+       {
+               char *c;
+
+               for (c = path; *c; c++)
+                       mc_dev_devel("%c %d", *c, *c);
+       }
+
+       path_len = (unsigned int)strnlen(path, PAGE_SIZE);
+       mc_dev_devel("path_len = %u", path_len);
+       /* Compute hash of path */
+       desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(desc.tfm)) {
+               ret = PTR_ERR(desc.tfm);
+               mc_dev_devel("could not alloc hash = %d", ret);
+               goto end;
+       }
+
+       desc.flags = 0;
+       sg_init_one(&sg, path, path_len);
+       /* NOTE(review): crypto_hash_* return values are ignored here; on
+        * failure @hash could be left stale — consider checking them.
+        */
+       crypto_hash_init(&desc);
+       crypto_hash_update(&desc, &sg, path_len);
+       if (data) {
+               mc_dev_devel("current process path: hashing additional data");
+               sg_init_one(&sg, data, data_len);
+               crypto_hash_update(&desc, &sg, data_len);
+       }
+
+       crypto_hash_final(&desc, hash);
+       crypto_free_hash(desc.tfm);
+
+end:
+       up_read(&mm->mmap_sem);
+       free_page((unsigned long)buf);
+
+       return ret;
+}
+#endif
+
+#if KERNEL_VERSION(4, 9, 0) <= LINUX_VERSION_CODE
+#define GROUP_AT(gi, i) ((gi)->gid[i])
+#endif
+
+/*
+ * Check whether @id_gid is one of the credential's GIDs: the filesystem
+ * GID, the effective GID, or any supplementary group.
+ * groups_search() is not EXPORTed, so the sorted-list binary search is
+ * copied from kernel/groups.c.
+ * Returns 1 when the group matches, 0 otherwise.
+ */
+static int has_group(const struct cred *cred, gid_t id_gid)
+{
+       const struct group_info *groups = cred->group_info;
+       kgid_t wanted = KGIDT_INIT(id_gid);
+       unsigned int lo, hi;
+
+       /* Fast path: filesystem or effective GID matches directly */
+       if (gid_eq(wanted, cred->fsgid))
+               return 1;
+       if (gid_eq(wanted, cred->egid))
+               return 1;
+
+       if (!groups)
+               return 0;
+
+       /* Binary search through the sorted supplementary group list */
+       lo = 0;
+       hi = groups->ngroups;
+       while (lo < hi) {
+               unsigned int mid = lo + (hi - lo) / 2;
+               kgid_t cur = GROUP_AT(groups, mid);
+
+               if (gid_eq(wanted, cur))
+                       return 1;
+
+               if (gid_gt(wanted, cur))
+                       lo = mid + 1;
+               else
+                       hi = mid;
+       }
+       return 0;
+}
+
+/*
+ * Validate a client-supplied login identity against the calling task's
+ * credentials and fill in the MCP identity to be sent to the SWd.
+ * @identity:     login type and data as given by the client
+ * @mcp_identity: output; only login_type is set for LOGIN_PUBLIC and
+ *                TEEC_TT_LOGIN_KERNEL
+ * @task:         task whose UIDs/GIDs/executable are used
+ *
+ * Returns 0 on success, -EACCES if the requested group is not one of the
+ * task's groups, -EINVAL for an unknown login type, or a
+ * hash_path_and_data() error for application-bound logins.
+ */
+static int check_prepare_identity(const struct mc_identity *identity,
+                                 struct identity *mcp_identity,
+                                 struct task_struct *task)
+{
+       /*
+        * NOTE(review): assumes struct identity and struct mc_identity
+        * share the layout of the fields written below (uid, gid,
+        * login_data) — confirm against the MCI definitions.
+        */
+       struct mc_identity *mcp_id = (struct mc_identity *)mcp_identity;
+       u8 hash[SHA1_HASH_SIZE] = { 0 };
+       bool application = false;
+       const void *data;
+       unsigned int data_len;
+
+       /* Copy login type */
+       mcp_identity->login_type = identity->login_type;
+
+       /* Public/kernel logins carry no identity data */
+       if (identity->login_type == LOGIN_PUBLIC ||
+           identity->login_type == TEEC_TT_LOGIN_KERNEL)
+               return 0;
+
+       /* Fill in uid field */
+       if (identity->login_type == LOGIN_USER ||
+           identity->login_type == LOGIN_USER_APPLICATION) {
+               /* Set euid and ruid of the process. */
+               mcp_id->uid.euid = __kuid_val(task_euid(task));
+               mcp_id->uid.ruid = __kuid_val(task_uid(task));
+       }
+
+       /* Check gid field */
+       if (identity->login_type == LOGIN_GROUP ||
+           identity->login_type == LOGIN_GROUP_APPLICATION) {
+               const struct cred *cred = __task_cred(task);
+
+               /*
+                * Check if gid is one of: egid of the process, its rgid or one
+                * of its supplementary groups
+                */
+               if (!has_group(cred, identity->gid)) {
+                       mc_dev_err(-EACCES, "group %d not allowed",
+                                  identity->gid);
+                       return -EACCES;
+               }
+
+               mc_dev_devel("group %d found", identity->gid);
+               mcp_id->gid = identity->gid;
+       }
+
+       /* Select what extra data (if any) gets hashed with the exe path */
+       switch (identity->login_type) {
+       case LOGIN_PUBLIC:
+       case LOGIN_USER:
+       case LOGIN_GROUP:
+               break;
+       case LOGIN_APPLICATION:
+               application = true;
+               data = NULL;
+               data_len = 0;
+               break;
+       case LOGIN_USER_APPLICATION:
+               application = true;
+               data = &mcp_id->uid;
+               data_len = sizeof(mcp_id->uid);
+               break;
+       case LOGIN_GROUP_APPLICATION:
+               application = true;
+               data = &identity->gid;
+               data_len = sizeof(identity->gid);
+               break;
+       default:
+               /* Any other login_type value is invalid. */
+               mc_dev_err(-EINVAL, "Invalid login type %d",
+                          identity->login_type);
+               return -EINVAL;
+       }
+
+       /* Application-bound logins embed a hash of the caller's executable */
+       if (application) {
+               int ret = hash_path_and_data(task, hash, data, data_len);
+
+               if (ret) {
+                       mc_dev_devel("hash calculation returned %d", ret);
+                       return ret;
+               }
+
+               memcpy(&mcp_id->login_data, hash, sizeof(mcp_id->login_data));
+       }
+
+       return 0;
+}
+
+/*
+ * Create a session object.
+ * Note: object is not attached to client yet.
+ *
+ * @client:   owning client; a reference is taken on success
+ * @identity: non-NULL for a GP session (login data checked against the
+ *            current task), NULL for an MC session
+ *
+ * Returns the new session or an ERR_PTR on failure.
+ */
+struct tee_session *session_create(struct tee_client *client,
+                                  const struct mc_identity *identity)
+{
+       struct tee_session *session;
+       struct identity mcp_identity;
+       /*
+        * Use a single test for both the identity check and the GP/MC
+        * choice below: the original code tested !IS_ERR_OR_NULL() here
+        * but plain non-NULL later, so an ERR_PTR identity would have
+        * skipped check_prepare_identity() yet still been treated as GP,
+        * initialising the session from an uninitialised mcp_identity.
+        */
+       bool is_gp = !IS_ERR_OR_NULL(identity);
+
+       if (is_gp) {
+               /* Check identity method and data. */
+               int ret;
+
+               ret = check_prepare_identity(identity, &mcp_identity, current);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
+
+       /* Allocate session object */
+       session = kzalloc(sizeof(*session), GFP_KERNEL);
+       if (!session)
+               return ERR_PTR(-ENOMEM);
+
+       /* Increment debug counter */
+       atomic_inc(&g_ctx.c_sessions);
+       /* Initialise object members */
+       session->is_gp = is_gp;
+       if (is_gp)
+               iwp_session_init(&session->iwp_session, &mcp_identity);
+       else
+               mcp_session_init(&session->mcp_session);
+
+       client_get(client);
+       session->client = client;
+       kref_init(&session->kref);
+       INIT_LIST_HEAD(&session->list);
+       mutex_init(&session->wsms_lock);
+       mc_dev_devel("created session %p: client %p",
+                    session, session->client);
+       return session;
+}
+
+/*
+ * Free session object and all objects it contains (wsm).
+ * kref release callback: at this point no one holds a reference, so the
+ * wsms_lock does not need to be taken.
+ */
+static void session_release(struct kref *kref)
+{
+       struct tee_session *session;
+       int i;
+
+       /* Remove remaining shared buffers (unmapped in SWd by mcp_close) */
+       session = container_of(kref, struct tee_session, kref);
+       for (i = 0; i < MC_MAP_MAX; i++) {
+               if (!session->wsms[i].in_use)
+                       continue;
+
+               mc_dev_devel("session %p: free wsm #%d", session, i);
+               wsm_free(session, &session->wsms[i]);
+               /* Buffer unmapped by SWd */
+               atomic_dec(&g_ctx.c_maps);
+       }
+
+       if (session->tci.in_use) {
+               mc_dev_devel("session %p: free tci", session);
+               wsm_free(session, &session->tci);
+       }
+
+       if (session->is_gp)
+               mc_dev_devel("freed GP session %p: client %p id %x", session,
+                            session->client, session->iwp_session.sid);
+       else
+               mc_dev_devel("freed MC session %p: client %p id %x", session,
+                            session->client, session->mcp_session.sid);
+
+       /* Drop the client reference taken by session_create() */
+       client_put(session->client);
+       kfree(session);
+       /* Decrement debug counter */
+       atomic_dec(&g_ctx.c_sessions);
+}
+
+/*
+ * Unreference session.
+ * Free session object if reference reaches 0.
+ * Returns non-zero if the object was released (kref_put semantics).
+ */
+int session_put(struct tee_session *session)
+{
+       return kref_put(&session->kref, session_release);
+}
+
+/*
+ * Dump one WSM slot into @buf for the debugfs "structs" view.
+ * @no: WSM slot index, or a negative value to print the session TCI.
+ * Returns 0 on success (or when the slot is unused), a negative error
+ * from kasnprintf()/tee_mmu_debug_structs() otherwise.
+ */
+static int wsm_debug_structs(struct kasnprintf_buf *buf, struct tee_wsm *wsm,
+                            int no)
+{
+       ssize_t ret;
+
+       if (!wsm->in_use)
+               return 0;
+
+       /* Original discarded this result; propagate the error instead */
+       ret = kasnprintf(buf, "\t\t");
+       if (ret < 0)
+               return ret;
+
+       if (no < 0)
+               ret = kasnprintf(buf, "tci %pK: cbuf %pK va %pK len %u\n",
+                                wsm, wsm->cbuf, (void *)wsm->va, wsm->len);
+       else
+               /* in_use already established above, no need to re-test */
+               ret = kasnprintf(buf,
+                                "wsm #%d: cbuf %pK va %pK len %u sva %x\n",
+                                no, wsm->cbuf, (void *)wsm->va, wsm->len,
+                                wsm->sva);
+
+       if (ret < 0)
+               return ret;
+
+       if (wsm->mmu) {
+               ret = tee_mmu_debug_structs(buf, wsm->mmu);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Open an MC session: set up the TCI (either wrapping an existing MMU
+ * mapping or creating one from the user's va/len) and forward the open
+ * request to the MCP layer.
+ * On failure, or if the SWd did not keep the TCI, the TCI created here
+ * is freed again.
+ */
+int session_mc_open_session(struct tee_session *session,
+                           struct mcp_open_info *info)
+{
+       struct tee_wsm *wsm = &session->tci;
+       bool tci_in_use = false;
+       int ret;
+
+       /* Check that tci and its length make sense */
+       if (info->tci_len > MC_MAX_TCI_LEN)
+               return -EINVAL;
+
+       /* va and len must be both set or both zero */
+       if (!info->tci_va != !info->tci_len) {
+               mc_dev_devel("TCI pointer and length are inconsistent");
+               return -EINVAL;
+       }
+
+       /* Add existing TCI map */
+       if (info->tci_mmu) {
+               ret = wsm_wrap(session, wsm, info->tci_mmu);
+               if (ret)
+                       return ret;
+
+               tci_in_use = true;
+               mc_dev_devel("wrapped tci: mmu %p len %u flags %x",
+                            wsm->mmu, wsm->len, wsm->flags);
+       }
+
+       /* Create mapping for TCI */
+       if (info->tci_va) {
+               struct mc_ioctl_buffer buf = {
+                       .va = info->tci_va,
+                       .len = info->tci_len,
+                       .flags = MC_IO_MAP_INPUT_OUTPUT,
+               };
+
+               ret = wsm_create(session, wsm, &buf);
+               if (ret)
+                       return ret;
+
+               tci_in_use = true;
+               info->tci_mmu = wsm->mmu;
+               mc_dev_devel(
+                       "created tci: mmu %p cbuf %p va %lx len %u flags %x",
+                       wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->flags);
+       }
+
+       /* mcp_open_session() clears tci_in_use if the SWd dropped the TCI */
+       ret = mcp_open_session(&session->mcp_session, info, &tci_in_use);
+       if (info->tci_va && (ret || !tci_in_use))
+               wsm_free(session, &session->tci);
+
+       return ret;
+}
+
+/*
+ * Close session and unreference session object.
+ * Session object is assumed to have been removed from main list, which means
+ * that session_close cannot be called anymore.
+ * Dispatches to the GP (iwp) or MC (mcp) close path and logs on success.
+ */
+int session_close(struct tee_session *session)
+{
+       int ret;
+
+       if (!session->is_gp) {
+               ret = mcp_close_session(&session->mcp_session);
+               if (!ret)
+                       mc_dev_devel("closed MC session %x",
+                                    session->mcp_session.sid);
+               return ret;
+       }
+
+       ret = iwp_close_session(&session->iwp_session);
+       if (!ret)
+               mc_dev_devel("closed GP session %x",
+                            session->iwp_session.sid);
+       return ret;
+}
+
+/*
+ * Session is to be removed from NWd records as SWd is dead.
+ * Cleans up the MCP side and drops a session reference; returns the
+ * kref_put() result (non-zero if the session was freed).
+ */
+int session_mc_cleanup_session(struct tee_session *session)
+{
+       mcp_cleanup_session(&session->mcp_session);
+       return session_put(session);
+}
+
+/*
+ * Send a notification to TA.
+ * Returns -EINVAL on a NULL session, otherwise the mcp_notify() result.
+ */
+int session_mc_notify(struct tee_session *session)
+{
+       if (!session) {
+               mc_dev_devel("Session pointer is null");
+               return -EINVAL;
+       }
+
+       return mcp_notify(&session->mcp_session);
+}
+
+/*
+ * Sleep until next notification from SWd.
+ * @timeout:       wait timeout, forwarded to mcp_wait()
+ * @silent_expiry: suppress logging on timeout
+ */
+int session_mc_wait(struct tee_session *session, s32 timeout,
+                   bool silent_expiry)
+{
+       return mcp_wait(&session->mcp_session, timeout, silent_expiry);
+}
+
+/*
+ * Share buffers with SWd and add corresponding WSM objects to session.
+ * This may involve some re-use or cleanup of inactive mappings.
+ * @mmu: optional pre-built MMU table to wrap; when NULL a new mapping is
+ *       created from @buf's va/len
+ * On success the SWd address is written back to buf->sva.
+ * Returns -EPERM when all MC_MAP_MAX slots are taken.
+ */
+int session_mc_map(struct tee_session *session, struct tee_mmu *mmu,
+                  struct mc_ioctl_buffer *buf)
+{
+       struct tee_wsm *wsm;
+       u32 sva;
+       int i, ret;
+
+       mutex_lock(&session->wsms_lock);
+       /* Look for an available slot in the session WSMs array */
+       for (i = 0; i < MC_MAP_MAX; i++)
+               if (!session->wsms[i].in_use)
+                       break;
+
+       if (i == MC_MAP_MAX) {
+               ret = -EPERM;
+               mc_dev_devel("no available WSM slot in session %x",
+                            session->mcp_session.sid);
+               goto out;
+       }
+
+       wsm = &session->wsms[i];
+       if (!mmu)
+               ret = wsm_create(session, wsm, buf);
+       else
+               ret = wsm_wrap(session, wsm, mmu);
+
+       if (ret) {
+               mc_dev_devel("maps[%d] va=%llx create failed: %d",
+                            i, buf->va, ret);
+               goto out;
+       }
+
+       mc_dev_devel("created wsm #%d: mmu %p cbuf %p va %lx len %u flags %x",
+                    i, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->flags);
+       /* Ask the SWd to map the buffer; on failure undo the local WSM */
+       ret = mcp_map(session->mcp_session.sid, wsm->mmu, &sva);
+       if (ret) {
+               wsm_free(session, wsm);
+       } else {
+               buf->sva = sva;
+               wsm->sva = sva;
+       }
+
+out:
+       mutex_unlock(&session->wsms_lock);
+       mc_dev_devel("ret=%d", ret);
+       return ret;
+}
+
+/*
+ * In theory, stop sharing buffers with the SWd. In fact, mark them inactive.
+ * Looks the mapping up by (va, len, sva) in the session's WSM slots, asks
+ * the SWd to unmap it, then frees the local WSM on success.
+ * Returns -EINVAL if no matching mapping is found.
+ */
+int session_mc_unmap(struct tee_session *session,
+                    const struct mc_ioctl_buffer *buf)
+{
+       struct tee_wsm *wsm;
+       struct mcp_buffer_map map;
+       int i, ret = -EINVAL;
+
+       mutex_lock(&session->wsms_lock);
+       /* Look for buffer in the session WSMs array */
+       for (i = 0; i < MC_MAP_MAX; i++)
+               if (session->wsms[i].in_use &&
+                   buf->va == session->wsms[i].va &&
+                   buf->len == session->wsms[i].len &&
+                   buf->sva == session->wsms[i].sva)
+                       break;
+
+       if (i == MC_MAP_MAX) {
+               ret = -EINVAL;
+               /*
+                * Fix: the original logged buf[i].va/buf[i].sva here, an
+                * out-of-bounds read since i == MC_MAP_MAX and buf points
+                * to a single buffer descriptor.
+                */
+               mc_dev_devel("buffer va=%llx sva=%llx not found",
+                            (u64)buf->va, (u64)buf->sva);
+               goto out;
+       }
+
+       wsm = &session->wsms[i];
+       tee_mmu_buffer(wsm->mmu, &map);
+       map.secure_va = wsm->sva;
+
+       /* Only drop the local WSM once the SWd has released the buffer */
+       ret = mcp_unmap(session->mcp_session.sid, &map);
+       if (!ret)
+               wsm_free(session, wsm);
+
+out:
+       mutex_unlock(&session->wsms_lock);
+       return ret;
+}
+
+/*
+ * Read and clear last notification received from TA.
+ * The exit/error code is written to @err.
+ */
+int session_mc_get_err(struct tee_session *session, s32 *err)
+{
+       return mcp_get_err(&session->mcp_session, err);
+}
+
+/*
+ * Release the GP buffers set up by map_gp_bufs(): free the WSMs created
+ * for temporary memory and drop the SVA references taken on registered
+ * memory. Safe to call on a partially filled @maps array.
+ */
+static void unmap_gp_bufs(struct tee_session *session,
+                         struct iwp_buffer_map *maps)
+{
+       int i;
+
+       mutex_lock(&session->wsms_lock);
+       for (i = 0; i < MC_MAP_MAX; i++) {
+               if (session->wsms[i].in_use)
+                       wsm_free(session, &session->wsms[i]);
+
+               if (maps[i].sva)
+                       client_put_cwsm_sva(session->client, maps[i].sva);
+       }
+       mutex_unlock(&session->wsms_lock);
+}
+
+/*
+ * Prepare the GP operation buffers for the SWd.
+ * For each of the MC_MAP_MAX parameter slots:
+ *  - a non-zero bufs[i].va is TEEC_MEMREF_TEMP_* memory: create a WSM
+ *    mapping and fill maps[i].map;
+ *  - otherwise, a non-NULL parents[i] is registered shared memory: look
+ *    up its SWd address into maps[i].sva.
+ * On any failure all slots prepared so far are released again via
+ * unmap_gp_bufs(). Returns 0 on success or a negative error.
+ */
+static int map_gp_bufs(struct tee_session *session,
+                      const struct mc_ioctl_buffer *bufs,
+                      struct gp_shared_memory **parents,
+                      struct iwp_buffer_map *maps)
+{
+       int i, ret = 0;
+
+       /* Create WSMs from bufs */
+       mutex_lock(&session->wsms_lock);
+       for (i = 0; i < MC_MAP_MAX; i++) {
+               /* Reset reference for temporary memory */
+               maps[i].map.addr = 0;
+               /* Reset reference for registered memory */
+               maps[i].sva = 0;
+               if (bufs[i].va) {
+                       /* Temporary memory, needs mapping */
+                       ret = wsm_create(session, &session->wsms[i], &bufs[i]);
+                       if (ret) {
+                               mc_dev_devel(
+                                       "maps[%d] va=%llx create failed: %d",
+                                       i, bufs[i].va, ret);
+                               break;
+                       }
+
+                       tee_mmu_buffer(session->wsms[i].mmu, &maps[i].map);
+               } else if (parents[i]) {
+                       /* Registered memory, already mapped */
+                       maps[i].sva = client_get_cwsm_sva(session->client,
+                                                         parents[i]);
+                       if (!maps[i].sva) {
+                               ret = -EINVAL;
+                               mc_dev_devel("couldn't find shared mem");
+                               break;
+                       }
+
+                       mc_dev_devel("param[%d] has sva %x", i, maps[i].sva);
+               }
+       }
+       mutex_unlock(&session->wsms_lock);
+
+       /* Failed above (loop exited early): undo partial work */
+       if (i < MC_MAP_MAX)
+               unmap_gp_bufs(session, maps);
+
+       return ret;
+}
+
+/*
+ * Open a GP session: prepare the operation parameters, map its
+ * TEEC_MEMREF_TEMP_* buffers, register the operation with the client (so
+ * it can be cancelled), then call into the SWd via iwp_open_session().
+ * Buffers and the client operation record are always released before
+ * returning; GP status details are reported through @gp_ret.
+ */
+int session_gp_open_session(struct tee_session *session,
+                           const struct mc_uuid_t *uuid,
+                           struct gp_operation *operation,
+                           struct gp_return *gp_ret)
+{
+       /* TEEC_MEMREF_TEMP_* buffers to map */
+       struct mc_ioctl_buffer bufs[MC_MAP_MAX];
+       struct iwp_buffer_map maps[MC_MAP_MAX];
+       struct gp_shared_memory *parents[MC_MAP_MAX] = { NULL };
+       struct client_gp_operation client_operation;
+       int ret = 0;
+
+       ret = iwp_open_session_prepare(&session->iwp_session, operation, bufs,
+                                      parents, gp_ret);
+       if (ret)
+               return ret;
+
+       /* Create WSMs from bufs */
+       ret = map_gp_bufs(session, bufs, parents, maps);
+       if (ret) {
+               iwp_open_session_abort(&session->iwp_session);
+               return iwp_set_ret(ret, gp_ret);
+       }
+
+       /* Tell client about operation */
+       client_operation.started = operation->started;
+       client_operation.slot = iwp_session_slot(&session->iwp_session);
+       client_operation.cancelled = false;
+       if (!client_gp_operation_add(session->client, &client_operation)) {
+               /* Operation already cancelled by the client */
+               iwp_open_session_abort(&session->iwp_session);
+               return iwp_set_ret(-ECANCELED, gp_ret);
+       }
+
+       /* Open/call TA */
+       ret = iwp_open_session(&session->iwp_session, uuid, operation, maps,
+                              NULL, NULL, gp_ret);
+       /* Cleanup */
+       client_gp_operation_remove(session->client, &client_operation);
+       unmap_gp_bufs(session, maps);
+       return ret;
+}
+
+/*
+ * Open a GP session on behalf of a DomU (virtualised) client: the buffer
+ * mappings arrive pre-built as @mmus with an interworld session @iws, so
+ * no local buffer mapping is performed here.
+ */
+int session_gp_open_session_domu(struct tee_session *session,
+                                const struct mc_uuid_t *uuid, u64 started,
+                                struct interworld_session *iws,
+                                struct tee_mmu **mmus,
+                                struct gp_return *gp_ret)
+{
+       /* TEEC_MEMREF_TEMP_* buffers to map */
+       struct client_gp_operation client_operation;
+       int ret = 0;
+
+       ret = iwp_open_session_prepare(&session->iwp_session, NULL, NULL, NULL,
+                                      gp_ret);
+       if (ret)
+               return ret;
+
+       /* Tell client about operation */
+       client_operation.started = started;
+       client_operation.slot = iwp_session_slot(&session->iwp_session);
+       client_operation.cancelled = false;
+       if (!client_gp_operation_add(session->client, &client_operation)) {
+               iwp_open_session_abort(&session->iwp_session);
+               return iwp_set_ret(-ECANCELED, gp_ret);
+       }
+
+       /* Open/call TA */
+       ret = iwp_open_session(&session->iwp_session, uuid, NULL, NULL, iws,
+                              mmus, gp_ret);
+       /* Cleanup */
+       client_gp_operation_remove(session->client, &client_operation);
+       return ret;
+}
+
+/*
+ * Invoke a command on a GP session: prepare the operation, map its
+ * TEEC_MEMREF_TEMP_* buffers, register the operation with the client
+ * (for cancellation), then call into the SWd via iwp_invoke_command().
+ * Buffers and the client operation record are always released before
+ * returning; GP status details are reported through @gp_ret.
+ */
+int session_gp_invoke_command(struct tee_session *session, u32 command_id,
+                             struct gp_operation *operation,
+                             struct gp_return *gp_ret)
+{
+       /*
+        * TEEC_MEMREF_TEMP_* buffers to map.
+        * Fix: was hard-coded bufs[4] while iwp_invoke_command_prepare()
+        * and map_gp_bufs() fill MC_MAP_MAX entries — size it like
+        * session_gp_open_session() to avoid a stack overrun if
+        * MC_MAP_MAX != 4.
+        */
+       struct mc_ioctl_buffer bufs[MC_MAP_MAX];
+       struct iwp_buffer_map maps[MC_MAP_MAX];
+       struct gp_shared_memory *parents[MC_MAP_MAX] = { NULL };
+       struct client_gp_operation client_operation;
+       int ret = 0;
+
+       ret = iwp_invoke_command_prepare(&session->iwp_session, command_id,
+                                        operation, bufs, parents, gp_ret);
+       if (ret)
+               return ret;
+
+       /* Create WSMs from bufs */
+       ret = map_gp_bufs(session, bufs, parents, maps);
+       if (ret) {
+               iwp_invoke_command_abort(&session->iwp_session);
+               return iwp_set_ret(ret, gp_ret);
+       }
+
+       /* Tell client about operation */
+       client_operation.started = operation->started;
+       client_operation.slot = iwp_session_slot(&session->iwp_session);
+       client_operation.cancelled = false;
+       if (!client_gp_operation_add(session->client, &client_operation)) {
+               iwp_invoke_command_abort(&session->iwp_session);
+               return iwp_set_ret(-ECANCELED, gp_ret);
+       }
+
+       /* Call TA */
+       ret = iwp_invoke_command(&session->iwp_session, operation, maps, NULL,
+                                NULL, gp_ret);
+       /* Cleanup */
+       client_gp_operation_remove(session->client, &client_operation);
+       unmap_gp_bufs(session, maps);
+       return ret;
+}
+
+/*
+ * Invoke a command on behalf of a DomU (virtualised) client: buffer
+ * mappings arrive pre-built as @mmus with an interworld session @iws, so
+ * no local buffer mapping is performed here.
+ */
+int session_gp_invoke_command_domu(struct tee_session *session,
+                                  u64 started, struct interworld_session *iws,
+                                  struct tee_mmu **mmus,
+                                  struct gp_return *gp_ret)
+{
+       struct client_gp_operation client_operation;
+       int ret = 0;
+
+       ret = iwp_invoke_command_prepare(&session->iwp_session, 0, NULL, NULL,
+                                        NULL, gp_ret);
+       if (ret)
+               return ret;
+
+       /* Tell client about operation */
+       client_operation.started = started;
+       client_operation.slot = iwp_session_slot(&session->iwp_session);
+       client_operation.cancelled = false;
+       if (!client_gp_operation_add(session->client, &client_operation)) {
+               iwp_invoke_command_abort(&session->iwp_session);
+               return iwp_set_ret(-ECANCELED, gp_ret);
+       }
+
+       /* Call TA */
+       ret = iwp_invoke_command(&session->iwp_session, NULL, NULL, iws, mmus,
+                                gp_ret);
+       /* Cleanup */
+       client_gp_operation_remove(session->client, &client_operation);
+       return ret;
+}
+
+/*
+ * Request cancellation of the GP operation occupying @slot.
+ * Thin wrapper around iwp_request_cancellation().
+ */
+int session_gp_request_cancellation(u64 slot)
+{
+       return iwp_request_cancellation(slot);
+}
+
+/*
+ * Dump a session (header line, TCI, all WSM slots) into @buf for the
+ * debugfs "structs" view.
+ * @is_closing: annotate the session as being torn down.
+ * Returns 0 on success or the first negative kasnprintf error.
+ */
+int session_debug_structs(struct kasnprintf_buf *buf,
+                         struct tee_session *session, bool is_closing)
+{
+       const char *type;
+       u32 session_id;
+       s32 err;
+       int i, ret;
+
+       if (session->is_gp) {
+               session_id = session->iwp_session.sid;
+               err = 0;
+               type = "GP";
+       } else {
+               session_id = session->mcp_session.sid;
+               session_mc_get_err(session, &err);
+               type = "MC";
+       }
+
+       ret = kasnprintf(buf, "\tsession %pK [%d]: %4x %s ec %d%s\n",
+                        session, kref_read(&session->kref), session_id, type,
+                        err, is_closing ? " <closing>" : "");
+       if (ret < 0)
+               return ret;
+
+       /* TCI */
+       if (session->tci.in_use) {
+               ret = wsm_debug_structs(buf, &session->tci, -1);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* WMSs */
+       mutex_lock(&session->wsms_lock);
+       for (i = 0; i < MC_MAP_MAX; i++) {
+               ret = wsm_debug_structs(buf, &session->wsms[i], i);
+               if (ret < 0)
+                       break;
+       }
+       mutex_unlock(&session->wsms_lock);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/session.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/session.h
new file mode 100755 (executable)
index 0000000..b1fff6c
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SESSION_H_
+#define _SESSION_H_
+
+#include <linux/list.h>
+
+#include "mcp.h"
+#include "iwp.h"
+
+struct tee_object;
+struct tee_mmu;
+struct mc_ioctl_buffer;
+
+/*
+ * World Shared Memory (WSM) buffer descriptor: one buffer shared between
+ * the normal world and the secure world. Only valid when in_use is set.
+ */
+struct tee_wsm {
+       /* Buffer NWd address (uva or kva, used only for lookup) */
+       uintptr_t               va;
+       /* Buffer length */
+       u32                     len;
+       /* Buffer flags */
+       u32                     flags;
+       /* Buffer SWd address */
+       u32                     sva;
+       /* Mutually exclusive: which member is live depends on the mapping
+        * phase — presumably 'index' only during buffer re-use resolution,
+        * 'mmu' afterwards (TODO confirm in session.c)
+        */
+       union {
+               /* MMU table */
+               struct tee_mmu          *mmu;
+               /* Index of re-used buffer (temporary) */
+               int                     index;
+       };
+       /* Pointer to associated cbuf, if relevant */
+       struct cbuf             *cbuf;
+       /* State of this WSM */
+       bool                    in_use;
+};
+
+/*
+ * A TEE session opened by a client. The anonymous union holds either an
+ * MC (MobiCore) or a GP (GlobalPlatform) descriptor; which member is valid
+ * is selected by the is_gp flag (see session_debug_structs()).
+ */
+struct tee_session {
+       /* Session descriptor */
+       union {
+               struct mcp_session      mcp_session;
+               struct iwp_session      iwp_session;
+       };
+       /* Owner */
+       struct tee_client       *client;
+       /* Number of references kept to this object */
+       struct kref             kref;
+       /* WSM for the TCI */
+       struct tee_wsm          tci;
+       /* The list entry to attach to session list of owner */
+       struct list_head        list;
+       /* Session WSMs lock */
+       struct mutex            wsms_lock;
+       /* WSMs for a session */
+       struct tee_wsm          wsms[MC_MAP_MAX];
+       /* This TA is of Global Platform type */
+       bool                    is_gp;
+};
+
+/* Allocate a new session owned by client; identity selects MC vs GP type */
+struct tee_session *session_create(struct tee_client *client,
+                                  const struct mc_identity *identity);
+/* Take a reference on the session (pair each call with session_put()) */
+static inline void session_get(struct tee_session *session)
+{
+       kref_get(&session->kref);
+}
+
+/* Drop a reference taken by session_create()/session_get() */
+int session_put(struct tee_session *session);
+int session_close(struct tee_session *session);
+
+/* MC (MobiCore proprietary) session operations */
+int session_mc_open_session(struct tee_session *session,
+                           struct mcp_open_info *info);
+int session_mc_cleanup_session(struct tee_session *session);
+int session_mc_notify(struct tee_session *session);
+int session_mc_wait(struct tee_session *session, s32 timeout,
+                   bool silent_expiry);
+int session_mc_map(struct tee_session *session, struct tee_mmu *mmu,
+                  struct mc_ioctl_buffer *bufs);
+int session_mc_unmap(struct tee_session *session,
+                    const struct mc_ioctl_buffer *bufs);
+int session_mc_get_err(struct tee_session *session, s32 *err);
+
+/* GP (GlobalPlatform) session operations; *_domu variants serve Xen domains */
+int session_gp_open_session(struct tee_session *session,
+                           const struct mc_uuid_t *uuid,
+                           struct gp_operation *operation,
+                           struct gp_return *gp_ret);
+int session_gp_open_session_domu(struct tee_session *session,
+                                const struct mc_uuid_t *uuid, u64 started,
+                                struct interworld_session *iws,
+                                struct tee_mmu **mmus,
+                                struct gp_return *gp_ret);
+int session_gp_invoke_command(struct tee_session *session, u32 command_id,
+                             struct gp_operation *operation,
+                             struct gp_return *gp_ret);
+int session_gp_invoke_command_domu(struct tee_session *session,
+                                  u64 started, struct interworld_session *iws,
+                                  struct tee_mmu **mmus,
+                                  struct gp_return *gp_ret);
+int session_gp_request_cancellation(u64 slot);
+
+/* Dump session state into a debugfs-style buffer */
+int session_debug_structs(struct kasnprintf_buf *buf,
+                         struct tee_session *session, bool is_closing);
+
+#endif /* _SESSION_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/teeclientapi.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/teeclientapi.c
new file mode 100755 (executable)
index 0000000..071737d
--- /dev/null
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "public/GP/tee_client_api.h"
+#include "public/mc_user.h"
+
+#include "main.h"
+#include "mci/gptci.h" /* Needs stuff from tee_client_api.h or its includes */
+#include "mci/mcinq.h" /* TA termination codes */
+#include "client.h"
+
+/* Macros */
+#define _TEEC_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
+
+/* Parameter number */
+#define _TEEC_PARAMETER_NUMBER         4
+
+/**teec_shared_memory
+ * These error codes are still to be decided by GP and as we do not wish to
+ * expose any part of the GP TAF as of yet, for now they will have to live here
+ * until we decide what to do about them.
+ */
+#define TEEC_ERROR_TA_LOCKED           0xFFFF0257
+#define TEEC_ERROR_SD_BLOCKED          0xFFFF0258
+#define TEEC_ERROR_TARGET_KILLED       0xFFFF0259
+
+static DECLARE_WAIT_QUEUE_HEAD(operations_wq);
+
+/*
+ * Serialize a struct teec_uuid into a big-endian (RFC 4122 wire order)
+ * byte array.
+ *
+ * Each nibble of 'offsets' (consumed low nibble first) is a signed 4-bit
+ * displacement added to the linear byte cursor. On little-endian hosts the
+ * nibble sequence 3,1,-1,-3, 1,-1, 1,-1, 0... byte-swaps the leading
+ * 4-byte and two 2-byte UUID fields while copying the trailing 8 bytes
+ * unchanged; on big-endian all offsets are 0 (straight copy).
+ */
+static void _lib_uuid_to_array(const struct teec_uuid *uuid, u8 *uuid_array)
+{
+       u8 *identifier_cursor = (u8 *)uuid;
+       /* offsets and syntax constants. See explanations above */
+#ifdef S_BIG_ENDIAN
+       u32 offsets = 0;
+#else
+       u32 offsets = 0xF1F1DF13;
+#endif
+       u32 i;
+
+       for (i = 0; i < sizeof(struct teec_uuid); i++) {
+               /* Two-digit hex number */
+               /* Sign-extend the low nibble of 'offsets' to a s32 offset */
+               s32 offset = ((s32)((offsets & 0xF) << 28)) >> 28;
+               u8 number = identifier_cursor[offset];
+
+               offsets >>= 4;
+               identifier_cursor++;
+
+               uuid_array[i] = number;
+       }
+}
+
+/*
+ * Marshal the input direction of a TEEC operation into its GP (driver)
+ * representation. Returns 0, or -EINVAL if a parameter type is unknown
+ * (marshalling of the remaining parameters still completes).
+ */
+static u32 _teec_to_gp_operation(struct teec_operation *teec_op,
+                                struct gp_operation *gp_op)
+{
+       int idx;
+       int ret = 0;
+
+       for (idx = 0; idx < _TEEC_PARAMETER_NUMBER; idx++) {
+               u32 type = _TEEC_GET_PARAM_TYPE(teec_op->param_types, idx);
+
+               switch (type) {
+               case TEEC_VALUE_INPUT:
+               case TEEC_VALUE_INOUT:
+                       gp_op->params[idx].value.a =
+                               teec_op->params[idx].value.a;
+                       gp_op->params[idx].value.b =
+                               teec_op->params[idx].value.b;
+                       break;
+               case TEEC_MEMREF_TEMP_INPUT:
+               case TEEC_MEMREF_TEMP_OUTPUT:
+               case TEEC_MEMREF_TEMP_INOUT:
+                       gp_op->params[idx].tmpref.buffer =
+                               (uintptr_t)teec_op->params[idx].tmpref.buffer;
+                       gp_op->params[idx].tmpref.size =
+                               teec_op->params[idx].tmpref.size;
+                       break;
+               case TEEC_MEMREF_WHOLE:
+               case TEEC_MEMREF_PARTIAL_INPUT:
+               case TEEC_MEMREF_PARTIAL_OUTPUT:
+               case TEEC_MEMREF_PARTIAL_INOUT:
+                       gp_op->params[idx].memref.offset =
+                               teec_op->params[idx].memref.offset;
+                       gp_op->params[idx].memref.size =
+                               teec_op->params[idx].memref.size;
+                       gp_op->params[idx].memref.parent.buffer =
+                        (uintptr_t)teec_op->params[idx].memref.parent->buffer;
+                       gp_op->params[idx].memref.parent.size =
+                               teec_op->params[idx].memref.parent->size;
+                       gp_op->params[idx].memref.parent.flags =
+                               teec_op->params[idx].memref.parent->flags;
+                       break;
+               case TEEC_NONE:
+               case TEEC_VALUE_OUTPUT:
+                       /* Nothing to marshal in this direction */
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+       gp_op->param_types = teec_op->param_types;
+       return ret;
+}
+
+/*
+ * Copy the output direction of a GP operation back into the TEEC
+ * operation: value outputs and updated memref/tmpref sizes.
+ */
+static void _teec_from_gp_operation(struct gp_operation *gp_op,
+                                   struct teec_operation *teec_op)
+{
+       int idx;
+
+       for (idx = 0; idx < _TEEC_PARAMETER_NUMBER; idx++) {
+               switch (_TEEC_GET_PARAM_TYPE(gp_op->param_types, idx)) {
+               case TEEC_VALUE_OUTPUT:
+               case TEEC_VALUE_INOUT:
+                       teec_op->params[idx].value.a =
+                               gp_op->params[idx].value.a;
+                       teec_op->params[idx].value.b =
+                               gp_op->params[idx].value.b;
+                       break;
+               case TEEC_MEMREF_TEMP_INPUT:
+               case TEEC_MEMREF_TEMP_OUTPUT:
+               case TEEC_MEMREF_TEMP_INOUT:
+                       teec_op->params[idx].tmpref.size =
+                               gp_op->params[idx].tmpref.size;
+                       break;
+               case TEEC_MEMREF_PARTIAL_INPUT:
+               case TEEC_MEMREF_PARTIAL_OUTPUT:
+               case TEEC_MEMREF_PARTIAL_INOUT:
+                       teec_op->params[idx].memref.size =
+                               gp_op->params[idx].memref.size;
+                       break;
+               default:
+                       /* TEEC_MEMREF_WHOLE, TEEC_NONE, TEEC_VALUE_INPUT and
+                        * unknown types: nothing to copy back
+                        */
+                       break;
+               }
+       }
+}
+
+static u32 _teec_convert_error(int errno)
+{
+       switch (errno) {
+       case ENOENT:
+               return TEEC_ERROR_ITEM_NOT_FOUND;
+       case EACCES:
+               return TEEC_ERROR_ACCESS_DENIED;
+       case EINVAL:
+               return TEEC_ERROR_BAD_PARAMETERS;
+       case ENOSPC:
+               return TEEC_ERROR_OUT_OF_MEMORY;
+       case ECONNREFUSED:
+               return TEEC_ERROR_SD_BLOCKED;
+       case ECONNABORTED:
+               return TEEC_ERROR_TA_LOCKED;
+       case ECONNRESET:
+               return TEEC_ERROR_TARGET_KILLED;
+       case EBUSY:
+               return TEEC_ERROR_BUSY;
+       case EKEYREJECTED:
+               return TEEC_ERROR_SECURITY;
+       case ETIME:
+               return TEEC_ERROR_TARGET_DEAD;
+       default:
+               return TEEC_ERROR_GENERIC;
+       }
+}
+
+/* teec_initialize_context: TEEC_SUCCESS, Another error code from Table 4-2 */
+u32 teec_initialize_context(const char *name, struct teec_context *context)
+{
+       struct tee_client *client;
+       int ret;
+       /* The TEE name is not used by this implementation */
+       (void)name;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       if (!context) {
+               mc_dev_devel("context is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       /* Make sure TEE was started */
+       ret = mc_wait_tee_start();
+       if (ret) {
+               mc_dev_err(ret, "TEE failed to start, now or in the past");
+               return TEEC_ERROR_BAD_STATE;
+       }
+
+       /* Create client (kernel-side client, hence the 'true') */
+       client = client_create(true);
+       if (!client)
+               return TEEC_ERROR_OUT_OF_MEMORY;
+
+       /* Store client in context */
+       context->imp.client = client;
+
+       return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_initialize_context);
+
+/*
+ * The implementation of this function MUST NOT be able to fail: after this
+ * function returns the Client Application must be able to consider that the
+ * Context has been closed
+ */
+void teec_finalize_context(struct teec_context *context)
+{
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* The parameter context MUST point to an initialized TEE Context */
+       if (!context) {
+               mc_dev_devel("context is NULL");
+               return;
+       }
+
+       /* Robustness: tolerate an uninitialized or already-finalized context.
+        * The GP contract above forbids failure, so never hand a NULL client
+        * down to client_close()
+        */
+       if (!context->imp.client) {
+               mc_dev_devel("context not initialized");
+               return;
+       }
+
+       /* The implementation of this function MUST NOT be able to fail: after
+        * this function returns the Client Application must be able to
+        * consider that the Context has been closed
+        */
+       client_close(context->imp.client);
+       context->imp.client = NULL;
+}
+EXPORT_SYMBOL(teec_finalize_context);
+
+/*
+ * If the return_origin is different from TEEC_ORIGIN_TRUSTED_APP, an error code
+ * from Table 4-2. If the return_origin is equal to TEEC_ORIGIN_TRUSTED_APP, a
+ * return code defined by the protocol between the Client Application and the
+ * Trusted Application
+ */
+u32 teec_open_session(struct teec_context *context,
+                     struct teec_session *session,
+                     const struct teec_uuid *destination,
+                     u32 connection_method,
+                     const void *connection_data,
+                     struct teec_operation *operation,
+                     u32 *return_origin)
+{
+       struct mc_uuid_t uuid;
+       struct mc_identity identity;
+       struct tee_client *client = NULL;
+       struct gp_operation gp_op;
+       struct gp_return gp_ret;
+       int ret = 0, timeout;
+
+       mc_dev_devel("== %s() ==============", __func__);
+       gp_ret.value = TEEC_SUCCESS;
+       if (return_origin)
+               *return_origin = TEEC_ORIGIN_API;
+
+       /* The parameter context MUST point to an initialized TEE Context */
+       if (!context) {
+               mc_dev_devel("context is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       if (!context->imp.client) {
+               mc_dev_devel("context not initialized");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       client = context->imp.client;
+
+       if (!session) {
+               mc_dev_devel("session is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       /* Kernel clients always use the kernel login type, whatever the
+        * caller passed in
+        */
+       connection_method = TEEC_TT_LOGIN_KERNEL;
+       session->imp.active = false;
+
+       _lib_uuid_to_array(destination, uuid.value);
+
+       memset(&gp_op, 0, sizeof(gp_op));
+       if (operation) {
+               operation->imp.session = &session->imp;
+               ret = _teec_to_gp_operation(operation, &gp_op);
+               if (ret)
+                       return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       identity.login_type = (enum mc_login_type)connection_method;
+
+       /* Wait for GP loading to be possible, maximum 30s */
+       timeout = 30;
+       do {
+               ret = client_gp_open_session(client, &uuid, &gp_op, &identity,
+                                            &gp_ret, &session->imp.session_id);
+               /* client_gp_open_session() returns negative errno codes (see
+                * _teec_convert_error(-ret) below), so retry on -EAGAIN; the
+                * previous check against positive EAGAIN never matched and
+                * made this retry loop dead code
+                */
+               if (ret != -EAGAIN)
+                       break;
+
+               msleep(1000);
+       } while (--timeout);
+
+       if (ret || gp_ret.value != TEEC_SUCCESS) {
+               mc_dev_devel("client_gp_open_session failed(%08x) %08x", ret,
+                            gp_ret.value);
+               if (ret)
+                       gp_ret.value = _teec_convert_error(-ret);
+               else if (return_origin)
+                       /* Update origin as it's not the API */
+                       *return_origin = gp_ret.origin;
+       } else {
+               mc_dev_devel(" created session ID %x", session->imp.session_id);
+               session->imp.context = context->imp;
+               session->imp.active = true;
+               if (operation)
+                       _teec_from_gp_operation(&gp_op, operation);
+       }
+
+       mc_dev_devel(" %s() = 0x%x", __func__, gp_ret.value);
+       return gp_ret.value;
+}
+EXPORT_SYMBOL(teec_open_session);
+
+/*
+ * Invoke a command in an open session. Returns TEEC_SUCCESS or a TEEC
+ * error; return_origin (if given) tells which layer produced the error.
+ */
+u32 teec_invoke_command(struct teec_session *session,
+                       u32 command_id,
+                       struct teec_operation *operation,
+                       u32 *return_origin)
+{
+       struct tee_client *client;
+       struct gp_operation gp_op = {0};
+       struct gp_return gp_ret = {0};
+       int ret;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       gp_ret.value = TEEC_SUCCESS;
+       if (return_origin)
+               *return_origin = TEEC_ORIGIN_API;
+
+       /* The session MUST exist and be active */
+       if (!session) {
+               mc_dev_devel("session is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       if (!session->imp.active) {
+               mc_dev_devel("session is inactive");
+               return TEEC_ERROR_BAD_STATE;
+       }
+
+       client = session->imp.context.client;
+       /* gp_op is zero-initialized, so param_types is already 0 when the
+        * caller passes no operation
+        */
+       if (operation) {
+               operation->imp.session = &session->imp;
+               if (_teec_to_gp_operation(operation, &gp_op))
+                       return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       ret = client_gp_invoke_command(client, session->imp.session_id,
+                                      command_id, &gp_op, &gp_ret);
+       if (!ret && gp_ret.value == TEEC_SUCCESS) {
+               if (operation)
+                       _teec_from_gp_operation(&gp_op, operation);
+       } else {
+               mc_dev_devel("client_gp_invoke_command failed(%08x) %08x", ret,
+                            gp_ret.value);
+               if (ret)
+                       gp_ret.value = _teec_convert_error(-ret);
+               else if (return_origin)
+                       /* Update origin as it's not the API */
+                       *return_origin = gp_ret.origin;
+       }
+
+       mc_dev_devel(" %s() = 0x%x", __func__, gp_ret.value);
+       return gp_ret.value;
+}
+EXPORT_SYMBOL(teec_invoke_command);
+
+/* Close a session; does nothing if session is NULL, never fails */
+void teec_close_session(struct teec_session *session)
+{
+       struct tee_client *client;
+       int ret = 0;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* The implementation MUST do nothing if session is NULL */
+       if (!session) {
+               mc_dev_devel("session is NULL");
+               return;
+       }
+
+       client = session->imp.context.client;
+       if (session->imp.active) {
+               ret = client_gp_close_session(client, session->imp.session_id);
+               if (ret)
+                       /* continue even in case of error */
+                       mc_dev_devel("client_gp_close failed(%08x)", ret);
+
+               session->imp.active = false;
+       }
+
+       mc_dev_devel(" %s() = 0x%x", __func__, ret);
+}
+EXPORT_SYMBOL(teec_close_session);
+
+/*
+ * Implementation note. We handle internally 2 kind of pointers : kernel memory
+ * (kmalloc, get_pages, ...) and dynamic memory (vmalloc). A global pointer from
+ * a kernel module has the same format as a vmalloc buffer. However, our code
+ * cannot detect that, so it considers it a kmalloc buffer. The TA trying to use
+ * that shared buffer is likely to crash
+ */
+u32 teec_register_shared_memory(struct teec_context *context,
+                               struct teec_shared_memory *shared_mem)
+{
+       struct gp_shared_memory memref;
+       struct gp_return gp_ret;
+       int ret;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* The parameter context MUST point to an initialized TEE Context */
+       if (!context) {
+               mc_dev_devel("context is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       /*
+        * The parameter shared_mem MUST point to the Shared Memory structure
+        * defining the memory region to register
+        */
+       if (!shared_mem) {
+               mc_dev_devel("shared_mem is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       /*
+        * The buffer field MUST point to the memory region to be shared,
+        * and MUST not be NULL
+        */
+       if (!shared_mem->buffer) {
+               mc_dev_devel("shared_mem->buffer is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       /* Flags must be a non-empty subset of TEEC_MEM_INOUT */
+       if (!shared_mem->flags || (shared_mem->flags & ~TEEC_MEM_INOUT)) {
+               mc_dev_devel("shared_mem->flags is incorrect");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       memref.buffer = (uintptr_t)shared_mem->buffer;
+       memref.flags = shared_mem->flags;
+       memref.size = shared_mem->size;
+       ret = client_gp_register_shared_mem(context->imp.client, NULL, NULL,
+                                           &memref, &gp_ret);
+       if (ret)
+               return _teec_convert_error(-ret);
+
+       /* Caller-provided buffer: we must not free it on release */
+       shared_mem->imp.client = context->imp.client;
+       shared_mem->imp.implementation_allocated = false;
+
+       return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_register_shared_memory);
+
+/* Allocate a buffer (vmalloc) and register it as shared memory */
+u32 teec_allocate_shared_memory(struct teec_context *context,
+                               struct teec_shared_memory *shared_mem)
+{
+       struct gp_shared_memory memref;
+       struct gp_return gp_ret;
+       int ret;
+
+       /* No connection to "context"? */
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* The parameter context MUST point to an initialized TEE Context */
+       if (!context) {
+               mc_dev_devel("context is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       /*
+        * The parameter shared_mem MUST point to the Shared Memory structure
+        * defining the memory region to register
+        */
+       if (!shared_mem) {
+               mc_dev_devel("shared_mem is NULL");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+       /* Flags must be a non-empty subset of TEEC_MEM_INOUT */
+       if (!shared_mem->flags || (shared_mem->flags & ~TEEC_MEM_INOUT)) {
+               mc_dev_devel("shared_mem->flags is incorrect");
+               return TEEC_ERROR_BAD_PARAMETERS;
+       }
+
+       shared_mem->buffer = vmalloc(shared_mem->size);
+       if (!shared_mem->buffer)
+               return TEEC_ERROR_OUT_OF_MEMORY;
+
+       memref.buffer = (uintptr_t)shared_mem->buffer;
+       memref.flags = shared_mem->flags;
+       memref.size = shared_mem->size;
+       ret = client_gp_register_shared_mem(context->imp.client, NULL, NULL,
+                                           &memref, &gp_ret);
+       if (ret) {
+               /* Registration failed: undo the allocation */
+               vfree(shared_mem->buffer);
+               shared_mem->buffer = NULL;
+               shared_mem->size = 0;
+               return _teec_convert_error(-ret);
+       }
+
+       /* We own the buffer: free it on release */
+       shared_mem->imp.client = context->imp.client;
+       shared_mem->imp.implementation_allocated = true;
+
+       return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teec_allocate_shared_memory);
+
+/*
+ * Release shared memory: unregister it from the driver and, when the
+ * buffer was allocated by teec_allocate_shared_memory(), free it.
+ * Does nothing if shared_mem is NULL.
+ */
+void teec_release_shared_memory(struct teec_shared_memory *shared_mem)
+{
+       struct gp_shared_memory memref;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* The implementation MUST do nothing if shared_mem is NULL */
+       if (!shared_mem) {
+               mc_dev_devel("shared_mem is NULL");
+               return;
+       }
+
+       memref.buffer = (uintptr_t)shared_mem->buffer;
+       memref.flags = shared_mem->flags;
+       memref.size = shared_mem->size;
+       /* Best effort: release failures cannot be reported to the caller */
+       (void)client_gp_release_shared_mem(shared_mem->imp.client, &memref);
+
+       /*
+        * For a memory buffer allocated using teec_allocate_shared_memory the
+        * Implementation MUST free the underlying memory
+        */
+       if (shared_mem->imp.implementation_allocated) {
+               /* vfree(NULL) is a no-op, so no extra NULL guard is needed */
+               vfree(shared_mem->buffer);
+               shared_mem->buffer = NULL;
+               shared_mem->size = 0;
+       }
+}
+EXPORT_SYMBOL(teec_release_shared_memory);
+
+/*
+ * Request cancellation of a pending operation.
+ *
+ * Protocol on operation->started (NOTE(review): appears to be 0 = not yet
+ * started, 1 = running, >1 = finished or cancelled — confirm against the
+ * user-space counterpart): wait until the operation starts, then mark it
+ * cancelled (2) and notify the TA. Actual cancellation delivery is still a
+ * TODO below, so today the TA is only notified.
+ */
+void teec_request_cancellation(struct teec_operation *operation)
+{
+       struct teec_session_imp *session;
+       int ret;
+
+       mc_dev_devel("== %s() ==============", __func__);
+
+       /* Block until the operation has at least started (interruptible) */
+       ret = wait_event_interruptible(operations_wq, operation->started);
+       if (ret == -ERESTARTSYS) {
+               mc_dev_devel("signal received");
+               return;
+       }
+
+       mc_dev_devel("operation->started changed from 0 to %d",
+                    operation->started);
+
+       /* Too late: the operation already completed */
+       if (operation->started > 1) {
+               mc_dev_devel("the operation has finished");
+               return;
+       }
+
+       /* Mark as cancelled and wake anyone waiting on the state change */
+       session = operation->imp.session;
+       operation->started = 2;
+       wake_up_interruptible(&operations_wq);
+
+       if (!session->active) {
+               mc_dev_devel("Corresponding session is not active");
+               return;
+       }
+
+       /* TODO: handle cancellation */
+
+       /* Signal the Trustlet */
+       ret = client_notify_session(session->context.client,
+                                   session->session_id);
+       if (ret)
+               mc_dev_devel("Notify failed: %d", ret);
+}
+EXPORT_SYMBOL(teec_request_cancellation);
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/user.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/user.c
new file mode 100755 (executable)
index 0000000..e2b63f3
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>    /* struct vm_area_struct */
+#include <linux/uaccess.h>
+
+#include "public/mc_user.h"
+
+#include "main.h"
+#include "user.h"
+#include "client.h"
+#include "mcp.h"       /* mcp_get_version */
+
+/*
+ * Get client object from file pointer.
+ * The client was stored in file->private_data by user_open(); may be NULL
+ * if the file was not opened through user_open().
+ */
+static inline struct tee_client *get_client(struct file *file)
+{
+       return (struct tee_client *)file->private_data;
+}
+
+/*
+ * Callback for system open().
+ * Allocates a fresh client object and attaches it to the file descriptor.
+ *
+ * @inode
+ * @file
+ * Returns 0 if OK or -ENOMEM if no allocation was possible.
+ */
+static int user_open(struct inode *inode, struct file *file)
+{
+       struct tee_client *client;
+
+       mc_dev_devel("from %s (%d)", current->comm, current->pid);
+
+       /* User-space client, hence the 'false' */
+       client = client_create(false);
+       if (!client)
+               return -ENOMEM;
+
+       file->private_data = client;
+       return 0;
+}
+
+/*
+ * Callback for system close().
+ * Detaches and destroys the client object, including remaining sessions.
+ * @inode
+ * @file
+ * Returns 0, or -EPROTO if no client was attached to the file.
+ */
+static int user_release(struct inode *inode, struct file *file)
+{
+       struct tee_client *client = get_client(file);
+
+       mc_dev_devel("from %s (%d)", current->comm, current->pid);
+       if (!client)
+               return -EPROTO;
+
+       /* Detach from the file, then tear the client down */
+       file->private_data = NULL;
+       client_close(client);
+       return 0;
+}
+
+/*
+ * Check r/w access to referenced memory.
+ * Returns 0 if the user buffer is accessible, -EFAULT otherwise.
+ */
+static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg)
+{
+       int err = 0;
+
+       /* _IOC_READ means user space reads the result, so the kernel must be
+        * allowed to WRITE the buffer — and vice versa for _IOC_WRITE
+        */
+       if (_IOC_DIR(cmd) & _IOC_READ)
+               err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+       else if (_IOC_DIR(cmd) & _IOC_WRITE)
+               err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+       if (err)
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Callback for system ioctl()
+ * Implement most of ClientLib API functions
+ * @file       pointer to file
+ * @cmd                command
+ * @arg                arguments
+ *
+ * Returns 0 for OK and an errno in case of error
+ */
+static long user_ioctl(struct file *file, unsigned int id, unsigned long arg)
+{
+       struct tee_client *client = get_client(file);
+       int __user *uarg = (int __user *)arg;
+       int ret = -EINVAL;
+
+       mc_dev_devel("%u from %s", _IOC_NR(id), current->comm);
+
+       if (!client)
+               return -EPROTO;
+
+       if (ioctl_check_pointer(id, uarg))
+               return -EFAULT;
+
+       switch (id) {
+       case MC_IO_HAS_SESSIONS:
+               /* Freeze the client */
+               if (client_has_sessions(client))
+                       ret = -ENOTEMPTY;
+               else
+                       ret = 0;
+               break;
+
+       case MC_IO_OPEN_SESSION: {
+               struct mc_ioctl_open_session session;
+
+               if (copy_from_user(&session, uarg, sizeof(session))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_mc_open_session(client, &session.uuid,
+                                            session.tci, session.tcilen,
+                                            &session.sid);
+               if (ret)
+                       break;
+
+               if (copy_to_user(uarg, &session, sizeof(session))) {
+                       ret = -EFAULT;
+                       client_remove_session(client, session.sid);
+                       break;
+               }
+               break;
+       }
+       case MC_IO_OPEN_TRUSTLET: {
+               struct mc_ioctl_open_trustlet trustlet;
+
+               if (copy_from_user(&trustlet, uarg, sizeof(trustlet))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_mc_open_trustlet(client, trustlet.spid,
+                                             trustlet.buffer, trustlet.tlen,
+                                             trustlet.tci, trustlet.tcilen,
+                                             &trustlet.sid);
+               if (ret)
+                       break;
+
+               if (copy_to_user(uarg, &trustlet, sizeof(trustlet))) {
+                       ret = -EFAULT;
+                       client_remove_session(client, trustlet.sid);
+                       break;
+               }
+               break;
+       }
+       case MC_IO_CLOSE_SESSION: {
+               u32 sid = (u32)arg;
+
+               ret = client_remove_session(client, sid);
+               break;
+       }
+       case MC_IO_NOTIFY: {
+               u32 sid = (u32)arg;
+
+               ret = client_notify_session(client, sid);
+               break;
+       }
+       case MC_IO_WAIT: {
+               struct mc_ioctl_wait wait;
+
+               if (copy_from_user(&wait, uarg, sizeof(wait))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               ret = client_waitnotif_session(client, wait.sid, wait.timeout,
+                                              wait.partial);
+               break;
+       }
+       case MC_IO_MAP: {
+               struct mc_ioctl_map map;
+
+               if (copy_from_user(&map, uarg, sizeof(map))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_mc_map(client, map.sid, NULL, &map.buf);
+               if (ret)
+                       break;
+
+               /* Fill in return struct */
+               if (copy_to_user(uarg, &map, sizeof(map))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case MC_IO_UNMAP: {
+               struct mc_ioctl_map map;
+
+               if (copy_from_user(&map, uarg, sizeof(map))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_mc_unmap(client, map.sid, &map.buf);
+               break;
+       }
+       case MC_IO_ERR: {
+               struct mc_ioctl_geterr __user *uerr =
+                       (struct mc_ioctl_geterr __user *)uarg;
+               u32 sid;
+               s32 exit_code;
+
+               if (get_user(sid, &uerr->sid)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_get_session_exitcode(client, sid, &exit_code);
+               if (ret)
+                       break;
+
+               /* Fill in return struct */
+               if (put_user(exit_code, &uerr->value)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               break;
+       }
+       case MC_IO_VERSION: {
+               struct mc_version_info version_info;
+
+               ret = mcp_get_version(&version_info);
+               if (ret)
+                       break;
+
+               if (copy_to_user(uarg, &version_info, sizeof(version_info)))
+                       ret = -EFAULT;
+
+               break;
+       }
+       case MC_IO_GP_INITIALIZE_CONTEXT: {
+               struct mc_ioctl_gp_initialize_context context;
+
+               if (copy_from_user(&context, uarg, sizeof(context))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_initialize_context(client, &context.ret);
+
+               if (copy_to_user(uarg, &context, sizeof(context))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case MC_IO_GP_REGISTER_SHARED_MEM: {
+               struct mc_ioctl_gp_register_shared_mem shared_mem;
+
+               if (copy_from_user(&shared_mem, uarg, sizeof(shared_mem))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_register_shared_mem(client, NULL, NULL,
+                                                   &shared_mem.memref,
+                                                   &shared_mem.ret);
+
+               if (copy_to_user(uarg, &shared_mem, sizeof(shared_mem))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case MC_IO_GP_RELEASE_SHARED_MEM: {
+               struct mc_ioctl_gp_release_shared_mem shared_mem;
+
+               if (copy_from_user(&shared_mem, uarg, sizeof(shared_mem))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_release_shared_mem(client, &shared_mem.memref);
+               break;
+       }
+       case MC_IO_GP_OPEN_SESSION: {
+               struct mc_ioctl_gp_open_session session;
+
+               if (copy_from_user(&session, uarg, sizeof(session))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_open_session(client, &session.uuid,
+                                            &session.operation,
+                                            &session.identity,
+                                            &session.ret, &session.session_id);
+
+               if (copy_to_user(uarg, &session, sizeof(session))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case MC_IO_GP_CLOSE_SESSION: {
+               struct mc_ioctl_gp_close_session session;
+
+               if (copy_from_user(&session, uarg, sizeof(session))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_close_session(client, session.session_id);
+               break;
+       }
+       case MC_IO_GP_INVOKE_COMMAND: {
+               struct mc_ioctl_gp_invoke_command command;
+
+               if (copy_from_user(&command, uarg, sizeof(command))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = client_gp_invoke_command(client, command.session_id,
+                                              command.command_id,
+                                              &command.operation,
+                                              &command.ret);
+
+               if (copy_to_user(uarg, &command, sizeof(command))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               break;
+       }
+       case MC_IO_GP_REQUEST_CANCELLATION: {
+               struct mc_ioctl_gp_request_cancellation cancel;
+
+               if (copy_from_user(&cancel, uarg, sizeof(cancel))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               client_gp_request_cancellation(client,
+                                              cancel.operation.started);
+               ret = 0;
+               break;
+       }
+       default:
+               ret = -ENOIOCTLCMD;
+               mc_dev_err(ret, "unsupported command no %d", id);
+       }
+
+       return ret;
+}
+
+/*
+ * Callback for system mmap()
+ */
+static int user_mmap(struct file *file, struct vm_area_struct *vmarea)
+{
+       struct tee_client *client = get_client(file);
+       unsigned long length = vmarea->vm_end - vmarea->vm_start;
+
+       /* Reject oversized requests before allocating anything */
+       if (length > BUFFER_LENGTH_MAX)
+               return -EINVAL;
+
+       /* Allocate a contiguous buffer for this client and map it */
+       return client_cbuf_create(client, (u32)length, NULL, vmarea);
+}
+
+/* File operations for the user-space character device */
+static const struct file_operations mc_user_fops = {
+       .owner = THIS_MODULE,
+       .open = user_open,
+       .release = user_release,
+       .unlocked_ioctl = user_ioctl,
+#ifdef CONFIG_COMPAT
+       /* NOTE(review): same handler reused for 32-bit compat — assumes the
+        * ioctl structure layouts are identical on 32/64-bit; confirm.
+        */
+       .compat_ioctl = user_ioctl,
+#endif
+       .mmap = user_mmap,
+};
+
+/* Attach the user-device file operations to the given cdev; always 0 */
+int mc_user_init(struct cdev *cdev)
+{
+       cdev_init(cdev, &mc_user_fops);
+       return 0;
+}
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/user.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/user.h
new file mode 100755 (executable)
index 0000000..015b0b1
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _USER_H_
+#define _USER_H_
+
+struct cdev;
+
+/* Register the user-space character device operations on @cdev */
+int mc_user_init(struct cdev *cdev);
+/* Nothing to tear down: mc_user_init() only initialises the cdev */
+static inline void mc_user_exit(void)
+{
+}
+
+#endif /* _USER_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.c
new file mode 100644 (file)
index 0000000..fe299c6
--- /dev/null
@@ -0,0 +1,1151 @@
+/*
+ * Copyright (c) 2017-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_XEN
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+#if KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE
+#include <xen/balloon.h>
+#endif
+
+#include "platform.h"          /* MC_XENBUS_MAP_RING_VALLOC_4_1 */
+#include "main.h"
+#include "admin.h"             /* tee_object* */
+#include "client.h"            /* Consider other VMs as clients */
+#include "mmu.h"
+#include "mcp.h"               /* mcp_get_version */
+#include "nq.h"
+#include "xen_common.h"
+#include "xen_be.h"
+
+#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+
+/* Module-local state: list of front-end connections */
+static struct {
+       struct list_head        xfes;
+       struct mutex            xfes_mutex;     /* Protect the above */
+} l_ctx;
+
+/* Maps */
+
+/* A set of DomU pages granted to, and mapped into, this (back-end) domain */
+struct xen_be_map {
+       struct page             **pages;
+       grant_handle_t          *handles;
+       unsigned long           nr_pages;
+       u32                     flags;          /* GNTMAP_* mapping flags */
+       /* Pages were allocated and must be freed on delete */
+       bool                    pages_allocd;
+       /* Grant refs were mapped and must be unmapped on delete */
+       bool                    refs_mapped;
+       /* To auto-delete */
+       struct tee_deleter      deleter;
+};
+
+static void xen_be_map_delete(struct xen_be_map *map)
+{
+       int i;
+
+       if (map->refs_mapped) {
+               struct gnttab_unmap_grant_ref *unmaps;
+
+               unmaps = kcalloc(map->nr_pages, sizeof(*unmaps), GFP_KERNEL);
+               if (!unmaps)
+                       /* Cannot go on */
+                       return;
+
+               for (i = 0; i < map->nr_pages; i++)
+                       gnttab_set_unmap_op(&unmaps[i], vaddr(map->pages[i]),
+                                           map->flags, map->handles[i]);
+
+               if (gnttab_unmap_refs(unmaps, NULL, map->pages, map->nr_pages))
+                       /* Cannot go on */
+                       return;
+
+               for (i = 0; i < map->nr_pages; i++)
+                       put_page(map->pages[i]);
+
+               kfree(unmaps);
+       }
+
+       if (map->pages_allocd)
+#if KERNEL_VERSION(4, 0, 0) <= LINUX_VERSION_CODE
+               gnttab_free_pages(map->nr_pages, map->pages);
+#else
+               free_xenballooned_pages(map->nr_pages, map->pages);
+#endif
+
+       kfree(map->handles);
+       kfree(map->pages);
+       kfree(map);
+       mc_dev_devel("freed xen map %p", map);
+       atomic_dec(&g_ctx.c_xen_maps);
+}
+
+static struct xen_be_map *be_map_create(const struct xen_be_map *pte_map,
+                                       grant_ref_t *refs, int nr_refs,
+                                       int pte_entries_max, int dom_id,
+                                       bool readonly)
+{
+       struct xen_be_map *map;
+       struct gnttab_map_grant_ref *maps = NULL;
+       int i, ret = -ENOMEM;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return ERR_PTR(-ENOMEM);
+
+       atomic_inc(&g_ctx.c_xen_maps);
+       map->flags = GNTMAP_host_map;
+       if (readonly)
+               map->flags |= GNTMAP_readonly;
+
+       map->nr_pages = nr_refs;
+       map->pages = kcalloc(map->nr_pages, sizeof(*map->pages), GFP_KERNEL);
+       if (!map->pages)
+               goto out;
+
+       map->handles = kcalloc(map->nr_pages, sizeof(*map->handles),
+                              GFP_KERNEL);
+       if (!map->handles)
+               goto out;
+
+#if KERNEL_VERSION(4, 0, 0) <= LINUX_VERSION_CODE
+       if (gnttab_alloc_pages(map->nr_pages, map->pages))
+               goto out;
+#else
+       if (alloc_xenballooned_pages(map->nr_pages, map->pages, false))
+               goto out;
+#endif
+
+       map->pages_allocd = true;
+       maps = kcalloc(map->nr_pages, sizeof(*maps), GFP_KERNEL);
+       if (!maps)
+               goto out;
+
+       if (pte_map) {
+               int k = 0, nr_refs_left = nr_refs;
+
+               for (i = 0; i < pte_map->nr_pages; i++) {
+                       int j, nr_refs = nr_refs_left;
+                       grant_ref_t *refs = (void *)vaddr(pte_map->pages[i]);
+
+                       if (nr_refs > pte_entries_max)
+                               nr_refs = pte_entries_max;
+
+                       for (j = 0;  j < nr_refs; j++) {
+                               mc_dev_devel("map [%d, %d] -> %d ref %u",
+                                            i, j, k, refs[j]);
+#ifdef DEBUG
+                               /* Relax serial interface to not kill the USB */
+                               usleep_range(100, 200);
+#endif
+                               gnttab_set_map_op(
+                                       &maps[k], vaddr(map->pages[k]),
+                                       map->flags, refs[j], dom_id);
+                               nr_refs_left--;
+                               k++;
+                       }
+               }
+       } else {
+               for (i = 0;  i < map->nr_pages; i++) {
+                       mc_dev_devel("map table %d ref %u", i, refs[i]);
+                       gnttab_set_map_op(&maps[i], vaddr(map->pages[i]),
+                                         map->flags, refs[i], dom_id);
+               }
+       }
+
+       ret = gnttab_map_refs(maps, NULL, map->pages, map->nr_pages);
+       if (ret)
+               goto out;
+
+       map->refs_mapped = true;
+       /* Pin pages */
+       for (i = 0;  i < map->nr_pages; i++) {
+               get_page(map->pages[i]);
+               map->handles[i] = maps[i].handle;
+       }
+
+out:
+       kfree(maps);
+
+       if (ret) {
+               xen_be_map_delete(map);
+               return ERR_PTR(-ret);
+       }
+
+       mc_dev_devel("created %s xen map %p", pte_map ? "buffer" : "ptes", map);
+       return map;
+}
+
+/*
+ * Map a DomU buffer in two stages: buffer->data.refs grants the PTE pages,
+ * and each mapped PTE page in turn holds up to pte_entries_max grant
+ * references to the actual data pages.
+ */
+static struct xen_be_map *xen_be_map_create(struct tee_xen_buffer *buffer,
+                                           int pte_entries_max, int dom_id)
+{
+       struct xen_be_map *map;
+       struct xen_be_map *pte_map;
+       /* Number of PTE pages needed to hold nr_refs entries (rounded up) */
+       int nr_pte_refs =
+               (buffer->info->nr_refs + pte_entries_max - 1) / pte_entries_max;
+
+       /* First map the PTE pages */
+       pte_map = be_map_create(NULL, buffer->data.refs, nr_pte_refs,
+                               pte_entries_max, dom_id, true);
+       if (IS_ERR(pte_map))
+               return pte_map;
+
+       /* Now map the data pages, read-only for input-only buffers */
+       map = be_map_create(pte_map, NULL, buffer->info->nr_refs,
+                           pte_entries_max, dom_id,
+                           buffer->info->flags == MC_IO_MAP_INPUT);
+       /* PTE pages mapping not needed any more */
+       xen_be_map_delete(pte_map);
+       if (!IS_ERR(map)) {
+               /* Auto-delete */
+               map->deleter.object = map;
+               map->deleter.delete = (void(*)(void *))xen_be_map_delete;
+       }
+
+       return map;
+}
+
+/* Dom0 call to DomU */
+
+/*
+ * Send a Dom0->DomU command over the shared ring and block until the
+ * front end completes it.  Must be called under xfe->ring_mutex with the
+ * ring marked busy.
+ */
+static inline void call_domu(struct tee_xfe *xfe, enum tee_xen_dom0_cmd cmd,
+                            u32 id, int ret)
+{
+       WARN_ON(!xfe->ring_busy);
+
+       /* Set command and ID */
+       xfe->ring->dom0.cmd = cmd;
+       xfe->ring->dom0.id = id;
+       xfe->ring->dom0.cmd_ret = ret;
+       mc_dev_devel("Dom0 -> DomU request %u id %u ret %d",
+                    xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+                    xfe->ring->dom0.cmd_ret);
+       /* Call: IRQ to DomU, then wait for its completion signal */
+       notify_remote_via_irq(xfe->irq_dom0);
+       wait_for_completion(&xfe->ring_completion);
+}
+
+/*
+ * IRQ handler for DomU responses to Dom0 commands.  Will be called back
+ * under xfe->ring_mutex (held by the thread blocked in call_domu()).
+ * Resets the dom0 command slot and wakes the waiter.
+ */
+static irqreturn_t xen_be_irq_handler_dom0_th(int intr, void *arg)
+{
+       struct tee_xfe *xfe = arg;
+
+       if (!xfe->ring->dom0.cmd) {
+               mc_dev_devel("Ignore IRQ with no command (on DomU connect)");
+               return IRQ_HANDLED;
+       }
+
+       WARN_ON(!xfe->ring_busy);
+
+       /* Response to a dom0 command, our side of ring locked by us */
+       mc_dev_devel("Dom0 -> DomU response %u id %u ret %d",
+                    xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+                    xfe->ring->dom0.cmd_ret);
+       xfe->ring->dom0.cmd = TEE_XEN_DOM0_NONE;
+       xfe->ring->dom0.id = 0;
+       xfe->ring->dom0.cmd_ret = -EPERM;
+       complete(&xfe->ring_completion);
+
+       return IRQ_HANDLED;
+}
+
+/* MC protocol interface */
+
+/* Query the MC version and publish it in the DomU side of the ring */
+static inline int xen_be_get_version(struct tee_xfe *xfe)
+{
+       struct mc_version_info version_info;
+       int ret = mcp_get_version(&version_info);
+
+       if (!ret)
+               xfe->ring->domu.version_info = version_info;
+
+       return ret;
+}
+
+/* -EBUSY if the DomU's client still owns sessions, 0 otherwise */
+static inline int xen_be_mc_has_sessions(struct tee_xfe *xfe)
+{
+       if (client_has_sessions(xfe->client))
+               return -EBUSY;
+
+       return 0;
+}
+
+/*
+ * Open an MC session by UUID on behalf of the DomU, mapping its TCI
+ * buffer (buffers[0]) into this domain when one is provided.
+ */
+static inline int xen_be_mc_open_session(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+       struct mcp_open_info info = {
+               .type = TEE_MC_UUID,
+               .uuid = &xfe->ring->domu.uuid,
+       };
+       int ret;
+
+       /* Zero flags means the session comes without a TCI buffer */
+       if (tci_buffer->info->flags) {
+               struct xen_be_map *map;
+               struct mcp_buffer_map b_map = {
+                       .offset = tci_buffer->info->offset,
+                       .length = tci_buffer->info->length,
+                       .flags = tci_buffer->info->flags,
+               };
+
+               map = xen_be_map_create(tci_buffer, xfe->pte_entries_max,
+                                       xfe->xdev->otherend_id);
+               if (IS_ERR(map)) {
+                       ret = PTR_ERR(map);
+                       goto out;
+               }
+
+               /* Shall be freed by session */
+               b_map.nr_pages = map->nr_pages;
+               info.tci_mmu = tee_mmu_wrap(&map->deleter, map->pages,
+                                           &b_map);
+               if (IS_ERR(info.tci_mmu)) {
+                       ret = PTR_ERR(info.tci_mmu);
+                       info.tci_mmu = NULL;
+                       goto out;
+               }
+       }
+
+       /* Open session */
+       ret = client_mc_open_common(xfe->client, &info,
+                                   &xfe->ring->domu.session_id);
+
+out:
+       /* Drop our MMU reference; the session keeps its own if opened */
+       if (info.tci_mmu)
+               tee_mmu_put(info.tci_mmu);
+
+       mc_dev_devel("session %x, exit with %d",
+                    xfe->ring->domu.session_id, ret);
+       return ret;
+}
+
+/*
+ * Open an MC session from a trustlet binary supplied by the DomU:
+ * buffers[1] holds the TA blob (vmap'd to get a linear address),
+ * buffers[0] the optional TCI buffer.
+ */
+static inline int xen_be_mc_open_trustlet(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *ta_buffer = &xfe->buffers[1];
+       struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+       struct mcp_open_info info = {
+               .type = TEE_MC_TA,
+       };
+       struct xen_be_map *ta_map;
+       void *addr = NULL;
+       /* -ENOMEM covers the vmap() failure path below */
+       int ret = -ENOMEM;
+
+       ta_map = xen_be_map_create(ta_buffer, xfe->pte_entries_max,
+                                  xfe->xdev->otherend_id);
+       if (IS_ERR(ta_map))
+               return PTR_ERR(ta_map);
+
+       info.spid = xfe->ring->domu.spid;
+       /* Make the TA pages virtually contiguous to read the blob */
+       addr = vmap(ta_map->pages, ta_map->nr_pages,
+                   VM_MAP | VM_IOREMAP | VM_USERMAP, PAGE_KERNEL);
+       if (!addr)
+               goto out;
+
+       info.va = (uintptr_t)addr + ta_buffer->info->offset;
+       info.len = ta_buffer->info->length;
+
+       /* Zero flags means the session comes without a TCI buffer */
+       if (tci_buffer->info->flags) {
+               struct xen_be_map *map;
+               struct mcp_buffer_map b_map = {
+                       .offset = tci_buffer->info->offset,
+                       .length = tci_buffer->info->length,
+                       .flags = tci_buffer->info->flags,
+               };
+
+               map = xen_be_map_create(tci_buffer, xfe->pte_entries_max,
+                                       xfe->xdev->otherend_id);
+               if (IS_ERR(map)) {
+                       ret = PTR_ERR(map);
+                       goto out;
+               }
+
+               /* Shall be freed by session */
+               b_map.nr_pages = map->nr_pages;
+               info.tci_mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+               if (IS_ERR(info.tci_mmu)) {
+                       ret = PTR_ERR(info.tci_mmu);
+                       info.tci_mmu = NULL;
+                       goto out;
+               }
+       }
+
+       /* Open session */
+       ret = client_mc_open_common(xfe->client, &info,
+                                   &xfe->ring->domu.session_id);
+
+out:
+       /* Drop our MMU reference; the session keeps its own if opened */
+       if (info.tci_mmu)
+               tee_mmu_put(info.tci_mmu);
+
+       if (addr)
+               vunmap(addr);
+
+       /* TA blob mapping only needed during open */
+       xen_be_map_delete(ta_map);
+
+       mc_dev_devel("session %x, exit with %d",
+                    xfe->ring->domu.session_id, ret);
+       return ret;
+}
+
+/* Close the session whose id the DomU placed in the ring */
+static inline int xen_be_mc_close_session(struct tee_xfe *xfe)
+{
+       u32 session_id = xfe->ring->domu.session_id;
+
+       return client_remove_session(xfe->client, session_id);
+}
+
+/* Forward a DomU notification to the session named in the ring */
+static inline int xen_be_mc_notify(struct tee_xfe *xfe)
+{
+       u32 session_id = xfe->ring->domu.session_id;
+
+       return client_notify_session(xfe->client, session_id);
+}
+
+/* mc_wait cannot keep the ring busy while waiting, so we use a worker */
+struct mc_wait_work {
+       struct work_struct      work;
+       struct tee_xfe          *xfe;           /* Front end, ref held */
+       u32                     session_id;     /* Session to wait on */
+       s32                     timeout;        /* Wait timeout */
+       u32                     id;             /* DomU command id to ack */
+};
+
+/*
+ * Perform the (blocking) session wait on behalf of the DomU, then send
+ * the result back over the ring and drop the work's xfe reference.
+ */
+static void xen_be_mc_wait_worker(struct work_struct *work)
+{
+       struct mc_wait_work *wait_work =
+               container_of(work, struct mc_wait_work, work);
+       struct tee_xfe *xfe = wait_work->xfe;
+       int ret;
+
+       ret = client_waitnotif_session(wait_work->xfe->client,
+                                      wait_work->session_id,
+                                      wait_work->timeout, false);
+
+       /* Send return code */
+       mc_dev_devel("MC wait session done %x, ret %d",
+                    wait_work->session_id, ret);
+       ring_get(xfe);
+       /* In */
+       xfe->ring->dom0.session_id = wait_work->session_id;
+       /* Call */
+       call_domu(xfe, TEE_XEN_MC_WAIT_DONE, wait_work->id, ret);
+       /* Out */
+       ring_put(xfe);
+       kfree(wait_work);
+       tee_xfe_put(xfe);
+}
+
+/* Queue a worker to wait for a session notification (ring stays free) */
+static inline int xen_be_mc_wait(struct tee_xfe *xfe)
+{
+       struct mc_wait_work *work = kzalloc(sizeof(*work), GFP_KERNEL);
+
+       if (!work)
+               return -ENOMEM;
+
+       /* The worker releases this reference when done */
+       tee_xfe_get(xfe);
+       work->xfe = xfe;
+       work->id = xfe->ring->domu.id;
+       work->session_id = xfe->ring->domu.session_id;
+       work->timeout = xfe->ring->domu.timeout;
+       INIT_WORK(&work->work, xen_be_mc_wait_worker);
+       schedule_work(&work->work);
+       return 0;
+}
+
+/*
+ * Map the DomU buffer (buffers[0]) into the session; on success the
+ * secure virtual address is reported back through the buffer info.
+ */
+static inline int xen_be_mc_map(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       struct xen_be_map *map;
+       struct tee_mmu *mmu = NULL;
+       struct mc_ioctl_buffer buf;
+       struct mcp_buffer_map b_map = {
+               .offset = buffer->info->offset,
+               .length = buffer->info->length,
+               .flags = buffer->info->flags,
+       };
+       int ret;
+
+       map = xen_be_map_create(buffer, xfe->pte_entries_max,
+                               xfe->xdev->otherend_id);
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+               return ret;
+       }
+
+       /* Shall be freed by session */
+       b_map.nr_pages = map->nr_pages;
+       mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+       if (IS_ERR(mmu)) {
+               xen_be_map_delete(map);
+               return PTR_ERR(mmu);
+       }
+
+       ret = client_mc_map(xfe->client, xfe->ring->domu.session_id, mmu, &buf);
+       /* Releasing the MMU shall also clear the map */
+       tee_mmu_put(mmu);
+       if (!ret)
+               /* Report the secure virtual address back to the DomU */
+               buffer->info->sva = buf.sva;
+
+       mc_dev_devel("session %x, exit with %d",
+                    xfe->ring->domu.session_id, ret);
+       return ret;
+}
+
+/* Unmap the buffer described by buffers[0] from the DomU's session */
+static inline int xen_be_mc_unmap(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *xbuf = &xfe->buffers[0];
+       struct mc_ioctl_buffer buf = {
+               .sva = xbuf->info->sva,
+               .len = xbuf->info->length,
+       };
+       int ret;
+
+       ret = client_mc_unmap(xfe->client, xfe->ring->domu.session_id, &buf);
+
+       mc_dev_devel("session %x, exit with %d",
+                    xfe->ring->domu.session_id, ret);
+       return ret;
+}
+
+/* Fetch the session exit code and store it in the ring for the DomU */
+static inline int xen_be_mc_get_err(struct tee_xfe *xfe)
+{
+       u32 session_id = xfe->ring->domu.session_id;
+       int ret = client_get_session_exitcode(xfe->client, session_id,
+                                             &xfe->ring->domu.err);
+
+       mc_dev_devel("session %x err %d, exit with %d",
+                    session_id, xfe->ring->domu.err, ret);
+       return ret;
+}
+
+/* GP protocol interface */
+
+/*
+ * Register DomU shared memory (buffers[0]) with the GP layer; the secure
+ * virtual address is returned through the buffer info, the GP status
+ * through the ring.
+ */
+static inline int xen_be_gp_register_shared_mem(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       struct xen_be_map *map;
+       struct tee_mmu *mmu = NULL;
+       struct gp_shared_memory memref = {
+               .buffer = buffer->info->addr,
+               .size = buffer->info->length,
+               .flags = buffer->info->flags,
+       };
+       struct mcp_buffer_map b_map = {
+               .offset = buffer->info->offset,
+               .length = buffer->info->length,
+               .flags = buffer->info->flags,
+       };
+       int ret;
+
+       map = xen_be_map_create(buffer, xfe->pte_entries_max,
+                               xfe->xdev->otherend_id);
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+               return ret;
+       }
+
+       /* Shall be freed by session */
+       b_map.nr_pages = map->nr_pages;
+       mmu = tee_mmu_wrap(&map->deleter, map->pages, &b_map);
+       if (IS_ERR(mmu)) {
+               xen_be_map_delete(map);
+               return PTR_ERR(mmu);
+       }
+
+       ret = client_gp_register_shared_mem(xfe->client, mmu,
+                                           &buffer->info->sva, &memref,
+                                           &xfe->ring->domu.gp_ret);
+       /* Releasing the MMU shall also clear the map */
+       tee_mmu_put(mmu);
+       mc_dev_devel("session %x, exit with %d",
+                    xfe->ring->domu.session_id, ret);
+       return ret;
+}
+
+/* Release previously registered GP shared memory (buffers[0]) */
+static inline int xen_be_gp_release_shared_mem(struct tee_xfe *xfe)
+{
+       struct tee_xen_buffer *xbuf = &xfe->buffers[0];
+       struct gp_shared_memory memref = {
+               .flags = xbuf->info->flags,
+               .size = xbuf->info->length,
+               .buffer = xbuf->info->addr,
+       };
+       int ret = client_gp_release_shared_mem(xfe->client, &memref);
+
+       mc_dev_devel("exit with %d", ret);
+       return ret;
+}
+
+/* GP functions cannot keep the ring busy while waiting, so we use a worker */
+struct gp_work {
+       struct work_struct              work;
+       struct tee_xfe                  *xfe;           /* Ref held */
+       u64                             operation_id;
+       struct interworld_session       iws;
+       struct tee_mmu                  *mmus[4];       /* Tmpref buffers */
+       struct mc_uuid_t                uuid;
+       u32                             session_id;
+       u32                             id;             /* Command id to ack */
+};
+
+/*
+ * Blocking part of GP open session: run the open, release the tmpref
+ * MMUs, then send the result back over the ring and drop the xfe ref.
+ */
+static void xen_be_gp_open_session_worker(struct work_struct *work)
+{
+       struct gp_work *gp_work = container_of(work, struct gp_work, work);
+       struct tee_xfe *xfe = gp_work->xfe;
+       struct gp_return gp_ret;
+       int i, ret;
+
+       ret = client_gp_open_session_domu(xfe->client, &gp_work->uuid,
+                                         gp_work->operation_id, &gp_work->iws,
+                                         gp_work->mmus, &gp_ret);
+       mc_dev_devel("GP open session done, ret %d", ret);
+       for (i = 0; i < TEE_BUFFERS; i++)
+               if (gp_work->mmus[i])
+                       tee_mmu_put(gp_work->mmus[i]);
+
+       /* Send return code */
+       ring_get(xfe);
+       /* In */
+       xfe->ring->dom0.operation_id = gp_work->operation_id;
+       xfe->ring->dom0.iws = gp_work->iws;
+       xfe->ring->dom0.gp_ret = gp_ret;
+       /* Call */
+       call_domu(xfe, TEE_XEN_GP_OPEN_SESSION_DONE, gp_work->id, ret);
+       /* Out */
+       ring_put(xfe);
+       kfree(gp_work);
+       tee_xfe_put(xfe);
+}
+
+/*
+ * Map the DomU's tmpref buffers and queue a worker to perform the
+ * (blocking) GP open session.  On any mapping failure, all MMUs created
+ * so far are released before returning.
+ */
+static inline int xen_be_gp_open_session(struct tee_xfe *xfe)
+{
+       struct gp_work *gp_work;
+       int i, ret = 0;
+
+       gp_work = kzalloc(sizeof(*gp_work), GFP_KERNEL);
+       if (!gp_work)
+               return -ENOMEM;
+
+       /* Map tmpref buffers */
+       for (i = 0; i < TEE_BUFFERS; i++) {
+               struct tee_xen_buffer *buffer = &xfe->buffers[i];
+               struct xen_be_map *map;
+               struct mcp_buffer_map b_map = {
+                       .offset = buffer->info->offset,
+                       .length = buffer->info->length,
+                       .flags = buffer->info->flags,
+               };
+
+               if (!buffer->info->flags)
+                       continue;
+
+               map = xen_be_map_create(buffer, xfe->pte_entries_max,
+                                       xfe->xdev->otherend_id);
+               if (IS_ERR(map)) {
+                       ret = PTR_ERR(map);
+                       /*
+                        * Release MMUs created in earlier iterations; the
+                        * previous code jumped past this cleanup here and
+                        * leaked them.
+                        */
+                       goto err_mmus;
+               }
+
+               /* Shall be freed by session */
+               b_map.nr_pages = map->nr_pages;
+               gp_work->mmus[i] = tee_mmu_wrap(&map->deleter, map->pages,
+                                               &b_map);
+               if (IS_ERR(gp_work->mmus[i])) {
+                       xen_be_map_delete(map);
+                       ret = PTR_ERR(gp_work->mmus[i]);
+                       goto err_mmus;
+               }
+       }
+
+       /* The worker releases this reference when done */
+       tee_xfe_get(xfe);
+       gp_work->xfe = xfe;
+       gp_work->operation_id = xfe->ring->domu.operation_id;
+       gp_work->iws = xfe->ring->domu.iws;
+       gp_work->uuid = xfe->ring->domu.uuid;
+       gp_work->id = xfe->ring->domu.id;
+       INIT_WORK(&gp_work->work, xen_be_gp_open_session_worker);
+       schedule_work(&gp_work->work);
+       return 0;
+
+err_mmus:
+       /* Entries are NULL (kzalloc), valid, or ERR_PTR — only put valid */
+       for (i = 0; i < TEE_BUFFERS; i++)
+               if (!IS_ERR_OR_NULL(gp_work->mmus[i]))
+                       tee_mmu_put(gp_work->mmus[i]);
+       kfree(gp_work);
+       return ret;
+}
+
+/*
+ * Blocking part of GP close session: run the close, then send the result
+ * back over the ring and drop the work's xfe reference.
+ */
+static void xen_be_gp_close_session_worker(struct work_struct *work)
+{
+       struct gp_work *gp_work = container_of(work, struct gp_work, work);
+       struct tee_xfe *xfe = gp_work->xfe;
+       int ret;
+
+       ret = client_gp_close_session(xfe->client, gp_work->session_id);
+       mc_dev_devel("GP close session done, ret %d", ret);
+
+       /* Send return code */
+       ring_get(xfe);
+       /* In */
+       xfe->ring->dom0.operation_id = gp_work->operation_id;
+       /* Call */
+       call_domu(xfe, TEE_XEN_GP_CLOSE_SESSION_DONE, gp_work->id, ret);
+       /* Out */
+       ring_put(xfe);
+       kfree(gp_work);
+       tee_xfe_put(xfe);
+}
+
+/* Queue a worker to perform the (blocking) GP close session */
+static inline int xen_be_gp_close_session(struct tee_xfe *xfe)
+{
+       struct gp_work *work = kzalloc(sizeof(*work), GFP_KERNEL);
+
+       if (!work)
+               return -ENOMEM;
+
+       /* The worker releases this reference when done */
+       tee_xfe_get(xfe);
+       work->xfe = xfe;
+       work->id = xfe->ring->domu.id;
+       work->operation_id = xfe->ring->domu.operation_id;
+       work->session_id = xfe->ring->domu.session_id;
+       INIT_WORK(&work->work, xen_be_gp_close_session_worker);
+       schedule_work(&work->work);
+       return 0;
+}
+
+/*
+ * Blocking part of GP invoke command: run the command, release the
+ * tmpref MMUs, then send the result back over the ring and drop the
+ * work's xfe reference.
+ */
+static void xen_be_gp_invoke_command_worker(struct work_struct *work)
+{
+       struct gp_work *gp_work = container_of(work, struct gp_work, work);
+       struct tee_xfe *xfe = gp_work->xfe;
+       struct gp_return gp_ret;
+       int i, ret;
+
+       ret = client_gp_invoke_command_domu(xfe->client, gp_work->session_id,
+                                           gp_work->operation_id,
+                                           &gp_work->iws, gp_work->mmus,
+                                           &gp_ret);
+       mc_dev_devel("GP invoke command done, ret %d", ret);
+       for (i = 0; i < TEE_BUFFERS; i++)
+               if (gp_work->mmus[i])
+                       tee_mmu_put(gp_work->mmus[i]);
+
+       /* Send return code */
+       ring_get(xfe);
+       /* In */
+       xfe->ring->dom0.operation_id = gp_work->operation_id;
+       xfe->ring->dom0.iws = gp_work->iws;
+       xfe->ring->dom0.gp_ret = gp_ret;
+       /* Call */
+       call_domu(xfe, TEE_XEN_GP_INVOKE_COMMAND_DONE, gp_work->id, ret);
+       /* Out */
+       ring_put(xfe);
+       kfree(gp_work);
+       tee_xfe_put(xfe);
+}
+
+/*
+ * Handle a GP invoke-command request from the DomU front-end.
+ *
+ * Wraps the shared tmpref buffers into MMUs, then defers the actual invoke
+ * to a worker since it may block.  The reply is sent back asynchronously by
+ * the worker (TEE_XEN_GP_INVOKE_COMMAND_DONE).
+ *
+ * Return: 0 on success (reply sent by the worker), -errno otherwise.
+ */
+static inline int xen_be_gp_invoke_command(struct tee_xfe *xfe)
+{
+	struct gp_work *gp_work;
+	int i, ret = 0;
+
+	gp_work = kzalloc(sizeof(*gp_work), GFP_KERNEL);
+	if (!gp_work)
+		return -ENOMEM;
+
+	/* Map tmpref buffers */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		struct tee_xen_buffer *buffer = &xfe->buffers[i];
+		struct xen_be_map *map;
+		struct mcp_buffer_map b_map = {
+			.offset = buffer->info->offset,
+			.length = buffer->info->length,
+			.flags = buffer->info->flags,
+		};
+
+		/* An unused buffer slot has no flags set */
+		if (!buffer->info->flags)
+			continue;
+
+		map = xen_be_map_create(buffer, xfe->pte_entries_max,
+					xfe->xdev->otherend_id);
+		if (IS_ERR(map)) {
+			ret = PTR_ERR(map);
+			/*
+			 * Also release the MMUs created by previous loop
+			 * iterations: jumping to err_map here leaked them
+			 */
+			goto err_mmus;
+		}
+
+		/* Shall be freed by session */
+		b_map.nr_pages = map->nr_pages;
+		gp_work->mmus[i] = tee_mmu_wrap(&map->deleter, map->pages,
+						&b_map);
+		if (IS_ERR(gp_work->mmus[i])) {
+			xen_be_map_delete(map);
+			ret = PTR_ERR(gp_work->mmus[i]);
+			goto err_mmus;
+		}
+	}
+
+	/* Snapshot the request parameters before scheduling the worker */
+	tee_xfe_get(xfe);
+	gp_work->xfe = xfe;
+	gp_work->operation_id = xfe->ring->domu.operation_id;
+	gp_work->iws = xfe->ring->domu.iws;
+	gp_work->session_id = xfe->ring->domu.session_id;
+	gp_work->id = xfe->ring->domu.id;
+	INIT_WORK(&gp_work->work, xen_be_gp_invoke_command_worker);
+	schedule_work(&gp_work->work);
+	return 0;
+
+err_mmus:
+	/* Putting an MMU also deletes the underlying map via its deleter */
+	for (i = 0; i < TEE_BUFFERS; i++)
+		if (!IS_ERR_OR_NULL(gp_work->mmus[i]))
+			tee_mmu_put(gp_work->mmus[i]);
+err_map:
+	kfree(gp_work);
+	return ret;
+}
+
+/* Forward a GP request-cancellation from the front-end; always succeeds */
+static inline int xen_be_gp_request_cancellation(struct tee_xfe *xfe)
+{
+	u64 operation_id = xfe->ring->domu.operation_id;
+
+	client_gp_request_cancellation(xfe->client, operation_id);
+	return 0;
+}
+
+/* Top half for DomU events: defer all ring processing to the workqueue */
+static irqreturn_t xen_be_irq_handler_domu_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	/* A spurious event arrives on DomU connect, before any command */
+	if (xfe->ring->domu.cmd)
+		schedule_work(&xfe->work);
+	else
+		mc_dev_devel("Ignore IRQ with no command (on DomU connect)");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Bottom half for DomU events: dispatch the command found in the DomU side
+ * of the shared ring, write the return code back (otherend_ret), and notify
+ * the front-end via the DomU IRQ.  A command not handled by the switch
+ * leaves the pre-set -EINVAL as the return code.
+ */
+static void xen_be_irq_handler_domu_bh(struct work_struct *data)
+{
+	struct tee_xfe *xfe = container_of(data, struct tee_xfe, work);
+
+	/* Default result for unknown/unhandled commands */
+	xfe->ring->domu.otherend_ret = -EINVAL;
+	mc_dev_devel("DomU -> Dom0 command %u id %u",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id);
+	switch (xfe->ring->domu.cmd) {
+	case TEE_XEN_DOMU_NONE:
+		/* Nothing to do and nothing to notify */
+		return;
+	/* MC */
+	case TEE_XEN_MC_HAS_SESSIONS:
+		xfe->ring->domu.otherend_ret = xen_be_mc_has_sessions(xfe);
+		break;
+	case TEE_XEN_GET_VERSION:
+		xfe->ring->domu.otherend_ret = xen_be_get_version(xfe);
+		break;
+	case TEE_XEN_MC_OPEN_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_mc_open_session(xfe);
+		break;
+	case TEE_XEN_MC_OPEN_TRUSTLET:
+		xfe->ring->domu.otherend_ret = xen_be_mc_open_trustlet(xfe);
+		break;
+	case TEE_XEN_MC_CLOSE_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_mc_close_session(xfe);
+		break;
+	case TEE_XEN_MC_NOTIFY:
+		xfe->ring->domu.otherend_ret = xen_be_mc_notify(xfe);
+		break;
+	case TEE_XEN_MC_WAIT:
+		xfe->ring->domu.otherend_ret = xen_be_mc_wait(xfe);
+		break;
+	case TEE_XEN_MC_MAP:
+		xfe->ring->domu.otherend_ret = xen_be_mc_map(xfe);
+		break;
+	case TEE_XEN_MC_UNMAP:
+		xfe->ring->domu.otherend_ret = xen_be_mc_unmap(xfe);
+		break;
+	case TEE_XEN_MC_GET_ERR:
+		xfe->ring->domu.otherend_ret = xen_be_mc_get_err(xfe);
+		break;
+	/* GP */
+	case TEE_XEN_GP_REGISTER_SHARED_MEM:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_register_shared_mem(xfe);
+		break;
+	case TEE_XEN_GP_RELEASE_SHARED_MEM:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_release_shared_mem(xfe);
+		break;
+	case TEE_XEN_GP_OPEN_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_gp_open_session(xfe);
+		break;
+	case TEE_XEN_GP_CLOSE_SESSION:
+		xfe->ring->domu.otherend_ret = xen_be_gp_close_session(xfe);
+		break;
+	case TEE_XEN_GP_INVOKE_COMMAND:
+		xfe->ring->domu.otherend_ret = xen_be_gp_invoke_command(xfe);
+		break;
+	case TEE_XEN_GP_REQUEST_CANCELLATION:
+		xfe->ring->domu.otherend_ret =
+			xen_be_gp_request_cancellation(xfe);
+		break;
+	}
+
+	mc_dev_devel("DomU -> Dom0 result %u id %u ret %d",
+		     xfe->ring->domu.cmd, xfe->ring->domu.id,
+		     xfe->ring->domu.otherend_ret);
+	notify_remote_via_irq(xfe->irq_domu);
+}
+
+/* Device */
+
+/* Xenbus device names this back-end binds to ("" terminates the list) */
+static const struct xenbus_device_id xen_be_ids[] = {
+       { "tee_xen" },
+       { "" }
+};
+
+/*
+ * Called when a front-end is created: allocate the per-front-end state and
+ * its client, register it in the global list, and advance the xenbus state
+ * machine (InitWait then Initialised).
+ *
+ * Return: 0 on success, -errno on failure (device reported fatal).
+ */
+static int xen_be_probe(struct xenbus_device *xdev,
+			const struct xenbus_device_id *id)
+{
+	struct tee_xfe *xfe;
+	int ret = 0;
+
+	/* Tell the front-end we are waiting for its parameters */
+	ret = xenbus_switch_state(xdev, XenbusStateInitWait);
+	if (ret) {
+		xenbus_dev_fatal(xdev, ret,
+				 "failed to change state to initwait");
+		return ret;
+	}
+
+	xfe = tee_xfe_create(xdev);
+	if (!xfe) {
+		ret = -ENOMEM;
+		xenbus_dev_fatal(xdev, ret, "failed to create FE struct");
+		goto err_xfe_create;
+	}
+
+	xfe->client = client_create(true);
+	if (!xfe->client) {
+		ret = -ENOMEM;
+		xenbus_dev_fatal(xdev, ret, "failed to create FE client");
+		goto err_client_create;
+	}
+
+	INIT_WORK(&xfe->work, xen_be_irq_handler_domu_bh);
+
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_add_tail(&xfe->list, &l_ctx.xfes);
+	mutex_unlock(&l_ctx.xfes_mutex);
+
+	ret = xenbus_switch_state(xdev, XenbusStateInitialised);
+	if (ret) {
+		xenbus_dev_fatal(xdev, ret,
+				 "failed to change state to initialised");
+		goto err_switch_state;
+	}
+
+	return 0;
+
+err_switch_state:
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_del(&xfe->list);
+	mutex_unlock(&l_ctx.xfes_mutex);
+err_client_create:
+	/* Dropping the last reference also closes the client, if created */
+	tee_xfe_put(xfe);
+err_xfe_create:
+	return ret;
+}
+
+/* Called when device is unregistered: forget the front-end and release it */
+static int xen_be_remove(struct xenbus_device *xdev)
+{
+	struct tee_xfe *xfe = dev_get_drvdata(&xdev->dev);
+
+	xenbus_switch_state(xdev, XenbusStateClosed);
+
+	mutex_lock(&l_ctx.xfes_mutex);
+	list_del(&xfe->list);
+	mutex_unlock(&l_ctx.xfes_mutex);
+
+	/* Drop the reference taken by xen_be_probe() */
+	tee_xfe_put(xfe);
+	return 0;
+}
+
+/*
+ * Wrapper hiding the xenbus_map_ring_valloc() signature change introduced
+ * in Linux 4.1 (array of grant refs plus count vs a single grant ref).
+ */
+static inline int xen_be_map_ring_valloc(struct xenbus_device *dev,
+					 grant_ref_t ref, void **vaddr)
+{
+#if KERNEL_VERSION(4, 1, 0) <= LINUX_VERSION_CODE || \
+		defined(MC_XENBUS_MAP_RING_VALLOC_4_1)
+	return xenbus_map_ring_valloc(dev, &ref, 1, vaddr);
+#else
+	return xenbus_map_ring_valloc(dev, ref, vaddr);
+#endif
+}
+
+/*
+ * Connect to a front-end that reached the Initialised state: read its
+ * xenstore parameters, map the shared ring and buffer pages, bind both
+ * event channels, then switch this device to Connected.
+ *
+ * NOTE(review): on failure after mappings/bindings succeeded, resources
+ * are left for frontend_detach() to release when the front-end closes.
+ */
+static inline void frontend_attach(struct tee_xfe *xfe)
+{
+	int domu_version;
+	int ret;
+	int i;
+
+	if (xenbus_read_driver_state(xfe->xdev->nodename) !=
+			XenbusStateInitialised)
+		return;
+
+	ret = xenbus_gather(XBT_NIL, xfe->xdev->otherend,
+			    "ring-ref", "%u", &xfe->ring_ref,
+			    "pte-entries-max", "%u", &xfe->pte_entries_max,
+			    "event-channel-domu", "%u", &xfe->evtchn_domu,
+			    "event-channel-dom0", "%u", &xfe->evtchn_dom0,
+			    "domu-version", "%u", &domu_version, NULL);
+	if (ret) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to gather other domain info");
+		return;
+	}
+
+	mc_dev_devel("ring ref %u evtchn domu=%u dom0=%u version=%u",
+		     xfe->ring_ref, xfe->evtchn_domu, xfe->evtchn_dom0,
+		     domu_version);
+
+	if (domu_version != TEE_XEN_VERSION) {
+		/* ret is 0 here (gather succeeded): report a real error */
+		xenbus_dev_fatal(
+			xfe->xdev, -EINVAL,
+			"front- and back-end versions do not match: %d vs %d",
+			domu_version, TEE_XEN_VERSION);
+		return;
+	}
+
+	ret = xen_be_map_ring_valloc(xfe->xdev, xfe->ring_ref, &xfe->ring_p);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret, "failed to map ring");
+		return;
+	}
+	mc_dev_devel("mapped ring %p", xfe->ring_p);
+
+	/* Map buffers individually */
+	for (i = 0; i < TEE_BUFFERS; i++) {
+		ret = xen_be_map_ring_valloc(xfe->xdev,
+					     xfe->ring->domu.buffers[i].pmd_ref,
+					     &xfe->buffers[i].data.addr);
+		if (ret < 0) {
+			xenbus_dev_fatal(xfe->xdev, ret,
+					 "failed to map buffer page");
+			return;
+		}
+
+		xfe->buffers[i].info = &xfe->ring->domu.buffers[i];
+	}
+
+	/* DomU -> Dom0 commands arrive through this channel */
+	ret = bind_interdomain_evtchn_to_irqhandler(
+		xfe->xdev->otherend_id, xfe->evtchn_domu,
+		xen_be_irq_handler_domu_th, 0, "tee_be_domu", xfe);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to bind event channel to DomU IRQ");
+		return;
+	}
+
+	xfe->irq_domu = ret;
+	mc_dev_devel("bound DomU IRQ %d", xfe->irq_domu);
+
+	/* Dom0 -> DomU asynchronous responses use this channel */
+	ret = bind_interdomain_evtchn_to_irqhandler(
+		xfe->xdev->otherend_id, xfe->evtchn_dom0,
+		xen_be_irq_handler_dom0_th, 0, "tee_be_dom0", xfe);
+	if (ret < 0) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to bind event channel to Dom0 IRQ");
+		return;
+	}
+
+	xfe->irq_dom0 = ret;
+	mc_dev_devel("bound Dom0 IRQ %d", xfe->irq_dom0);
+
+	ret = xenbus_switch_state(xfe->xdev, XenbusStateConnected);
+	if (ret) {
+		xenbus_dev_fatal(xfe->xdev, ret,
+				 "failed to change state to connected");
+		return;
+	}
+}
+
+/*
+ * Tear down the connection to a closing front-end: unbind both IRQs and
+ * unmap the buffer and ring pages mapped by frontend_attach().
+ *
+ * NOTE(review): buffers are unmapped unconditionally; if attach failed
+ * before mapping them their addresses are still NULL — confirm
+ * xenbus_unmap_ring_vfree() tolerates an address it never mapped.
+ */
+static inline void frontend_detach(struct tee_xfe *xfe)
+{
+	int i;
+
+	xenbus_switch_state(xfe->xdev, XenbusStateClosing);
+	if (xfe->irq_domu >= 0)
+		unbind_from_irqhandler(xfe->irq_domu, xfe);
+
+	if (xfe->irq_dom0 >= 0)
+		unbind_from_irqhandler(xfe->irq_dom0, xfe);
+
+	for (i = 0; i < TEE_BUFFERS; i++)
+		xenbus_unmap_ring_vfree(xfe->xdev, xfe->buffers[i].data.addr);
+
+	if (xfe->ring_p)
+		xenbus_unmap_ring_vfree(xfe->xdev, xfe->ring_p);
+}
+
+/* React to front-end xenbus state transitions (attach/detach/unregister) */
+static void xen_be_frontend_changed(struct xenbus_device *xdev,
+				    enum xenbus_state fe_state)
+{
+	struct tee_xfe *xfe = dev_get_drvdata(&xdev->dev);
+
+	mc_dev_devel("fe state changed to %d", fe_state);
+	switch (fe_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+		break;
+	case XenbusStateInitialised:
+		/* Front-end has published its parameters: connect to it */
+		frontend_attach(xfe);
+		break;
+	case XenbusStateConnected:
+		break;
+	case XenbusStateClosing:
+		frontend_detach(xfe);
+		break;
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		device_unregister(&xfe->xdev->dev);
+		break;
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
+		break;
+	}
+}
+
+static struct xenbus_driver xen_be_driver = {
+	.ids  = xen_be_ids,
+	.probe = xen_be_probe,
+	.remove = xen_be_remove,
+	.otherend_changed = xen_be_frontend_changed,
+};
+
+/* Initialise back-end state and register the xenbus back-end driver */
+int xen_be_init(void)
+{
+	INIT_LIST_HEAD(&l_ctx.xfes);
+	mutex_init(&l_ctx.xfes_mutex);
+	return xenbus_register_backend(&xen_be_driver);
+}
+
+/* Unregister the xenbus back-end driver */
+void xen_be_exit(void)
+{
+	xenbus_unregister_driver(&xen_be_driver);
+}
+
+#endif
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_be.h
new file mode 100644 (file)
index 0000000..f858dca
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_XEN_BE_H_
+#define _MC_XEN_BE_H_
+
+#include <linux/version.h>
+
+struct xen_be_map;
+
+#ifdef CONFIG_XEN
+/* Register/unregister the Dom0 TEE back-end xenbus driver */
+int xen_be_init(void);
+void xen_be_exit(void);
+#else
+/* No-op stubs when the kernel is built without Xen support */
+static inline int xen_be_init(void)
+{
+	return 0;
+}
+
+static inline void xen_be_exit(void)
+{
+}
+#endif
+
+#endif /* _MC_XEN_BE_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.c
new file mode 100644 (file)
index 0000000..07b4c91
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_XEN
+
+#include "main.h"
+#include "client.h"
+#include "xen_common.h"
+
+/*
+ * Allocate and initialise a front-end descriptor bound to a xenbus device.
+ * The caller gets the initial reference.  Returns NULL if out of memory.
+ */
+struct tee_xfe *tee_xfe_create(struct xenbus_device *xdev)
+{
+	struct tee_xfe *fe = kzalloc(sizeof(*fe), GFP_KERNEL);
+
+	if (!fe)
+		return NULL;
+
+	atomic_inc(&g_ctx.c_xen_fes);
+	/* Attach to the xenbus device and take the initial reference */
+	dev_set_drvdata(&xdev->dev, fe);
+	fe->xdev = xdev;
+	kref_init(&fe->kref);
+	/* No event channels or IRQs bound yet */
+	fe->evtchn_domu = -1;
+	fe->evtchn_dom0 = -1;
+	fe->irq_domu = -1;
+	fe->irq_dom0 = -1;
+	INIT_LIST_HEAD(&fe->list);
+	mutex_init(&fe->ring_mutex);
+	init_completion(&fe->ring_completion);
+	return fe;
+}
+
+/* Kref release callback: close the client (if any) and free the front-end */
+static void tee_xfe_release(struct kref *kref)
+{
+	struct tee_xfe *xfe = container_of(kref, struct tee_xfe, kref);
+
+	if (xfe->client)
+		client_close(xfe->client);
+
+	kfree(xfe);
+	atomic_dec(&g_ctx.c_xen_fes);
+}
+
+/* Drop a reference to a front-end; frees it when the last one goes away */
+void tee_xfe_put(struct tee_xfe *xfe)
+{
+	kref_put(&xfe->kref, tee_xfe_release);
+}
+
+#endif
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_common.h
new file mode 100644 (file)
index 0000000..78111c7
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_XEN_COMMON_H_
+#define _MC_XEN_COMMON_H_
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include "public/mc_user.h"    /* many types */
+#include "mci/mciiwp.h"
+#include "mci/mcimcp.h"
+#include "mmu.h"               /* PMD/PTE max entries */
+#include "client.h"            /* For BE to treat other VMs as clients */
+
+#define TEE_XEN_VERSION        3
+
+#define TEE_BUFFERS    4
+
+/* Commands sent by the DomU front-end to the Dom0 back-end */
+enum tee_xen_domu_cmd {
+       TEE_XEN_DOMU_NONE,
+       TEE_XEN_GET_VERSION,
+       /* TEE_XEN_MC_OPEN_DEVICE = 11,         SWd does not support this */
+       /* TEE_XEN_MC_CLOSE_DEVICE,             SWd does not support this */
+       TEE_XEN_MC_HAS_SESSIONS = 13,
+       TEE_XEN_MC_OPEN_SESSION,
+       TEE_XEN_MC_OPEN_TRUSTLET,
+       TEE_XEN_MC_CLOSE_SESSION,
+       TEE_XEN_MC_NOTIFY,
+       TEE_XEN_MC_WAIT,
+       TEE_XEN_MC_MAP,
+       TEE_XEN_MC_UNMAP,
+       TEE_XEN_MC_GET_ERR,
+       /* TEE_XEN_GP_INITIALIZE_CONTEXT = 21,  SWd does not support this */
+       /* TEE_XEN_GP_FINALIZE_CONTEXT,         SWd does not support this */
+       TEE_XEN_GP_REGISTER_SHARED_MEM = 23,
+       TEE_XEN_GP_RELEASE_SHARED_MEM,
+       TEE_XEN_GP_OPEN_SESSION,
+       TEE_XEN_GP_CLOSE_SESSION,
+       TEE_XEN_GP_INVOKE_COMMAND,
+       TEE_XEN_GP_REQUEST_CANCELLATION,
+};
+
+/* Asynchronous responses from Dom0, reusing the matching command values */
+enum tee_xen_dom0_cmd {
+       TEE_XEN_DOM0_NONE,
+       TEE_XEN_MC_WAIT_DONE = TEE_XEN_MC_WAIT,
+       TEE_XEN_GP_OPEN_SESSION_DONE = TEE_XEN_GP_OPEN_SESSION,
+       TEE_XEN_GP_CLOSE_SESSION_DONE = TEE_XEN_GP_CLOSE_SESSION,
+       TEE_XEN_GP_INVOKE_COMMAND_DONE = TEE_XEN_GP_INVOKE_COMMAND,
+};
+
+/* One grant-ref table viewed as a ref array, kernel address or raw page */
+union tee_xen_mmu_table {
+       /* Array of references to pages (PTE_ENTRIES_MAX or PMD_ENTRIES_MAX) */
+       grant_ref_t             *refs;
+       /* Address of table */
+       void                    *addr;
+       /* Page for table */
+       unsigned long           page;
+};
+
+/* Description of one shared buffer, exchanged through the ring */
+struct tee_xen_buffer_info {
+       /* Page Middle Directory, refs to tee_xen_pte_table's (full pages) */
+       grant_ref_t             pmd_ref;
+       /* Total number of refs for buffer */
+       u32                     nr_refs;
+       u64                     addr;           /* Unique VM address */
+       u32                     offset;
+       u32                     length;
+       /* flags == 0 marks an unused buffer slot (see back-end loops) */
+       u32                     flags;
+       u32                     sva;
+};
+
+/* Convenience structure to get buffer info and contents in one place */
+struct tee_xen_buffer {
+       struct tee_xen_buffer_info      *info;
+       union tee_xen_mmu_table         data;
+};
+
+/* Layout of the single shared ring page between DomU and Dom0 */
+struct tee_xen_ring {
+       /* DomU side, synchronous and asynchronous commands */
+       struct {
+               enum tee_xen_domu_cmd           cmd;            /* in */
+               u32                             id;             /* in (debug) */
+               /* Return code of this command from Dom0 */
+               int                             otherend_ret;   /* out */
+               struct mc_uuid_t                uuid;           /* in */
+               u32                             session_id;     /* in/out */
+               /* Buffers to share (4 for GP, 2 for mcOpenTrustlet) */
+               struct tee_xen_buffer_info      buffers[TEE_BUFFERS]; /* in */
+               /* MC */
+               struct mc_version_info          version_info;   /* out */
+               u32                             spid;           /* in */
+               s32                             timeout;        /* in */
+               s32                             err;            /* out */
+               /* GP */
+               u64                             operation_id;   /* in */
+               struct gp_return                gp_ret;         /* out */
+               struct interworld_session       iws;            /* in */
+       }                       domu;
+
+       /* Dom0 side, response to asynchronous command, never read by Dom0 */
+       struct {
+               enum tee_xen_dom0_cmd           cmd;            /* in */
+               u32                             id;             /* in (debug) */
+               /* Return code from command */
+               int                             cmd_ret;        /* in */
+               /* The operation id is used to match GP request and response */
+               u64                             operation_id;   /* in */
+               struct gp_return                gp_ret;         /* in */
+               struct interworld_session       iws;            /* in */
+               /* The session id is used to match MC request and response */
+               u32                             session_id;     /* in */
+       }                       dom0;
+};
+
+/* Per-front-end connection state, shared by back-end and front-end code */
+struct tee_xfe {
+       struct xenbus_device    *xdev;
+       struct kref             kref;
+       grant_ref_t             ring_ref;
+       int                     pte_entries_max;
+       int                     evtchn_domu;
+       int                     evtchn_dom0;
+       int                     irq_domu;
+       int                     irq_dom0;
+       struct list_head        list;
+       struct tee_client       *client;
+       struct work_struct      work;
+       /* Ring page */
+       union {
+               unsigned long           ring_ul;
+               void                    *ring_p;
+               struct tee_xen_ring     *ring;
+       };
+       /* Buffer pages */
+       struct tee_xen_buffer   buffers[TEE_BUFFERS];
+       struct mutex            ring_mutex;     /* Protect our side of ring */
+       struct completion       ring_completion;
+       bool                    ring_busy;
+       /* Unique ID for commands */
+       u32                     domu_cmd_id;
+};
+
+struct tee_xfe *tee_xfe_create(struct xenbus_device *xdev);
+/* Take a reference on a front-end */
+static inline void tee_xfe_get(struct tee_xfe *xfe)
+{
+       kref_get(&xfe->kref);
+}
+
+void tee_xfe_put(struct tee_xfe *xfe);
+
+/* Acquire our side of the ring before writing a command */
+static inline void ring_get(struct tee_xfe *xfe)
+{
+       mutex_lock(&xfe->ring_mutex);
+       xfe->ring_busy = true;
+}
+
+/* Release the ring once the command exchange is complete */
+static inline void ring_put(struct tee_xfe *xfe)
+{
+       xfe->ring_busy = false;
+       mutex_unlock(&xfe->ring_mutex);
+}
+
+#endif /* _MC_XEN_COMMON_H_ */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.c b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.c
new file mode 100644 (file)
index 0000000..ac2aa48
--- /dev/null
@@ -0,0 +1,1179 @@
+/*
+ * Copyright (c) 2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+
+#include "mci/mciiwp.h"                /* struct interworld_session */
+
+#include "main.h"
+
+#ifdef TRUSTONIC_XEN_DOMU
+
+#include "admin.h"             /* tee_object* */
+#include "client.h"
+#include "iwp.h"
+#include "mcp.h"
+#include "xen_common.h"
+#include "xen_fe.h"
+
+#define page_to_gfn(p) (pfn_to_gfn(page_to_phys(p) >> XEN_PAGE_SHIFT))
+
+/* Front-end local context: the single Dom0 link plus tracking lists */
+static struct {
+       int (*probe)(void);
+       int (*start)(void);
+       struct tee_xfe          *xfe;
+       /* MC sessions */
+       struct mutex            mc_sessions_lock;
+       struct list_head        mc_sessions;
+       /* GP operations */
+       struct mutex            gp_operations_lock;
+       struct list_head        gp_operations;
+} l_ctx;
+
+/* Local record of an MC session, used to match wait responses from Dom0 */
+struct xen_fe_mc_session {
+       struct list_head                list;
+       struct completion               completion;
+       int                             ret;
+       struct mcp_session              *session;
+};
+
+/* Pending GP operation, keyed by slot (operation ID) for Dom0 responses */
+struct xen_fe_gp_operation {
+       struct list_head                list;
+       struct completion               completion;
+       int                             ret;
+       u64                             slot;
+       struct gp_return                *gp_ret;
+       struct interworld_session       *iws;
+};
+
+/*
+ * Look up the local MC session record matching a session ID.
+ * Returns ERR_PTR(-ENXIO), with a warning, when the ID is unknown.
+ */
+static inline struct xen_fe_mc_session *find_mc_session(u32 session_id)
+{
+	struct xen_fe_mc_session *found = ERR_PTR(-ENXIO);
+	struct xen_fe_mc_session *cursor;
+
+	mutex_lock(&l_ctx.mc_sessions_lock);
+	list_for_each_entry(cursor, &l_ctx.mc_sessions, list) {
+		if (cursor->session->sid == session_id) {
+			found = cursor;
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.mc_sessions_lock);
+
+	WARN(IS_ERR(found), "MC session not found for ID %u", session_id);
+	return found;
+}
+
+/* Dom0 completed an mc_wait: record its result and wake the waiter */
+static inline int xen_fe_mc_wait_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_mc_session *session;
+
+	mc_dev_devel("received response to mc_wait for session %x: %d",
+		     xfe->ring->dom0.session_id, xfe->ring->dom0.cmd_ret);
+	session = find_mc_session(xfe->ring->dom0.session_id);
+	if (IS_ERR(session))
+		return PTR_ERR(session);
+
+	session->ret = xfe->ring->dom0.cmd_ret;
+	complete(&session->completion);
+	return 0;
+}
+
+/*
+ * Find and remove the pending GP operation matching an operation ID.
+ * Returns ERR_PTR(-ENXIO), with a warning, when no such operation exists.
+ */
+static struct xen_fe_gp_operation *find_gp_operation(u64 operation_id)
+{
+	struct xen_fe_gp_operation *found = ERR_PTR(-ENXIO);
+	struct xen_fe_gp_operation *cursor;
+
+	mutex_lock(&l_ctx.gp_operations_lock);
+	list_for_each_entry(cursor, &l_ctx.gp_operations, list) {
+		if (cursor->slot == operation_id) {
+			/* Unlink while still holding the lock */
+			list_del(&cursor->list);
+			found = cursor;
+			break;
+		}
+	}
+	mutex_unlock(&l_ctx.gp_operations_lock);
+
+	WARN(IS_ERR(found), "GP operation not found for op id %llx",
+	     operation_id);
+	return found;
+}
+
+/* Dom0 completed a GP open session: copy results and wake the caller */
+static inline int xen_fe_gp_open_session_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_open_session for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	*operation->iws = xfe->ring->dom0.iws;
+	*operation->gp_ret = xfe->ring->dom0.gp_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+/* Dom0 completed a GP close session: record result and wake the caller */
+static inline int xen_fe_gp_close_session_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_close_session for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+/* Dom0 completed a GP invoke command: copy results and wake the caller */
+static inline int xen_fe_gp_invoke_command_done(struct tee_xfe *xfe)
+{
+	struct xen_fe_gp_operation *operation;
+
+	mc_dev_devel("received response to gp_invoke_command for op id %llx",
+		     xfe->ring->dom0.operation_id);
+	operation = find_gp_operation(xfe->ring->dom0.operation_id);
+	if (IS_ERR(operation))
+		return PTR_ERR(operation);
+
+	operation->ret = xfe->ring->dom0.cmd_ret;
+	*operation->iws = xfe->ring->dom0.iws;
+	*operation->gp_ret = xfe->ring->dom0.gp_ret;
+	complete(&operation->completion);
+	return 0;
+}
+
+/* Top half for Dom0 events: hand all processing off to the workqueue */
+static irqreturn_t xen_fe_irq_handler_dom0_th(int intr, void *arg)
+{
+	struct tee_xfe *xfe = arg;
+
+	schedule_work(&xfe->work);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Bottom half for Dom0 events: dispatch the asynchronous response found in
+ * the Dom0 side of the ring to the matching completion handler, then
+ * acknowledge over the Dom0 event channel.
+ */
+static void xen_fe_irq_handler_dom0_bh(struct work_struct *data)
+{
+	struct tee_xfe *xfe = container_of(data, struct tee_xfe, work);
+	int ret = -EINVAL;
+
+	mc_dev_devel("Dom0 -> DomU command %u id %u cmd ret %d",
+		     xfe->ring->dom0.cmd, xfe->ring->dom0.id,
+		     xfe->ring->dom0.cmd_ret);
+	switch (xfe->ring->dom0.cmd) {
+	case TEE_XEN_DOM0_NONE:
+		return;
+	case TEE_XEN_MC_WAIT_DONE:
+		ret = xen_fe_mc_wait_done(xfe);
+		break;
+	case TEE_XEN_GP_OPEN_SESSION_DONE:
+		ret = xen_fe_gp_open_session_done(xfe);
+		break;
+	case TEE_XEN_GP_CLOSE_SESSION_DONE:
+		ret = xen_fe_gp_close_session_done(xfe);
+		break;
+	case TEE_XEN_GP_INVOKE_COMMAND_DONE:
+		ret = xen_fe_gp_invoke_command_done(xfe);
+		break;
+	}
+
+	if (ret)
+		mc_dev_err(ret, "Dom0 -> DomU result %u id %u",
+			   xfe->ring->dom0.cmd, xfe->ring->dom0.id);
+	else
+		mc_dev_devel("Dom0 -> DomU result %u id %u",
+			     xfe->ring->dom0.cmd, xfe->ring->dom0.id);
+
+	notify_remote_via_evtchn(xfe->evtchn_dom0);
+}
+
+/* Buffer management */
+
+/* Front-end view of a buffer shared with Dom0 through grant references */
+struct xen_fe_map {
+       /* Array of PTE tables, so we can release the associated buffer refs */
+       union tee_xen_mmu_table *pte_tables;
+       int                     nr_pte_tables;
+       int                     nr_refs;
+       bool                    readonly;
+       int                     pages_created;  /* Leak check */
+       int                     refs_granted;   /* Leak check */
+       /* To auto-delete */
+       struct tee_deleter deleter;
+};
+
+/* End foreign access on the PMD-level references published in the ring */
+static void xen_fe_map_release_pmd(struct xen_fe_map *map,
+				   const struct tee_xen_buffer *buffer)
+{
+	int i;
+
+	if (IS_ERR_OR_NULL(map))
+		return;
+
+	for (i = 0; i < map->nr_pte_tables; i++) {
+		gnttab_end_foreign_access(buffer->data.refs[i], true, 0);
+		map->refs_granted--;
+		mc_dev_devel("unmapped table %d ref %u",
+			     i, buffer->data.refs[i]);
+	}
+}
+
+/*
+ * Undo everything xen_fe_map_create() did: revoke the buffer-page grants,
+ * free the PTE table pages and finally free the map itself.  When called
+ * with a buffer, the PMD-level table grants are revoked too (otherwise the
+ * caller is expected to have done it via xen_fe_map_release_pmd()).
+ */
+static void xen_fe_map_release(struct xen_fe_map *map,
+                              const struct tee_xen_buffer *buffer)
+{
+       int nr_refs_left = map->nr_refs;
+       int i;
+
+       if (buffer)
+               xen_fe_map_release_pmd(map, buffer);
+
+       for (i = 0; i < map->nr_pte_tables; i++) {
+               int j, nr_refs = nr_refs_left;
+
+               /* The last PTE table may be only partially filled */
+               if (nr_refs > PTE_ENTRIES_MAX)
+                       nr_refs = PTE_ENTRIES_MAX;
+
+               for (j = 0; j < nr_refs; j++) {
+                       gnttab_end_foreign_access(map->pte_tables[i].refs[j],
+                                                 map->readonly, 0);
+                       map->refs_granted--;
+                       nr_refs_left--;
+                       mc_dev_devel("unmapped [%d, %d] ref %u, left %d",
+                                    i, j, map->pte_tables[i].refs[j],
+                                    nr_refs_left);
+               }
+
+               free_page(map->pte_tables[i].page);
+               map->pages_created--;
+       }
+
+       kfree(map->pte_tables);
+       if (map->pages_created || map->refs_granted)
+               mc_dev_err(-EUCLEAN,
+                          "leak detected: still in use %d, still ref'd %d",
+                          map->pages_created, map->refs_granted);
+
+       /* Log before kfree(): dereferencing map after the free below would
+        * be a use-after-free
+        */
+       mc_dev_devel("freed map %p: refs=%u nr_pte_tables=%d",
+                    map, map->nr_refs, map->nr_pte_tables);
+       kfree(map);
+       atomic_dec(&g_ctx.c_xen_maps);
+}
+
+/*
+ * tee_deleter callback, invoked when the associated tee_mmu is destroyed.
+ * The PMD refs were already released right after the Dom0 call returned,
+ * hence the NULL buffer argument.
+ */
+static void xen_fe_map_delete(void *arg)
+{
+       struct xen_fe_map *map = arg;
+
+       xen_fe_map_release(map, NULL);
+}
+
+/*
+ * Grant Dom0 access to the buffer described by b_map.
+ * The shared buffer page is filled with grant refs to newly allocated PTE
+ * pages, which in turn receive grant refs to the actual buffer pages, and
+ * buffer->info is filled in so Dom0 can reconstruct the mapping.
+ * Returns the new map or an ERR_PTR; on error all grants and pages taken
+ * so far are released through xen_fe_map_release().
+ */
+static struct xen_fe_map *xen_fe_map_create(struct tee_xen_buffer *buffer,
+                                           const struct mcp_buffer_map *b_map,
+                                           int dom_id)
+{
+       /* b_map describes the PMD which contains pointers to PTE tables */
+       uintptr_t *pte_tables = (uintptr_t *)(uintptr_t)b_map->addr;
+       struct xen_fe_map *map;
+       /* Number of PTE tables needed, rounded up */
+       unsigned long nr_pte_tables =
+               (b_map->nr_pages + PTE_ENTRIES_MAX - 1) / PTE_ENTRIES_MAX;
+       unsigned long nr_pages_left = b_map->nr_pages;
+       /* Dom0 only gets write access when the buffer is an output buffer */
+       int readonly = !(b_map->flags & MC_IO_MAP_OUTPUT);
+       int ret, i;
+
+       /*
+        * We always map the same way, to simplify:
+        * * the buffer contains references to PTE pages
+        * * PTE pages contain references to the buffer pages
+        */
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return ERR_PTR(-ENOMEM);
+
+       atomic_inc(&g_ctx.c_xen_maps);
+       map->readonly = readonly;
+
+       map->pte_tables = kcalloc(nr_pte_tables,
+                                 sizeof(union tee_xen_mmu_table), GFP_KERNEL);
+       if (!map->pte_tables) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < nr_pte_tables; i++) {
+               /* As expected, PTE tables contain pointers to buffer pages */
+               struct page **pages = (struct page **)pte_tables[i];
+               unsigned long nr_pages = nr_pages_left;
+               int j;
+
+               map->pte_tables[i].page = get_zeroed_page(GFP_KERNEL);
+               if (!map->pte_tables[i].page) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               /* Count before granting so the error path frees this page */
+               map->pages_created++;
+               map->nr_pte_tables++;
+
+               /* The last PTE table may be only partially filled */
+               if (nr_pages > PTE_ENTRIES_MAX)
+                       nr_pages = PTE_ENTRIES_MAX;
+
+               /* Create ref for this PTE table */
+               ret = gnttab_grant_foreign_access(
+                       dom_id, virt_to_gfn(map->pte_tables[i].addr), true);
+               if (ret < 0) {
+                       mc_dev_err(
+                               ret,
+                               "gnttab_grant_foreign_access failed:\t"
+                               "PTE table %d", i);
+                       goto err;
+               }
+
+               map->refs_granted++;
+               /* gnttab_grant_foreign_access() returns the grant ref */
+               buffer->data.refs[i] = ret;
+               mc_dev_devel("mapped table %d ref %u for %lu pages",
+                            i, buffer->data.refs[i], nr_pages);
+
+               /* Create refs for pages */
+               for (j = 0; j < nr_pages; j++) {
+                       ret = gnttab_grant_foreign_access(
+                               dom_id, page_to_gfn(pages[j]), readonly);
+                       if (ret < 0) {
+                               mc_dev_err(
+                                       ret,
+                                       "gnttab_grant_foreign_access failed:\t"
+                                       "PTE %d pg %d", i, j);
+                               goto err;
+                       }
+
+                       map->refs_granted++;
+                       map->pte_tables[i].refs[j] = ret;
+                       map->nr_refs++;
+                       nr_pages_left--;
+                       mc_dev_devel("mapped [%d, %d] ref %u, left %lu",
+                                    i, j, map->pte_tables[i].refs[j],
+                                    nr_pages_left);
+               }
+       }
+
+       /* Describe the mapping to Dom0 through the shared buffer info */
+       buffer->info->nr_refs = map->nr_refs;
+       buffer->info->addr = (uintptr_t)b_map->mmu;
+       buffer->info->offset = b_map->offset;
+       buffer->info->length = b_map->length;
+       buffer->info->flags = b_map->flags;
+
+       /* Auto-delete */
+       map->deleter.object = map;
+       map->deleter.delete = xen_fe_map_delete;
+       tee_mmu_set_deleter(b_map->mmu, &map->deleter);
+
+       mc_dev_devel("created map %p: refs=%u nr_pte_tables=%d",
+                    map, map->nr_refs, map->nr_pte_tables);
+       return map;
+
+err:
+       xen_fe_map_release(map, buffer);
+       return ERR_PTR(ret);
+}
+
+/* DomU call to Dom0 */
+
+/*
+ * Send one command to Dom0 over the shared ring and block until the Dom0
+ * answer arrives (the DomU-side IRQ handler completes ring_completion).
+ * Must be called under xfe->ring_mutex, i.e. between ring_get()/ring_put().
+ */
+static inline void call_dom0(struct tee_xfe *xfe, enum tee_xen_domu_cmd cmd)
+{
+       WARN_ON(!xfe->ring_busy);
+
+       /* Allocate a new command ID, skipping 0 which means "no command" */
+       xfe->domu_cmd_id++;
+       if (!xfe->domu_cmd_id)
+               xfe->domu_cmd_id++;
+
+       /* Set command and ID */
+       xfe->ring->domu.cmd = cmd;
+       xfe->ring->domu.id = xfe->domu_cmd_id;
+       mc_dev_devel("DomU -> Dom0 request %u id %u pid %d",
+                    xfe->ring->domu.cmd, xfe->ring->domu.id, current->pid);
+       /* Call */
+       notify_remote_via_evtchn(xfe->evtchn_domu);
+       wait_for_completion(&xfe->ring_completion);
+}
+
+/*
+ * Top-half IRQ handler for the DomU event channel: Dom0 has answered the
+ * command currently on the ring.  Will be called back under
+ * xfe->ring_mutex, held by the thread sleeping in call_dom0().
+ */
+static irqreturn_t xen_fe_irq_handler_domu_th(int intr, void *arg)
+{
+       struct tee_xfe *xfe = arg;
+
+       WARN_ON(!xfe->ring_busy);
+
+       /* Response to a domU command, our side of ring locked by us */
+       mc_dev_devel("DomU -> Dom0 response %u id %u ret %d",
+                    xfe->ring->domu.cmd, xfe->ring->domu.id,
+                    xfe->ring->domu.otherend_ret);
+       /* Clear the slot and wake the caller sleeping in call_dom0() */
+       xfe->ring->domu.cmd = TEE_XEN_DOMU_NONE;
+       xfe->ring->domu.id = 0;
+       complete(&xfe->ring_completion);
+
+       return IRQ_HANDLED;
+}
+
+/* MC protocol interface */
+
+/* Query the TEE version information from Dom0 */
+int xen_mc_get_version(struct mc_version_info *version_info)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       int ret;
+
+       ring_get(xfe);
+       /* Call */
+       call_dom0(xfe, TEE_XEN_GET_VERSION);
+       /* Out */
+       memcpy(version_info, &xfe->ring->domu.version_info,
+              sizeof(*version_info));
+       /* Capture the result before ring_put(): once the ring is released
+        * another caller may reuse it and overwrite otherend_ret
+        */
+       ret = xfe->ring->domu.otherend_ret;
+       ring_put(xfe);
+       return ret;
+}
+
+/*
+ * Open an MC session in Dom0, either by UUID or from a trustlet buffer.
+ * On success the session is tracked on l_ctx.mc_sessions so Dom0
+ * notifications can be routed back to it.
+ */
+int xen_mc_open_session(struct mcp_session *session,
+                       struct mcp_open_info *info)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_mc_session *fe_mc_session;
+       struct tee_xen_buffer *ta_buffer = &xfe->buffers[1];
+       struct tee_xen_buffer *tci_buffer = &xfe->buffers[0];
+       struct xen_fe_map *ta_map = NULL;
+       struct xen_fe_map *tci_map = NULL;
+       struct tee_mmu *mmu = NULL;
+       enum tee_xen_domu_cmd cmd;
+       int ret;
+
+       fe_mc_session = kzalloc(sizeof(*fe_mc_session), GFP_KERNEL);
+       if (!fe_mc_session)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&fe_mc_session->list);
+       init_completion(&fe_mc_session->completion);
+       fe_mc_session->session = session;
+
+       ring_get(xfe);
+       /* In */
+       if (info->type == TEE_MC_UUID) {
+               cmd = TEE_XEN_MC_OPEN_SESSION;
+               xfe->ring->domu.uuid = *info->uuid;
+       } else {
+               /* Trustlet passed as a buffer: wrap it in an MMU and grant
+                * it to Dom0 for the duration of the call
+                */
+               struct mc_ioctl_buffer buf = {
+                       .va = info->va,
+                       .len = info->len,
+                       .flags = MC_IO_MAP_INPUT,
+               };
+               struct mcp_buffer_map b_map;
+
+               cmd = TEE_XEN_MC_OPEN_TRUSTLET;
+               /* Use an otherwise unused field to pass the SPID */
+               xfe->ring->domu.spid = info->spid;
+               mmu = tee_mmu_create(info->user ? current->mm : NULL, &buf);
+               if (IS_ERR(mmu)) {
+                       ret = PTR_ERR(mmu);
+                       mmu = NULL;
+                       goto out;
+               }
+
+               tee_mmu_buffer(mmu, &b_map);
+               ta_map = xen_fe_map_create(ta_buffer, &b_map,
+                                          xfe->xdev->otherend_id);
+               if (IS_ERR(ta_map)) {
+                       ret = PTR_ERR(ta_map);
+                       goto out;
+               }
+       }
+
+       /* Convert IPAs to grant references in-place */
+       if (info->tci_mmu) {
+               struct mcp_buffer_map b_map;
+
+               tee_mmu_buffer(info->tci_mmu, &b_map);
+               tci_map = xen_fe_map_create(tci_buffer, &b_map,
+                                           xfe->xdev->otherend_id);
+               if (IS_ERR(tci_map)) {
+                       ret = PTR_ERR(tci_map);
+                       goto out;
+               }
+       } else {
+               /* No TCI: tell Dom0 the buffer slot is unused */
+               tci_buffer->info->flags = 0;
+       }
+
+       /* Call */
+       call_dom0(xfe, cmd);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       if (!ret)
+               session->sid = xfe->ring->domu.session_id;
+
+out:
+       /* Track the session only on success; drop it otherwise */
+       if (!ret) {
+               mutex_lock(&l_ctx.mc_sessions_lock);
+               list_add_tail(&fe_mc_session->list, &l_ctx.mc_sessions);
+               mutex_unlock(&l_ctx.mc_sessions_lock);
+       } else {
+               kfree(fe_mc_session);
+       }
+
+       /* Dom0 is done with the call: revoke the PMD-level grants now,
+        * page grants are released when the MMUs go away (deleter)
+        */
+       xen_fe_map_release_pmd(ta_map, ta_buffer);
+       xen_fe_map_release_pmd(tci_map, tci_buffer);
+       if (mmu)
+               tee_mmu_put(mmu);
+
+       ring_put(xfe);
+       return ret;
+}
+
+/* Close an MC session in Dom0 and drop our local tracking structure */
+int xen_mc_close_session(struct mcp_session *session)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_mc_session *fe_session;
+       int ret;
+
+       fe_session = find_mc_session(session->sid);
+       if (!fe_session)
+               return -ENXIO;
+
+       ring_get(xfe);
+       /* In */
+       xfe->ring->domu.session_id = session->sid;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_MC_CLOSE_SESSION);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       if (ret == 0) {
+               /* Unlink and free our local tracking structure */
+               mutex_lock(&l_ctx.mc_sessions_lock);
+               session->state = MCP_SESSION_CLOSED;
+               list_del(&fe_session->list);
+               mutex_unlock(&l_ctx.mc_sessions_lock);
+               kfree(fe_session);
+       }
+
+       ring_put(xfe);
+       return ret;
+}
+
+/* Forward an MC session notification to Dom0 */
+int xen_mc_notify(struct mcp_session *session)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       int ret;
+
+       mc_dev_devel("MC notify session %x", session->sid);
+       ring_get(xfe);
+       xfe->ring->domu.session_id = session->sid;      /* In */
+       call_dom0(xfe, TEE_XEN_MC_NOTIFY);              /* Call */
+       ret = xfe->ring->domu.otherend_ret;             /* Out */
+       ring_put(xfe);
+       return ret;
+}
+
+/*
+ * Arm a wait in Dom0 for a notification on the given session, then block
+ * until the Dom0-side TEE_XEN_MC_WAIT_DONE callback completes us.
+ */
+int xen_mc_wait(struct mcp_session *session, s32 timeout, bool silent_expiry)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_mc_session *fe_session;
+       int ret;
+
+       /* Locked by caller so no two waits can happen on one session */
+       fe_session = find_mc_session(session->sid);
+       if (!fe_session)
+               return -ENXIO;
+
+       fe_session->ret = 0;
+
+       mc_dev_devel("MC wait session %x", session->sid);
+       ring_get(xfe);
+       /* In */
+       xfe->ring->domu.session_id = session->sid;
+       xfe->ring->domu.timeout = timeout;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_MC_WAIT);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       ring_put(xfe);
+
+       if (ret)
+               return ret;
+
+       /* Now wait for notification from Dom0 (or a signal) */
+       ret = wait_for_completion_interruptible(&fe_session->completion);
+       return ret ? ret : fe_session->ret;
+}
+
+/* Map a buffer into an MC session in Dom0; returns its secure VA in sva */
+int xen_mc_map(u32 session_id, struct tee_mmu *mmu, u32 *sva)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       struct mcp_buffer_map b_map;
+       struct xen_fe_map *map;
+       int ret;
+
+       ring_get(xfe);
+       /* In */
+       xfe->ring->domu.session_id = session_id;
+       tee_mmu_buffer(mmu, &b_map);
+       map = xen_fe_map_create(buffer, &b_map, xfe->xdev->otherend_id);
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+       } else {
+               /* Call */
+               call_dom0(xfe, TEE_XEN_MC_MAP);
+               /* Out */
+               ret = xfe->ring->domu.otherend_ret;
+               if (!ret) {
+                       *sva = buffer->info->sva;
+                       atomic_inc(&g_ctx.c_maps);
+               }
+       }
+
+       /* Tolerates ERR pointers, so safe on the failure path too */
+       xen_fe_map_release_pmd(map, buffer);
+       ring_put(xfe);
+       return ret;
+}
+
+/* Unmap a previously mapped buffer from an MC session in Dom0 */
+int xen_mc_unmap(u32 session_id, const struct mcp_buffer_map *map)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       int ret;
+
+       ring_get(xfe);
+       /* In: identify the mapping by secure VA and length */
+       xfe->ring->domu.session_id = session_id;
+       buffer->info->sva = map->secure_va;
+       buffer->info->length = map->length;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_MC_UNMAP);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       if (ret == 0)
+               atomic_dec(&g_ctx.c_maps);
+
+       ring_put(xfe);
+       return ret;
+}
+
+int xen_mc_get_err(struct mcp_session *session, s32 *err)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       int ret;
+
+       ring_get(xfe);
+       /* In */
+       xfe->ring->domu.session_id = session->sid;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_MC_GET_ERR);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       if (!ret)
+               *err = xfe->ring->domu.err;
+
+       mc_dev_devel("MC get_err session %x err %d", session->sid, *err);
+       ring_put(xfe);
+       return ret;
+}
+
+/* GP protocol interface */
+
+/* Register GP shared memory with Dom0; returns its secure VA in sva */
+int xen_gp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+                              struct gp_return *gp_ret)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       struct mcp_buffer_map b_map;
+       struct xen_fe_map *map;
+       int ret;
+
+       ring_get(xfe);
+       /* In */
+       tee_mmu_buffer(mmu, &b_map);
+       map = xen_fe_map_create(buffer, &b_map, xfe->xdev->otherend_id);
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+       } else {
+               /* Call */
+               call_dom0(xfe, TEE_XEN_GP_REGISTER_SHARED_MEM);
+               /* Out */
+               ret = xfe->ring->domu.otherend_ret;
+               if (!ret) {
+                       *sva = buffer->info->sva;
+                       atomic_inc(&g_ctx.c_maps);
+               }
+
+               /* Propagate the GP return code when Dom0 provided one */
+               if (xfe->ring->domu.gp_ret.origin)
+                       *gp_ret = xfe->ring->domu.gp_ret;
+       }
+
+       /* Tolerates ERR pointers, so safe on the failure path too */
+       xen_fe_map_release_pmd(map, buffer);
+       ring_put(xfe);
+       return ret;
+}
+
+/* Release GP shared memory previously registered with Dom0 */
+int xen_gp_release_shared_mem(struct mcp_buffer_map *map)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct tee_xen_buffer *buffer = &xfe->buffers[0];
+       int ret;
+
+       ring_get(xfe);
+       /* In: describe the registration to drop */
+       buffer->info->addr = (uintptr_t)map->mmu;
+       buffer->info->length = map->length;
+       buffer->info->flags = map->flags;
+       buffer->info->sva = map->secure_va;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_GP_RELEASE_SHARED_MEM);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       if (ret == 0)
+               atomic_dec(&g_ctx.c_maps);
+
+       ring_put(xfe);
+       return ret;
+}
+
+/*
+ * Open a GP session in Dom0.  Temporary-reference buffers in b_maps are
+ * granted to Dom0 for the duration of the call.  The result arrives
+ * asynchronously: the operation is queued on l_ctx.gp_operations and
+ * completed by the Dom0 notification handler
+ * (TEE_XEN_GP_OPEN_SESSION_DONE).
+ */
+int xen_gp_open_session(struct iwp_session *session,
+                       const struct mc_uuid_t *uuid,
+                       const struct iwp_buffer_map *b_maps,
+                       struct interworld_session *iws,
+                       struct interworld_session *op_iws,
+                       struct gp_return *gp_ret)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_gp_operation operation = { .ret = 0 };
+       /* NOTE(review): size hard-coded to 4 — confirm TEE_BUFFERS <= 4 */
+       struct xen_fe_map *maps[4] = { NULL, NULL, NULL, NULL };
+       int i, ret;
+
+       /* Prepare operation first not to be racey */
+       INIT_LIST_HEAD(&operation.list);
+       init_completion(&operation.completion);
+       /* Note: slot is a unique identifier for a session/operation */
+       operation.slot = session->slot;
+       operation.gp_ret = gp_ret;
+       operation.iws = iws;
+       mutex_lock(&l_ctx.gp_operations_lock);
+       list_add_tail(&operation.list, &l_ctx.gp_operations);
+       mutex_unlock(&l_ctx.gp_operations_lock);
+
+       ring_get(xfe);
+       /* The operation may contain tmpref's to map */
+       for (i = 0; i < TEE_BUFFERS; i++) {
+               if (!b_maps[i].map.addr) {
+                       /* Unused slot: tell Dom0 there is no buffer here */
+                       xfe->buffers[i].info->flags = 0;
+                       continue;
+               }
+
+               maps[i] = xen_fe_map_create(&xfe->buffers[i], &b_maps[i].map,
+                                           xfe->xdev->otherend_id);
+               if (IS_ERR(maps[i])) {
+                       ret = PTR_ERR(maps[i]);
+                       goto err;
+               }
+       }
+
+       /* In */
+       xfe->ring->domu.uuid = *uuid;
+       xfe->ring->domu.operation_id = session->slot;
+       xfe->ring->domu.iws = *op_iws;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_GP_OPEN_SESSION);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+err:
+       /* Tolerates NULL/ERR pointers for unused or failed slots */
+       for (i = 0; i < TEE_BUFFERS; i++)
+               xen_fe_map_release_pmd(maps[i], &xfe->buffers[i]);
+
+       ring_put(xfe);
+       if (ret) {
+               /* Dom0 never took the operation: dequeue it ourselves */
+               mutex_lock(&l_ctx.gp_operations_lock);
+               list_del(&operation.list);
+               mutex_unlock(&l_ctx.gp_operations_lock);
+               return ret;
+       }
+
+       /* Now wait for notification from Dom0 */
+       wait_for_completion(&operation.completion);
+       /* FIXME origins? */
+       return operation.ret;
+}
+
+/*
+ * Close a GP session in Dom0.  The result arrives asynchronously: the
+ * operation is queued on l_ctx.gp_operations and completed by the Dom0
+ * notification handler (TEE_XEN_GP_CLOSE_SESSION_DONE).
+ */
+int xen_gp_close_session(struct iwp_session *session)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_gp_operation operation = { .ret = 0 };
+       int ret;
+
+       /* Prepare operation first not to be racey */
+       INIT_LIST_HEAD(&operation.list);
+       init_completion(&operation.completion);
+       /* Note: slot is a unique identifier for a session/operation */
+       operation.slot = session->slot;
+       mutex_lock(&l_ctx.gp_operations_lock);
+       list_add_tail(&operation.list, &l_ctx.gp_operations);
+       mutex_unlock(&l_ctx.gp_operations_lock);
+
+       ring_get(xfe);
+       /* In */
+       xfe->ring->domu.session_id = session->sid;
+       xfe->ring->domu.operation_id = session->slot;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_GP_CLOSE_SESSION);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+       ring_put(xfe);
+       if (ret) {
+               /* Dom0 never took the operation: dequeue it ourselves */
+               mutex_lock(&l_ctx.gp_operations_lock);
+               list_del(&operation.list);
+               mutex_unlock(&l_ctx.gp_operations_lock);
+               return ret;
+       }
+
+       /* Now wait for notification from Dom0 */
+       wait_for_completion(&operation.completion);
+       return operation.ret;
+}
+
+/*
+ * Invoke a GP command in Dom0.  Temporary-reference buffers in b_maps are
+ * granted to Dom0 for the duration of the call.  The result arrives
+ * asynchronously: the operation is queued on l_ctx.gp_operations and
+ * completed by the Dom0 notification handler
+ * (TEE_XEN_GP_INVOKE_COMMAND_DONE).
+ */
+int xen_gp_invoke_command(struct iwp_session *session,
+                         const struct iwp_buffer_map *b_maps,
+                         struct interworld_session *iws,
+                         struct gp_return *gp_ret)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       struct xen_fe_gp_operation operation = { .ret = 0 };
+       /* NOTE(review): size hard-coded to 4 — confirm TEE_BUFFERS <= 4 */
+       struct xen_fe_map *maps[4] = { NULL, NULL, NULL, NULL };
+       int i, ret;
+
+       /* Prepare operation first not to be racey */
+       INIT_LIST_HEAD(&operation.list);
+       init_completion(&operation.completion);
+       /* Note: slot is a unique identifier for a session/operation */
+       operation.slot = session->slot;
+       operation.gp_ret = gp_ret;
+       operation.iws = iws;
+       mutex_lock(&l_ctx.gp_operations_lock);
+       list_add_tail(&operation.list, &l_ctx.gp_operations);
+       mutex_unlock(&l_ctx.gp_operations_lock);
+
+       ring_get(xfe);
+       /* The operation is in op_iws and may contain tmpref's to map */
+       for (i = 0; i < TEE_BUFFERS; i++) {
+               if (!b_maps[i].map.addr) {
+                       /* Unused slot: tell Dom0 there is no buffer here */
+                       xfe->buffers[i].info->flags = 0;
+                       continue;
+               }
+
+               maps[i] = xen_fe_map_create(&xfe->buffers[i], &b_maps[i].map,
+                                           xfe->xdev->otherend_id);
+               if (IS_ERR(maps[i])) {
+                       ret = PTR_ERR(maps[i]);
+                       goto err;
+               }
+       }
+
+       /* In */
+       xfe->ring->domu.session_id = session->sid;
+       xfe->ring->domu.operation_id = session->slot;
+       xfe->ring->domu.iws = *iws;
+       /* Call */
+       call_dom0(xfe, TEE_XEN_GP_INVOKE_COMMAND);
+       /* Out */
+       ret = xfe->ring->domu.otherend_ret;
+err:
+       /* Tolerates NULL/ERR pointers for unused or failed slots */
+       for (i = 0; i < TEE_BUFFERS; i++)
+               xen_fe_map_release_pmd(maps[i], &xfe->buffers[i]);
+
+       ring_put(xfe);
+       if (ret) {
+               /* Dom0 never took the operation: dequeue it ourselves */
+               mutex_lock(&l_ctx.gp_operations_lock);
+               list_del(&operation.list);
+               mutex_unlock(&l_ctx.gp_operations_lock);
+               return ret;
+       }
+
+       /* Now wait for notification from Dom0 */
+       wait_for_completion(&operation.completion);
+       /* FIXME origins? */
+       return operation.ret;
+}
+
+/* Ask Dom0 to cancel the GP operation identified by its slot */
+int xen_gp_request_cancellation(u64 slot)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+       int ret;
+
+       ring_get(xfe);
+       xfe->ring->domu.operation_id = slot;            /* In */
+       call_dom0(xfe, TEE_XEN_GP_REQUEST_CANCELLATION); /* Call */
+       ret = xfe->ring->domu.otherend_ret;             /* Out */
+       ring_put(xfe);
+       return ret;
+}
+
+/* Device */
+
+/*
+ * Tear down a frontend instance: IRQ handlers, event channels and granted
+ * pages.  Safe on a partially-constructed xfe (each field is checked).
+ * NOTE(review): relies on tee_xfe_create() initialising the irq/evtchn
+ * fields to -1 — confirm.
+ */
+static inline void xfe_release(struct tee_xfe *xfe)
+{
+       int i;
+
+       if (xfe->irq_domu >= 0)
+               unbind_from_irqhandler(xfe->irq_domu, xfe);
+
+       if (xfe->irq_dom0 >= 0)
+               unbind_from_irqhandler(xfe->irq_dom0, xfe);
+
+       if (xfe->evtchn_domu >= 0)
+               xenbus_free_evtchn(xfe->xdev, xfe->evtchn_domu);
+
+       if (xfe->evtchn_dom0 >= 0)
+               xenbus_free_evtchn(xfe->xdev, xfe->evtchn_dom0);
+
+       for (i = 0; i < TEE_BUFFERS; i++) {
+               if (!xfe->buffers[i].data.page)
+                       break;
+
+               /* Passing the page makes gnttab_end_foreign_access() free it
+                * (deferred if the grant is still in use by the other end);
+                * do not also call free_page() here, that is a double free
+                */
+               gnttab_end_foreign_access(xfe->ring->domu.buffers[i].pmd_ref, 0,
+                                         xfe->buffers[i].data.page);
+       }
+
+       if (xfe->ring_ul)
+               /* Same here: the grant-table code owns the page now */
+               gnttab_end_foreign_access(xfe->ring_ref, 0, xfe->ring_ul);
+
+       kfree(xfe);
+}
+
+/*
+ * Allocate and connect a frontend instance: shared ring page, per-buffer
+ * PMD pages, event channels and IRQ handlers, then publish the connection
+ * parameters to the backend through the xenstore.
+ * Returns the instance or an ERR_PTR; on error everything acquired so far
+ * is released through xfe_release().
+ */
+static inline struct tee_xfe *xfe_create(struct xenbus_device *xdev)
+{
+       struct tee_xfe *xfe;
+       struct xenbus_transaction trans;
+       int i, ret = -ENOMEM;
+
+       /* Alloc */
+       xfe = tee_xfe_create(xdev);
+       if (!xfe)
+               return ERR_PTR(-ENOMEM);
+
+       /* Create shared information buffer */
+       xfe->ring_ul = get_zeroed_page(GFP_KERNEL);
+       if (!xfe->ring_ul)
+               goto err;
+
+       /* Connect */
+       ret = xenbus_grant_ring(xfe->xdev, xfe->ring, 1, &xfe->ring_ref);
+       if (ret < 0)
+               goto err;
+
+       for (i = 0; i < TEE_BUFFERS; i++) {
+               xfe->buffers[i].data.page = get_zeroed_page(GFP_KERNEL);
+               if (!xfe->buffers[i].data.page) {
+                       /* ret is 0 here after a successful grant above:
+                        * without this, err would return ERR_PTR(0) == NULL
+                        * which IS_ERR() in the probe does not catch
+                        */
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               ret = xenbus_grant_ring(xfe->xdev, xfe->buffers[i].data.addr, 1,
+                                       &xfe->ring->domu.buffers[i].pmd_ref);
+               if (ret < 0)
+                       goto err;
+
+               xfe->buffers[i].info = &xfe->ring->domu.buffers[i];
+       }
+
+       ret = xenbus_alloc_evtchn(xfe->xdev, &xfe->evtchn_domu);
+       if (ret)
+               goto err;
+
+       ret = xenbus_alloc_evtchn(xfe->xdev, &xfe->evtchn_dom0);
+       if (ret)
+               goto err;
+
+       ret = bind_evtchn_to_irqhandler(xfe->evtchn_domu,
+                                       xen_fe_irq_handler_domu_th, 0,
+                                       "tee_fe_domu", xfe);
+       if (ret < 0)
+               goto err;
+
+       xfe->irq_domu = ret;
+
+       ret = bind_evtchn_to_irqhandler(xfe->evtchn_dom0,
+                                       xen_fe_irq_handler_dom0_th, 0,
+                                       "tee_fe_dom0", xfe);
+       if (ret < 0)
+               goto err;
+
+       xfe->irq_dom0 = ret;
+
+       /* Publish */
+       do {
+               ret = xenbus_transaction_start(&trans);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to start transaction");
+                       /* No transaction is open: nothing to abort */
+                       goto err;
+               }
+
+               /* Ring is one page to support older kernels */
+               ret = xenbus_printf(trans, xfe->xdev->nodename,
+                                   "ring-ref", "%u", xfe->ring_ref);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to write ring ref");
+                       goto err_transaction;
+               }
+
+               ret = xenbus_printf(trans, xfe->xdev->nodename,
+                                   "pte-entries-max", "%u",
+                                   PTE_ENTRIES_MAX);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to write PTE entries max");
+                       goto err_transaction;
+               }
+
+               ret = xenbus_printf(trans, xfe->xdev->nodename,
+                                   "event-channel-domu", "%u",
+                                   xfe->evtchn_domu);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to write event channel domu");
+                       goto err_transaction;
+               }
+
+               ret = xenbus_printf(trans, xfe->xdev->nodename,
+                                   "event-channel-dom0", "%u",
+                                   xfe->evtchn_dom0);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to write event channel dom0");
+                       goto err_transaction;
+               }
+
+               ret = xenbus_printf(trans, xfe->xdev->nodename,
+                                   "domu-version", "%u", TEE_XEN_VERSION);
+               if (ret) {
+                       xenbus_dev_fatal(xfe->xdev, ret,
+                                        "failed to write version");
+                       goto err_transaction;
+               }
+
+               ret = xenbus_transaction_end(trans, 0);
+               if (ret) {
+                       if (ret == -EAGAIN)
+                               mc_dev_devel("retry");
+                       else
+                               xenbus_dev_fatal(xfe->xdev, ret,
+                                                "failed to end transaction");
+               }
+       } while (ret == -EAGAIN);
+
+       mc_dev_devel("evtchn domu=%u dom0=%u version=%u",
+                    xfe->evtchn_domu, xfe->evtchn_dom0, TEE_XEN_VERSION);
+       xenbus_switch_state(xfe->xdev, XenbusStateInitialised);
+       return xfe;
+
+err_transaction:
+       /* Abort the still-open transaction before tearing down */
+       xenbus_transaction_end(trans, 1);
+err:
+       xenbus_switch_state(xfe->xdev, XenbusStateClosed);
+       xfe_release(xfe);
+       return ERR_PTR(ret);
+}
+
+/* xenbus device IDs this frontend binds to; empty string terminates */
+static const struct xenbus_device_id xen_fe_ids[] = {
+       { "tee_xen" },
+       { "" }
+};
+
+/* xenbus probe: run the driver hook, then create the shared-ring frontend */
+static int xen_fe_probe(struct xenbus_device *xdev,
+                       const struct xenbus_device_id *id)
+{
+       int ret = l_ctx.probe();
+
+       if (ret)
+               return ret;
+
+       l_ctx.xfe = xfe_create(xdev);
+       if (IS_ERR(l_ctx.xfe))
+               return PTR_ERR(l_ctx.xfe);
+
+       /* Bottom half for Dom0-initiated commands */
+       INIT_WORK(&l_ctx.xfe->work, xen_fe_irq_handler_dom0_bh);
+       return 0;
+}
+
+/* Track the backend state machine: only Connected requires action here */
+static void xen_fe_backend_changed(struct xenbus_device *xdev,
+                                  enum xenbus_state be_state)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+
+       mc_dev_devel("be state changed to %d", be_state);
+       if (be_state != XenbusStateConnected)
+               return;
+
+       /* Run the driver start hook; close on failure, connect on success */
+       if (l_ctx.start())
+               xenbus_switch_state(xfe->xdev, XenbusStateClosing);
+       else
+               xenbus_switch_state(xfe->xdev, XenbusStateConnected);
+}
+
+/* Frontend driver: probe creates the shared ring, otherend_changed follows
+ * the backend state machine
+ */
+static struct xenbus_driver xen_fe_driver = {
+       .ids  = xen_fe_ids,
+       .probe = xen_fe_probe,
+       .otherend_changed = xen_fe_backend_changed,
+};
+
+/*
+ * Register the Xen TEE frontend driver.
+ * @probe: hook called on device probe, before the shared ring is created
+ * @start: hook called when the backend reaches the Connected state
+ * Returns 0 or a negative errno from xenbus_register_frontend().
+ */
+int xen_fe_init(int (*probe)(void), int (*start)(void))
+{
+       l_ctx.probe = probe;
+       l_ctx.start = start;
+       mutex_init(&l_ctx.mc_sessions_lock);
+       INIT_LIST_HEAD(&l_ctx.mc_sessions);
+       mutex_init(&l_ctx.gp_operations_lock);
+       INIT_LIST_HEAD(&l_ctx.gp_operations);
+       return xenbus_register_frontend(&xen_fe_driver);
+}
+
+/* Unregister the frontend and drop our reference on the instance */
+void xen_fe_exit(void)
+{
+       struct tee_xfe *xfe = l_ctx.xfe;
+
+       /* NOTE(review): the xfe reference is dropped before the driver is
+        * unregistered — confirm no backend event can still reach it here
+        */
+       tee_xfe_put(xfe);
+       xenbus_unregister_driver(&xen_fe_driver);
+}
+
+#endif /* TRUSTONIC_XEN_DOMU */
diff --git a/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.h b/drivers/gud/gud-exynos9610/MobiCoreDriver/xen_fe.h
new file mode 100644 (file)
index 0000000..e16d205
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MC_XEN_FE_H_
+#define _MC_XEN_FE_H_
+
+#include <linux/version.h>
+
+#include "main.h"
+#include "client.h"
+#include "iwp.h"
+#include "mcp.h"
+
+#ifdef TRUSTONIC_XEN_DOMU
+/* Domain-U frontend: the calls below are forwarded to the Dom0 backend
+ * over the shared xenbus ring (implemented in xen_fe.c).
+ */
+/* MC protocol interface */
+int xen_mc_get_version(struct mc_version_info *version_info);
+int xen_mc_open_session(struct mcp_session *session,
+                       struct mcp_open_info *info);
+int xen_mc_close_session(struct mcp_session *session);
+int xen_mc_map(u32 session_id, struct tee_mmu *mmu, u32 *sva);
+int xen_mc_unmap(u32 session_id, const struct mcp_buffer_map *map);
+int xen_mc_notify(struct mcp_session *session);
+int xen_mc_wait(struct mcp_session *session, s32 timeout, bool silent_expiry);
+int xen_mc_get_err(struct mcp_session *session, s32 *err);
+/* GP protocol interface */
+int xen_gp_register_shared_mem(struct tee_mmu *mmu, u32 *sva,
+                              struct gp_return *gp_ret);
+int xen_gp_release_shared_mem(struct mcp_buffer_map *map);
+int xen_gp_open_session(struct iwp_session *session,
+                       const struct mc_uuid_t *uuid,
+                       const struct iwp_buffer_map *maps,
+                       struct interworld_session *iws,
+                       struct interworld_session *op_iws,
+                       struct gp_return *gp_ret);
+int xen_gp_close_session(struct iwp_session *session);
+int xen_gp_invoke_command(struct iwp_session *session,
+                         const struct iwp_buffer_map *maps,
+                         struct interworld_session *iws,
+                         struct gp_return *gp_ret);
+int xen_gp_request_cancellation(u64 slot);
+
+/* Register the xenbus frontend; probe/start run on xenbus events */
+int xen_fe_init(int (*probe)(void), int (*start)(void));
+void xen_fe_exit(void);
+#else
+/* Non-DomU builds: no-op stubs so callers need no conditional code */
+static inline int xen_fe_init(int (*probe)(void), int (*start)(void))
+{
+       return 0;
+}
+
+static inline void xen_fe_exit(void)
+{
+}
+#endif
+
+#endif /* _MC_XEN_FE_H_ */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/Makefile b/drivers/gud/gud-exynos9610/TlcTui/Makefile
new file mode 100755 (executable)
index 0000000..6aaae1f
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (c) 2013-2018 TRUSTONIC LIMITED
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+#
+# Makefile for the Kinibi trusted UI driver
+#
+
+GUD_ROOT_FOLDER := drivers/gud/
+
+# add our modules to kernel.
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui.o
+
+TlcTui-y := main.o tlcTui.o trustedui.o tui-hal.o
+
+# Release mode by default
+ccflags-y += -DNDEBUG
+ccflags-y += -Wno-declaration-after-statement
+
+ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
+
+# MobiCore Driver includes
+# NOTE(review): GUD_ROOT_FOLDER is drivers/gud/ but these sources live under
+# drivers/gud/gud-exynos9610/ - confirm these -I paths actually resolve.
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
+
+# MobiCore TlcTui required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/TlcTui/inc \
+             -I$(GUD_ROOT_FOLDER)/TlcTui/public
diff --git a/drivers/gud/gud-exynos9610/TlcTui/build_tag.h b/drivers/gud/gud-exynos9610/TlcTui/build_tag.h
new file mode 100644 (file)
index 0000000..d457b35
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/* Build identification string, printed at module load (see tlc_tui_init) */
+#ifndef MOBICORE_COMPONENT_BUILD_TAG
+#define MOBICORE_COMPONENT_BUILD_TAG \
+       "t-base-Exynos-Android-410a-v001-20180329_190510_46723_76543"
+#endif
diff --git a/drivers/gud/gud-exynos9610/TlcTui/inc/dciTui.h b/drivers/gud/gud-exynos9610/TlcTui/inc/dciTui.h
new file mode 100755 (executable)
index 0000000..9edde42
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* DCI (Driver Control Interface) protocol shared between the normal-world
+ * TlcTui and the secure-world DrTui: command/response structures and IDs
+ * exchanged through the world-shared DCI buffer.
+ */
+
+#ifndef __DCITUI_H__
+#define __DCITUI_H__
+
+/* Linux checkpatch suggests to use the BIT macro */
+#ifndef BIT
+#define BIT(n) (1U << (n))
+#endif
+
+/* NOTE(review): defining u32/u64 as macros (not typedefs) is fragile - any
+ * later "u32" token gets rewritten; presumably kept so this header also
+ * builds outside the kernel. Confirm before changing.
+ */
+#ifndef u32
+#define u32 uint32_t
+#endif
+
+#ifndef u64
+#define u64 uint64_t
+#endif
+
+/* Responses have bit 31 set; commands have it clear */
+#define RSP_ID_MASK BIT(31)
+
+#define RSP_ID(cmd_id) (((u32)(cmd_id)) | RSP_ID_MASK)
+#define IS_CMD(cmd_id) ((((u32)(cmd_id)) & RSP_ID_MASK) == 0)
+#define IS_RSP(cmd_id) ((((u32)(cmd_id)) & RSP_ID_MASK) == RSP_ID_MASK)
+#define CMD_ID_FROM_RSP(rsp_id) ((rsp_id) & (~RSP_ID_MASK))
+
+/**
+ * Return codes of driver commands.
+ */
+#define TUI_DCI_OK                      0x00030000
+#define TUI_DCI_ERR_UNKNOWN_CMD         0x00030001
+#define TUI_DCI_ERR_NOT_SUPPORTED       0x00030002
+#define TUI_DCI_ERR_INTERNAL_ERROR      0x00030003
+#define TUI_DCI_ERR_NO_RESPONSE         0x00030004
+#define TUI_DCI_ERR_BAD_PARAMETERS      0x00030005
+#define TUI_DCI_ERR_NO_EVENT            0x00030006
+#define TUI_DCI_ERR_OUT_OF_DISPLAY      0x00030007
+/* ... add more error codes when needed */
+
+/**
+ * Notification ID's for communication Trustlet Connector -> Driver.
+ */
+#define NOT_TUI_NONE                0
+/* NWd system event that closes the current TUI session*/
+#define NOT_TUI_CANCEL_EVENT        1
+/* TODO put this in HAL specific code */
+#define NOT_TUI_HAL_TOUCH_EVENT     0x80000001
+
+/**
+ * Command ID's for communication Driver -> Trustlet Connector.
+ */
+#define CMD_TUI_SW_NONE             0
+/* SWd request to NWd to start the TUI session */
+#define CMD_TUI_SW_OPEN_SESSION     1
+/* SWd request to NWd to close the TUI session */
+#define CMD_TUI_SW_CLOSE_SESSION    2
+/* SWd request to NWd stop accessing display controller */
+#define CMD_TUI_SW_STOP_DISPLAY     3
+/* SWd request to get TlcTui DCI version */
+#define CMD_TUI_SW_GET_VERSION      4
+/* SWd request to NWd to execute a HAL command */
+#define CMD_TUI_SW_HAL              5
+
+/* Sub-commands carried by CMD_TUI_SW_HAL (see struct tui_hal_cmd_t) */
+#define CMD_TUI_HAL_NONE                    0
+#define CMD_TUI_HAL_QUEUE_BUFFER            1
+#define CMD_TUI_HAL_QUEUE_DEQUEUE_BUFFER    2
+#define CMD_TUI_HAL_CLEAR_TOUCH_INTERRUPT   3
+#define CMD_TUI_HAL_HIDE_SURFACE            4
+#define CMD_TUI_HAL_GET_RESOLUTION          5
+
+/**
+ * Maximum data length.
+ */
+#define MAX_DCI_DATA_LEN (1024 * 100)
+
+/*
+ * TUI DCI VERSION
+ */
+#define TUI_DCI_VERSION_MAJOR   (1u)
+#define TUI_DCI_VERSION_MINOR   (1u)
+
+/* Pack major/minor into one u32: major in the high 16 bits */
+#define TUI_DCI_VERSION(major, minor) \
+       ((((major) & 0x0000ffff) << 16) | ((minor) & 0x0000ffff))
+#define TUI_DCI_VERSION_GET_MAJOR(version) (((version) >> 16) & 0x0000ffff)
+#define TUI_DCI_VERSION_GET_MINOR(version) ((version) & 0x0000ffff)
+
+/* Command payload */
+
+struct tui_disp_data_t {
+       u32 buff_id;
+};
+
+struct tui_hal_cmd_t {
+       u32 id;    /* Id of the HAL command */
+       u32 size;  /* Size of the data associated to the HAL command */
+       u64 data[2];   /* Data associated to the HAL command */
+};
+
+struct tui_hal_rsp_t {
+       u32 id;    /* Id of the HAL response */
+       u32 return_code;   /* Return code of the HAL response */
+       u32 size;  /* Size of the data associated to the HAL response */
+       u32 data[3];   /* Data associated to the HAL response */
+};
+
+struct tui_alloc_data_t {
+       u32 alloc_size;
+       u32 num_of_buff;
+};
+
+union dci_cmd_payload_t {
+       struct tui_alloc_data_t alloc_data;
+       struct tui_disp_data_t  disp_data;
+       struct tui_hal_cmd_t    hal;
+};
+
+/* Command */
+struct dci_command_t {
+       u32 id;
+       union dci_cmd_payload_t payload;
+};
+
+/* TUI frame buffer (output from NWd) */
+struct tui_alloc_buffer_t {
+       u64    pa;
+};
+
+#define MAX_DCI_BUFFER_NUMBER 4
+
+/* Response */
+struct dci_response_t {
+       u32     id; /* must be command ID | RSP_ID_MASK */
+       u32             return_code;
+       union {
+               struct tui_alloc_buffer_t alloc_buffer[MAX_DCI_BUFFER_NUMBER];
+               struct tui_hal_rsp_t hal_rsp;
+       };
+};
+
+/* DCI buffer */
+struct tui_dci_msg_t {
+       u32 version;
+       u32     nwd_notif; /* Notification from TlcTui to DrTui */
+       struct dci_command_t  cmd_nwd;   /* Command from DrTui to TlcTui */
+       struct dci_response_t nwd_rsp;   /* Response from TlcTui to DrTui */
+       u32     hal_cmd;
+       u32     hal_rsp;
+};
+
+/**
+ * Driver UUID. Update accordingly after reserving UUID
+ */
+#define DR_TUI_UUID { { 7, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
+
+#endif /* __DCITUI_H__ */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/inc/t-base-tui.h b/drivers/gud/gud-exynos9610/TlcTui/inc/t-base-tui.h
new file mode 100755 (executable)
index 0000000..2cd59ac
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TBASE_TUI_H__
+#define __TBASE_TUI_H__
+
+/* Trusted UI mode bit-mask values */
+#define TRUSTEDUI_MODE_OFF                0x00
+#define TRUSTEDUI_MODE_ALL                0xff
+#define TRUSTEDUI_MODE_TUI_SESSION    0x01
+#define TRUSTEDUI_MODE_VIDEO_SECURED  0x02
+#define TRUSTEDUI_MODE_INPUT_SECURED  0x04
+
+#ifdef CONFIG_TRUSTONIC_TRUSTED_UI
+
+/* Display-blank reference counting accessors.
+ * NOTE(review): semantics inferred from names (implemented in trustedui.c,
+ * built per the TlcTui Makefile) - confirm against that file.
+ */
+int trustedui_blank_inc(void);
+int trustedui_blank_dec(void);
+int trustedui_blank_get_counter(void);
+void trustedui_blank_set_counter(int counter);
+
+/* Current-mode accessors: mode is a TRUSTEDUI_MODE_* bit mask */
+int trustedui_get_current_mode(void);
+void trustedui_set_mode(int mode);
+int trustedui_set_mask(int mask);
+int trustedui_clear_mask(int mask);
+
+#endif /* CONFIG_TRUSTONIC_TRUSTED_UI */
+
+#endif /* __TBASE_TUI_H__ */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/main.c b/drivers/gud/gud-exynos9610/TlcTui/main.c
new file mode 100755 (executable)
index 0000000..f4c8cc7
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "mobicore_driver_api.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+#include "build_tag.h"
+
+/*static int tui_dev_major_number = 122; */
+
+/*module_param(tui_dev_major_number, int, 0000); */
+/*MODULE_PARM_DESC(major, */
+/* "The device major number used to register a unique char device driver"); */
+
+/* Static variables */
+static struct cdev tui_cdev;
+
+/*
+ * ioctl entry point for the t-base-tui character device.
+ * Returns 0 on success, a negative errno on failure.
+ * NOTE(review): a wrong magic returns -EINVAL; Linux convention for an
+ * unrecognised ioctl is -ENOTTY - kept as-is since userspace may rely on it.
+ */
+static long tui_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+       int ret = -ENOTTY;
+       int __user *uarg = (int __user *)arg;
+
+       if (_IOC_TYPE(cmd) != TUI_IO_MAGIC)
+               return -EINVAL;
+
+       pr_info("t-base-tui module: ioctl 0x%x ", cmd);
+
+       switch (cmd) {
+       case TUI_IO_NOTIFY:
+               pr_info("TUI_IO_NOTIFY\n");
+
+               /* tlc_notify_event() reports success as true */
+               if (tlc_notify_event(arg))
+                       ret = 0;
+               else
+                       ret = -EFAULT;
+               break;
+
+       case TUI_IO_WAITCMD: {
+               struct tlc_tui_command_t tui_cmd = {0};
+
+               pr_info("TUI_IO_WAITCMD\n");
+
+               /* Block until the secure side posts a command */
+               ret = tlc_wait_cmd(&tui_cmd);
+               if (ret) {
+                       pr_debug("ERROR %s:%d tlc_wait_cmd returned (0x%08X)\n",
+                                __func__, __LINE__, ret);
+                       return ret;
+               }
+
+               /* Write command id to user */
+               pr_debug("IOCTL: sending command %d to user.\n", tui_cmd.id);
+
+               if (copy_to_user(uarg, &tui_cmd, sizeof(tui_cmd)))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+
+               break;
+       }
+
+       case TUI_IO_ACK: {
+               struct tlc_tui_response_t rsp_id;
+
+               pr_info("TUI_IO_ACK\n");
+
+               /* Read user response; bail out on a fault so the
+                * uninitialized rsp_id never reaches tlc_ack_cmd()
+                */
+               if (copy_from_user(&rsp_id, uarg, sizeof(rsp_id)))
+                       return -EFAULT;
+
+               pr_debug("IOCTL: User completed command %d.\n", rsp_id.id);
+               ret = tlc_ack_cmd(&rsp_id);
+               if (ret)
+                       return ret;
+               break;
+       }
+
+       case TUI_IO_INIT_DRIVER: {
+               pr_info("TUI_IO_INIT_DRIVER\n");
+
+               ret = tlc_init_driver();
+               if (ret) {
+                       pr_debug("ERROR %s:%d tlc_init_driver returned (0x%08X)\n",
+                                __func__, __LINE__, ret);
+                       return ret;
+               }
+               break;
+       }
+
+       default:
+               pr_info("ERROR %s:%d Unknown ioctl (%u)!\n", __func__,
+                       __LINE__, cmd);
+               return -ENOTTY;
+       }
+
+       return ret;
+}
+
+/* Number of userspace clients currently holding the device node open;
+ * send_cmd_to_user() polls it before forwarding commands.
+ */
+atomic_t fileopened;
+
+/* open(): count the new client in */
+static int tui_open(struct inode *inode, struct file *file)
+{
+       pr_info("TUI file opened\n");
+       atomic_inc(&fileopened);
+       return 0;
+}
+
+/* release(): drop the client count; the last close cancels any session */
+static int tui_release(struct inode *inode, struct file *file)
+{
+       pr_info("TUI file closed\n");
+       if (!atomic_dec_and_test(&fileopened))
+               return 0;
+
+       /* Last client went away: cancel any ongoing TUI session */
+       tlc_notify_event(NOT_TUI_CANCEL_EVENT);
+       return 0;
+}
+
+/* File operations for /dev/t-base-tui.
+ * NOTE(review): compat_ioctl reuses the native handler; the ioctl structs
+ * contain only fixed-width fields so layouts should match 32/64-bit -
+ * confirm there is no pointer/long in the argument structures.
+ */
+static const struct file_operations tui_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = tui_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = tui_ioctl,
+#endif
+       .open = tui_open,
+       .release = tui_release
+};
+
+/*--------------------------------------------------------------------------- */
+/* Module init: allocate a char device region, register the cdev, create
+ * the device node and initialize the TUI HAL. Every step is unwound on
+ * failure (the original leaked the cdev/region on later errors and never
+ * checked class_create()/device_create()).
+ */
+static int __init tlc_tui_init(void)
+{
+       dev_t devno;
+       int err;
+       static struct class *tui_class;
+       struct device *dev;
+
+       pr_info("Loading t-base-tui module.\n");
+       pr_debug("\n=============== Running TUI Kernel TLC ===============\n");
+       pr_info("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
+
+       atomic_set(&fileopened, 0);
+
+       err = alloc_chrdev_region(&devno, 0, 1, TUI_DEV_NAME);
+       if (err) {
+               pr_debug("Unable to allocate Trusted UI device number\n");
+               return err;
+       }
+
+       cdev_init(&tui_cdev, &tui_fops);
+       tui_cdev.owner = THIS_MODULE;
+
+       err = cdev_add(&tui_cdev, devno, 1);
+       if (err) {
+               pr_debug("Unable to add Trusted UI char device\n");
+               goto err_unregister;
+       }
+
+       tui_class = class_create(THIS_MODULE, "tui_cls");
+       if (IS_ERR(tui_class)) {
+               err = PTR_ERR(tui_class);
+               goto err_cdev_del;
+       }
+
+       dev = device_create(tui_class, NULL, devno, NULL, TUI_DEV_NAME);
+       if (IS_ERR(dev)) {
+               err = PTR_ERR(dev);
+               goto err_class_destroy;
+       }
+
+       /* hal_tui_init() reports success as true */
+       if (!hal_tui_init()) {
+               err = -EPERM;
+               goto err_device_destroy;
+       }
+
+       return 0;
+
+err_device_destroy:
+       device_destroy(tui_class, devno);
+err_class_destroy:
+       class_destroy(tui_class);
+err_cdev_del:
+       cdev_del(&tui_cdev);
+err_unregister:
+       unregister_chrdev_region(devno, 1);
+       return err;
+}
+
+/* Module exit: tear down in reverse order of creation - delete the cdev
+ * before releasing its device-number region (the original released the
+ * region first).
+ * NOTE(review): the class/device created in tlc_tui_init() cannot be
+ * destroyed here because the class pointer is function-local there;
+ * consider promoting it to file scope.
+ */
+static void __exit tlc_tui_exit(void)
+{
+       pr_info("Unloading t-base-tui module.\n");
+
+       cdev_del(&tui_cdev);
+       unregister_chrdev_region(tui_cdev.dev, 1);
+
+       hal_tui_exit();
+}
+
+module_init(tlc_tui_init);
+module_exit(tlc_tui_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Kinibi TUI");
new file mode 100755 (executable)
index 0000000..86f123c
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* ioctl ABI between the TlcTui kernel module and its userspace client
+ * (TuiService): command/response structures, command IDs and ioctl codes.
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+#define MAX_BUFFER_NUMBER 3
+
+/* NOTE(review): u32 as a macro (not a typedef) is fragile - presumably kept
+ * so userspace can include this header; confirm before changing.
+ */
+#ifndef u32
+#define u32 uint32_t
+#endif
+
+/* Command header */
+struct tlc_tui_command_t {
+       u32     id;
+       u32     data[2];
+};
+
+/* Response header */
+struct tlc_tui_response_t {
+       u32     id;
+       u32     return_code;
+       int     ion_fd[MAX_BUFFER_NUMBER];
+       u32     screen_metrics[3];
+};
+
+/* Command IDs */
+/*  */
+#define TLC_TUI_CMD_NONE                0
+/* Start TUI session */
+#define TLC_TUI_CMD_START_ACTIVITY      1
+/* Stop TUI session */
+#define TLC_TUI_CMD_STOP_ACTIVITY       2
+/*
+ * Queue a buffer
+ * IN: index of buffer to be queued
+ */
+#define TLC_TUI_CMD_QUEUE               3
+/*
+ * Queue a new buffer and dequeue the buffer currently displayed
+ * IN: indexes of buffer to be queued
+ */
+#define TLC_TUI_CMD_QUEUE_DEQUEUE       4
+/*
+ * Alloc buffers
+ * IN: number of buffers
+ * OUT: ion fd
+ */
+#define TLC_TUI_CMD_ALLOC_FB            5
+/* Free buffers */
+#define TLC_TUI_CMD_FREE_FB             6
+/* hide secure surface */
+#define TLC_TUI_CMD_HIDE_SURFACE        7
+#define TLC_TUI_CMD_GET_RESOLUTION      8
+
+/* Return codes */
+#define TLC_TUI_OK                  0
+#define TLC_TUI_ERROR               1
+#define TLC_TUI_ERR_UNKNOWN_CMD     2
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME   "t-base-tui"
+
+#define TUI_IO_MAGIC   't'
+
+#define TUI_IO_NOTIFY  _IOW(TUI_IO_MAGIC, 1, u32)
+#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, struct tlc_tui_command_t)
+#define TUI_IO_ACK     _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+#define TUI_IO_INIT_DRIVER     _IO(TUI_IO_MAGIC, 4)
+
+/* Compatibility shim: older kernels provide INIT_COMPLETION instead of
+ * reinit_completion() - map the new name onto the old API there.
+ */
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/tlcTui.c b/drivers/gud/gud-exynos9610/TlcTui/tlcTui.c
new file mode 100755 (executable)
index 0000000..f9e133f
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "mobicore_driver_api.h"
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+
+/* ------------------------------------------------------------- */
+/* Globals */
+/* DCI world-shared buffer; allocated in tlc_open_driver() */
+struct tui_dci_msg_t *dci;
+/* Completed when a SWd command is ready for userspace (send_cmd_to_user) */
+static DECLARE_COMPLETION(dci_comp);
+/* Completed when userspace acknowledges a command (TUI_IO_ACK path) */
+static DECLARE_COMPLETION(io_comp);
+
+/* ------------------------------------------------------------- */
+/* Static */
+static const u32 DEVICE_ID = MC_DEVICE_ID_DEFAULT;
+static struct task_struct *thread_id;
+static DEFINE_MUTEX(thread_mutex);
+/* Command currently forwarded to userspace / its last response */
+static struct tlc_tui_command_t g_user_cmd = {.id = TLC_TUI_CMD_NONE};
+static struct mc_session_handle dr_session_handle = {0, 0};
+struct tlc_tui_response_t g_user_rsp = {.id = TLC_TUI_CMD_NONE,
+                               .return_code = TLC_TUI_ERR_UNKNOWN_CMD};
+/* Set once CMD_TUI_SW_GET_VERSION has been processed (tlc_process_cmd) */
+static bool g_dci_version_checked;
+
+/* Functions */
+
+/* ------------------------------------------------------------- */
+/* Allocate the DCI world-shared buffer and open a session with the secure
+ * TUI driver. Returns true on success. On session failure the WSM buffer
+ * is released again (the original leaked it).
+ */
+static bool tlc_open_driver(void)
+{
+       enum mc_result mc_ret;
+       struct mc_uuid_t dr_uuid = DR_TUI_UUID;
+
+       /* Allocate WSM buffer for the DCI */
+       mc_ret = mc_malloc_wsm(DEVICE_ID, 0, sizeof(struct tui_dci_msg_t),
+                              (uint8_t **)&dci, 0);
+       if (mc_ret != MC_DRV_OK) {
+               pr_debug("ERROR %s:%d Allocation of DCI WSM failed: %d\n",
+                        __func__, __LINE__, mc_ret);
+               return false;
+       }
+
+       /* Clear the session handle */
+       memset(&dr_session_handle, 0, sizeof(dr_session_handle));
+       /* The default device is used */
+       dr_session_handle.device_id = DEVICE_ID;
+       /* Open session with the Driver */
+       mc_ret = mc_open_session(&dr_session_handle, &dr_uuid, (uint8_t *)dci,
+                                (u32)sizeof(struct tui_dci_msg_t));
+       if (mc_ret != MC_DRV_OK) {
+               pr_debug("ERROR %s:%d Open driver session failed: %d\n",
+                        __func__, __LINE__, mc_ret);
+               /* Do not leak the DCI buffer on failure */
+               mc_free_wsm(DEVICE_ID, (uint8_t *)dci);
+               dci = NULL;
+               return false;
+       }
+
+       return true;
+}
+
+/* ------------------------------------------------------------- */
+/* Open the tbase device (tolerating "already open") and then the secure
+ * driver session. Returns true on success.
+ */
+static bool tlc_open(void)
+{
+       enum mc_result mc_ret;
+
+       pr_debug("%s: Opening tbase device\n", __func__);
+       mc_ret = mc_open_device(DEVICE_ID);
+
+       /* An already-open device reports MC_DRV_ERR_INVALID_OPERATION;
+        * that is fine and we keep going. Any other error is fatal.
+        */
+       if (mc_ret != MC_DRV_OK && mc_ret != MC_DRV_ERR_INVALID_OPERATION) {
+               pr_debug("ERROR %s:%d Error %d opening device\n", __func__,
+                        __LINE__, mc_ret);
+               return false;
+       }
+
+       pr_debug("%s: Opening driver session\n", __func__);
+       return tlc_open_driver();
+}
+
+/* ------------------------------------------------------------- */
+/* Block indefinitely until the secure driver posts a notification */
+static void tlc_wait_cmd_from_driver(void)
+{
+       u32 ret;
+
+       /* Wait for a command from secure driver */
+       ret = mc_wait_notification(&dr_session_handle, -1);
+       if (ret != MC_DRV_OK)
+               pr_debug("ERROR %s:%d mc_wait_notification() failed: %d\n",
+                        __func__, __LINE__, ret);
+       else
+               pr_debug("%s: Got a command\n", __func__);
+}
+
+/* Expose the secure-driver session handle to the other TlcTui files */
+struct mc_session_handle *get_session_handle(void)
+{
+       return &dr_session_handle;
+}
+
+/* Forward a command from the secure driver to the userspace client and wait
+ * up to 5 s for its acknowledge. Returns a TUI_DCI_* code derived from the
+ * client's TLC_TUI_* return code, or TUI_DCI_ERR_* when the client is absent
+ * or answers with the wrong command id.
+ */
+u32 send_cmd_to_user(u32 command_id, u32 data0, u32 data1)
+{
+       u32 ret = TUI_DCI_ERR_NO_RESPONSE;
+       int retry = 10;
+
+       /* Init shared variables */
+       g_user_cmd.id = command_id;
+       g_user_cmd.data[0] = data0;
+       g_user_cmd.data[1] = data1;
+       /* Erase the rsp struct */
+       memset(&g_user_rsp, 0, sizeof(g_user_rsp));
+       g_user_rsp.id = TLC_TUI_CMD_NONE;
+       g_user_rsp.return_code = TLC_TUI_ERR_UNKNOWN_CMD;
+
+       /* Give the client up to ~1 s to (re)open the device node */
+       while (!atomic_read(&fileopened) && retry--) {
+               msleep(100);
+               pr_debug("sleep for atomic_read(&fileopened) with retry = %d\n",
+                        retry);
+       }
+
+       /*
+        * Check that the client (TuiService) is still present before to return
+        * the command.
+        */
+       if (atomic_read(&fileopened)) {
+               /* Clean up previous response. */
+               complete_all(&io_comp);
+               reinit_completion(&io_comp);
+
+               /*
+                * Unlock the ioctl thread (IOCTL_WAIT) in order to let the
+                * client know that there is a command to process.
+                */
+               pr_info("%s: give way to ioctl thread\n", __func__);
+               complete(&dci_comp);
+               pr_info("TUI TLC is running, waiting for the userland response\n");
+               /* Wait for the client acknowledge (IOCTL_ACK). */
+               unsigned long completed = wait_for_completion_timeout(&io_comp,
+                               msecs_to_jiffies(5000));
+               if (!completed) {
+                       pr_debug("%s:%d No acknowledge from client, timeout!\n",
+                                __func__, __LINE__);
+               }
+       } else {
+               /*
+                * There is no client, do nothing except reporting an error to
+                * SWd.
+                */
+               pr_info("TUI TLC seems dead. Not waiting for userland answer\n");
+               ret = TUI_DCI_ERR_INTERNAL_ERROR;
+               goto end;
+       }
+
+       pr_debug("%s: Got an answer from ioctl thread.\n", __func__);
+       reinit_completion(&io_comp);
+
+       /* Check id of the cmd processed by ioctl thread (paranoia) */
+       if (g_user_rsp.id != command_id) {
+               pr_debug("ERROR %s:%d Wrong response id 0x%08x iso 0x%08x\n",
+                        __func__, __LINE__, dci->nwd_rsp.id,
+                        (u32)RSP_ID(command_id));
+               ret = TUI_DCI_ERR_INTERNAL_ERROR;
+       } else {
+               /* Map the client's TLC_TUI_* code onto a TUI_DCI_* code */
+               switch (g_user_rsp.return_code) {
+               case TLC_TUI_OK:
+                       ret = TUI_DCI_OK;
+                       break;
+               case TLC_TUI_ERROR:
+                       ret = TUI_DCI_ERR_INTERNAL_ERROR;
+                       break;
+               case TLC_TUI_ERR_UNKNOWN_CMD:
+                       ret = TUI_DCI_ERR_UNKNOWN_CMD;
+                       break;
+               }
+       }
+
+end:
+       /*
+        * In any case, reset the value of the command, to ensure that commands
+        * sent due to interrupted wait_for_completion are TLC_TUI_CMD_NONE.
+        */
+       reset_global_command_id();
+       return ret;
+}
+
+/* ------------------------------------------------------------- */
+/* Dispatch one command received from the secure driver through the DCI
+ * buffer, write the TUI_DCI_* result into dci->nwd_rsp and notify the SWd.
+ * Called after tlc_wait_cmd_from_driver() reports a notification.
+ */
+static void tlc_process_cmd(void)
+{
+       u32 ret = TUI_DCI_ERR_INTERNAL_ERROR;
+       u32 command_id = CMD_TUI_SW_NONE;
+
+       if (!dci) {
+               pr_debug("ERROR %s:%d DCI has not been set up properly - exiting\n",
+                        __func__, __LINE__);
+               return;
+       }
+
+       command_id = dci->cmd_nwd.id;
+
+       /* Pending HAL response takes precedence over a new command */
+       if (dci->hal_rsp)
+               hal_tui_notif();
+
+       /* Warn if previous response was not acknowledged */
+       if (command_id == CMD_TUI_SW_NONE) {
+               pr_debug("ERROR %s:%d Notified without command\n", __func__,
+                        __LINE__);
+               return;
+       }
+
+       if (dci->nwd_rsp.id != CMD_TUI_SW_NONE)
+               pr_debug("%s: Warning, previous response not ack\n",
+                        __func__);
+
+       /* Handle command */
+       switch (command_id) {
+       case CMD_TUI_SW_OPEN_SESSION:
+               pr_debug("%s: CMD_TUI_SW_OPEN_SESSION.\n", __func__);
+
+               /* The SWd must have queried our DCI version first */
+               if (!g_dci_version_checked) {
+                       pr_info("ERROR %s:%d DrTui version is not compatible!\n",
+                               __func__, __LINE__);
+                       ret = TUI_DCI_ERR_INTERNAL_ERROR;
+                       break;
+               }
+               /* Start android TUI activity */
+               ret = send_cmd_to_user(
+                       TLC_TUI_CMD_START_ACTIVITY,
+                       dci->cmd_nwd.payload.alloc_data.num_of_buff,
+                       dci->cmd_nwd.payload.alloc_data.alloc_size);
+               if (ret != TUI_DCI_OK)
+                       break;
+
+/*****************************************************************************/
+
+               /* Alloc work buffer separately and send it as last buffer */
+               ret = hal_tui_alloc(dci->nwd_rsp.alloc_buffer,
+                                   dci->cmd_nwd.payload.alloc_data.alloc_size,
+                                  dci->cmd_nwd.payload.alloc_data.num_of_buff);
+               if (ret != TUI_DCI_OK) {
+                       /* Roll back: tell userspace to stop the activity */
+                       pr_debug("%s: hal_tui_alloc() failed (0x%08X)",
+                                __func__, ret);
+                       send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+                       break;
+               }
+
+               /* Deactivate linux UI drivers */
+               ret = hal_tui_deactivate();
+
+               if (ret != TUI_DCI_OK) {
+                       /* Roll back allocation and userspace activity */
+                       hal_tui_free();
+                       send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+                       break;
+               }
+
+               break;
+
+       case CMD_TUI_SW_GET_VERSION: {
+               pr_debug("%s: CMD_TUI_SW_GET_VERSION.\n", __func__);
+               u32 drtui_dci_version = dci->version;
+               u32 tlctui_dci_version =
+                       TUI_DCI_VERSION(TUI_DCI_VERSION_MAJOR,
+                                       TUI_DCI_VERSION_MINOR);
+               pr_info("%s: TlcTui DCI Version (%u.%u)\n",  __func__,
+                       TUI_DCI_VERSION_GET_MAJOR(tlctui_dci_version),
+                       TUI_DCI_VERSION_GET_MINOR(tlctui_dci_version));
+               pr_info("%s: DrTui DCI Version (%u.%u)\n",  __func__,
+                       TUI_DCI_VERSION_GET_MAJOR(drtui_dci_version),
+                       TUI_DCI_VERSION_GET_MINOR(drtui_dci_version));
+               /* Write the TlcTui DCI version in the response for the SWd */
+               dci->version = tlctui_dci_version;
+               g_dci_version_checked = true;
+               ret = TUI_DCI_OK;
+               break;
+       }
+
+       case CMD_TUI_SW_HAL:
+               /* TODO Always answer, even if there is a cancel!! */
+               ret = hal_tui_process_cmd(&dci->cmd_nwd.payload.hal,
+                                         &dci->nwd_rsp.hal_rsp);
+               break;
+
+       case CMD_TUI_SW_CLOSE_SESSION:
+               pr_debug("%s: CMD_TUI_SW_CLOSE_SESSION.\n", __func__);
+
+               /* QC: close ion client before activating linux UI */
+               hal_tui_free();
+
+               /* Activate linux UI drivers */
+               ret = hal_tui_activate();
+
+               /* Stop android TUI activity */
+               /* Ignore return code, because an error means the TLC has been
+                * killed, which imply that the activity is stopped already.
+                */
+               send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY, 0, 0);
+               ret = TUI_DCI_OK;
+
+               break;
+
+       default:
+               pr_debug("ERROR %s:%d Unknown command %d\n",
+                        __func__, __LINE__, command_id);
+               ret = TUI_DCI_ERR_UNKNOWN_CMD;
+               break;
+       }
+
+       /* Fill in response to SWd, fill ID LAST */
+       pr_debug("%s: return 0x%08x to cmd 0x%08x\n",
+                __func__, ret, command_id);
+       /* TODO: fill data fields of pDci->nwdRsp */
+       dci->nwd_rsp.return_code = ret;
+       dci->nwd_rsp.id = RSP_ID(command_id);
+
+       /* Acknowledge command */
+       dci->cmd_nwd.id = CMD_TUI_SW_NONE;
+
+       /* Notify SWd. Note: ret is reused here for the mc_notify() result */
+       pr_debug("DCI RSP NOTIFY CORE\n");
+       ret = mc_notify(&dr_session_handle);
+       if (ret != MC_DRV_OK)
+               pr_debug("ERROR %s:%d Notify failed: %d\n", __func__, __LINE__,
+                        ret);
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_close_driver(void)
+{
+       enum mc_result result;
+
+       /* Tear down the session established with the secure driver */
+       result = mc_close_session(&dr_session_handle);
+       if (result != MC_DRV_OK)
+               pr_debug("ERROR %s:%d Closing driver session failed: %d\n",
+                        __func__, __LINE__, result);
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_close(void)
+{
+       enum mc_result result;
+
+       /* First close the session with the secure driver... */
+       pr_debug("%s: Closing driver session\n", __func__);
+       tlc_close_driver();
+
+       /* ...then release the tbase device itself */
+       pr_debug("%s: Closing tbase\n", __func__);
+       result = mc_close_device(DEVICE_ID);
+       if (result != MC_DRV_OK)
+               pr_debug("ERROR %s:%d Closing tbase device failed: %d\n",
+                        __func__, __LINE__, result);
+}
+
+/* Mark that no user-space command is currently pending. */
+void reset_global_command_id(void)
+{
+       g_user_cmd.id = TLC_TUI_CMD_NONE;
+}
+
+/* ------------------------------------------------------------- */
+
+/*
+ * Post an event into the DCI buffer and notify the secure driver.
+ * Returns false if the DCI is not set up or mc_notify() fails.
+ */
+bool tlc_notify_event(u32 event_type)
+{
+       enum mc_result result;
+
+       if (!dci) {
+               pr_warn("%s: DCI has not been set up properly - exiting\n",
+                       __func__);
+               return false;
+       }
+
+       /* Prepare notification message in DCI */
+       pr_debug("%s: event_type = %d\n", __func__, event_type);
+       dci->nwd_notif = event_type;
+
+       /* Signal the Driver */
+       pr_debug("DCI EVENT NOTIFY CORE\n");
+       result = mc_notify(&dr_session_handle);
+       if (result != MC_DRV_OK) {
+               pr_err("%s: mc_notify failed: %d\n", __func__, result);
+               return false;
+       }
+
+       return true;
+}
+
+/* ------------------------------------------------------------- */
+/**
+ */
+/*
+ * TlcTui main thread: opens the driver session, then services DrTui
+ * commands until the thread is asked to stop.
+ * Returns 1 if the driver could not be opened, 0 otherwise.
+ */
+static int main_thread(void *uarg)
+{
+       pr_debug("%s: TlcTui start!\n", __func__);
+
+       /* Open session on the driver */
+       if (!tlc_open()) {
+               pr_err("%s: open driver failed!\n", __func__);
+               return 1;
+       }
+
+       /*
+        * TlcTui main thread loop.  The original 'for (;;)' made the
+        * tlc_close() below unreachable dead code; checking
+        * kthread_should_stop() keeps the cleanup reachable and lets
+        * kthread_stop() terminate the thread.
+        */
+       while (!kthread_should_stop()) {
+               /* Wait for a command from the DrTui on DCI */
+               tlc_wait_cmd_from_driver();
+               /* Something has been received, process it. */
+               tlc_process_cmd();
+       }
+
+       /*
+        * Close tlc. Note that this frees the DCI pointer.
+        * Do not use this pointer after tlc_close().
+        */
+       tlc_close();
+
+       return 0;
+}
+
+/*
+ * Start the TlcTui main thread exactly once (serialized by thread_mutex).
+ * Returns 0 on success or if already running, negative errno on failure.
+ */
+static int start_thread_if_needed(void)
+{
+       int rc = 0;
+
+       /*
+        * Create the TlcTui Main thread and start secure driver (only 1st time)
+        */
+       mutex_lock(&thread_mutex);
+       if (thread_id)
+               /* Already started */
+               goto end;
+
+       thread_id = kthread_run(main_thread, NULL, "tee_tui");
+       if (IS_ERR_OR_NULL(thread_id)) {
+               /*
+                * PTR_ERR(NULL) is 0 and would be reported as success;
+                * map a NULL thread explicitly to -ENOMEM.
+                */
+               rc = thread_id ? PTR_ERR(thread_id) : -ENOMEM;
+               pr_err("Unable to start Trusted UI main thread: %d\n", rc);
+               thread_id = NULL;
+       }
+
+end:
+       mutex_unlock(&thread_mutex);
+       return rc;
+}
+
+/*
+ * Block until the DCI handler posts a command, then copy it to *cmd_id.
+ * Returns 0 on success, -ERESTARTSYS if interrupted by a signal, or the
+ * error from starting the main thread.
+ */
+int tlc_wait_cmd(struct tlc_tui_command_t *cmd_id)
+{
+       int ret = start_thread_if_needed();
+
+       if (ret)
+               return ret;
+
+       /* Wait for signal from DCI handler */
+       /* An interrupted wait is surfaced as -ERESTARTSYS (syscall restart) */
+       if (wait_for_completion_interruptible(&dci_comp)) {
+               pr_debug("interrupted by system\n");
+               return -ERESTARTSYS;
+       }
+       reinit_completion(&dci_comp);
+
+       /* Hand the pending command over to the caller */
+       *cmd_id = g_user_cmd;
+       return 0;
+}
+
+/* Start the TlcTui main thread if needed (idempotent); 0 on success. */
+int tlc_init_driver(void)
+{
+       return start_thread_if_needed();
+}
+
+/*
+ * Record user space's response to the last command and wake the DCI
+ * handler waiting on io_comp.  Always returns 0.
+ * NOTE(review): g_user_rsp is written without locking - presumably only
+ * one responder is active at a time; confirm against the ioctl caller.
+ */
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp)
+{
+       g_user_rsp = *rsp;
+
+       /* Framebuffer-allocation responses need HAL post-processing */
+       if (g_user_rsp.id == TLC_TUI_CMD_ALLOC_FB)
+               hal_tui_post_start(&g_user_rsp);
+
+       /* Send signal to DCI */
+       complete(&io_comp);
+
+       return 0;
+}
+
+/** @} */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/tlcTui.h b/drivers/gud/gud-exynos9610/TlcTui/tlcTui.h
new file mode 100755 (executable)
index 0000000..bc36adb
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TLCTUI_H_
+#define TLCTUI_H_
+
+#include "tui_ioctl.h"
+/* Log prefix for this module */
+#define TUI_MOD_TAG "t-base-tui "
+
+/* Indices into g_ion_phys/g_ion_size for the two shared buffers */
+#define ION_PHYS_WORKING_BUFFER_IDX (0)
+#define ION_PHYS_FRAME_BUFFER_IDX   (1)
+
+/* Clear the pending user-space command id */
+void reset_global_command_id(void);
+/* Block until the DCI handler posts a command for user space */
+int tlc_wait_cmd(struct tlc_tui_command_t *cmd);
+/* Deliver user space's response and wake the DCI handler */
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id);
+/* Notify the secure driver of an event; false if DCI is not set up */
+bool tlc_notify_event(u32 event_type);
+/* Start the TlcTui main thread if not already running */
+int tlc_init_driver(void);
+u32 send_cmd_to_user(u32 command_id, u32 data0, u32 data1);
+struct mc_session_handle *get_session_handle(void);
+
+extern atomic_t fileopened;
+extern struct tui_dci_msg_t *dci;
+extern struct tlc_tui_response_t g_user_rsp;
+extern u64 g_ion_phys[MAX_BUFFER_NUMBER];
+extern u32 g_ion_size[MAX_BUFFER_NUMBER];
+#endif /* TLCTUI_H_ */
diff --git a/drivers/gud/gud-exynos9610/TlcTui/trustedui.c b/drivers/gud/gud-exynos9610/TlcTui/trustedui.c
new file mode 100755 (executable)
index 0000000..9901122
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/t-base-tui.h>
+
+static int trustedui_mode = TRUSTEDUI_MODE_OFF;
+static int trustedui_blank_counter;
+
+static DEFINE_SPINLOCK(trustedui_lock);
+
+/* Increment the blank counter under the lock; returns the new value. */
+int trustedui_blank_inc(void)
+{
+       unsigned long irq_flags;
+       int updated;
+
+       spin_lock_irqsave(&trustedui_lock, irq_flags);
+       trustedui_blank_counter++;
+       updated = trustedui_blank_counter;
+       spin_unlock_irqrestore(&trustedui_lock, irq_flags);
+
+       return updated;
+}
+EXPORT_SYMBOL(trustedui_blank_inc);
+
+/* Decrement the blank counter under the lock; returns the new value. */
+int trustedui_blank_dec(void)
+{
+       unsigned long irq_flags;
+       int updated;
+
+       spin_lock_irqsave(&trustedui_lock, irq_flags);
+       trustedui_blank_counter--;
+       updated = trustedui_blank_counter;
+       spin_unlock_irqrestore(&trustedui_lock, irq_flags);
+
+       return updated;
+}
+EXPORT_SYMBOL(trustedui_blank_dec);
+
+/* Read the current blank counter under the lock. */
+int trustedui_blank_get_counter(void)
+{
+       unsigned long flags;
+       int newvalue;
+
+       spin_lock_irqsave(&trustedui_lock, flags);
+       newvalue = trustedui_blank_counter;
+       spin_unlock_irqrestore(&trustedui_lock, flags);
+
+       return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_get_counter);
+
+/* Overwrite the blank counter under the lock. */
+void trustedui_blank_set_counter(int counter)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&trustedui_lock, flags);
+       trustedui_blank_counter = counter;
+       spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_blank_set_counter);
+
+/* Read the current TUI mode bitmask under the lock. */
+int trustedui_get_current_mode(void)
+{
+       unsigned long flags;
+       int mode;
+
+       spin_lock_irqsave(&trustedui_lock, flags);
+       mode = trustedui_mode;
+       spin_unlock_irqrestore(&trustedui_lock, flags);
+
+       return mode;
+}
+EXPORT_SYMBOL(trustedui_get_current_mode);
+
+/* Replace the TUI mode bitmask wholesale under the lock. */
+void trustedui_set_mode(int mode)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&trustedui_lock, flags);
+       trustedui_mode = mode;
+       spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_set_mode);
+
+/* OR the given bits into the TUI mode; returns the resulting mode. */
+int trustedui_set_mask(int mask)
+{
+       unsigned long irq_flags;
+       int updated;
+
+       spin_lock_irqsave(&trustedui_lock, irq_flags);
+       trustedui_mode |= mask;
+       updated = trustedui_mode;
+       spin_unlock_irqrestore(&trustedui_lock, irq_flags);
+
+       return updated;
+}
+EXPORT_SYMBOL(trustedui_set_mask);
+
+/* Clear the given bits from the TUI mode; returns the resulting mode. */
+int trustedui_clear_mask(int mask)
+{
+       unsigned long irq_flags;
+       int updated;
+
+       spin_lock_irqsave(&trustedui_lock, irq_flags);
+       trustedui_mode &= ~mask;
+       updated = trustedui_mode;
+       spin_unlock_irqrestore(&trustedui_lock, irq_flags);
+
+       return updated;
+}
+EXPORT_SYMBOL(trustedui_clear_mask);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Kinibi TUI");
diff --git a/drivers/gud/gud-exynos9610/TlcTui/tui-hal.c b/drivers/gud/gud-exynos9610/TlcTui/tui-hal.c
new file mode 100755 (executable)
index 0000000..5277c6b
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2014-2017 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fb.h>
+
+#ifndef CONFIG_TRUSTONIC_TRUSTED_UI
+#define CONFIG_TRUSTONIC_TRUSTED_UI
+#endif
+#include <t-base-tui.h>
+
+#include "tui_ioctl.h"
+#include "dciTui.h"
+#include "tlcTui.h"
+#include "tui-hal.h"
+
+#define TUI_MEMPOOL_SIZE 0
+
+struct tui_mempool {
+       void *va;
+       unsigned long pa;
+       size_t size;
+};
+
+static struct tui_mempool g_tui_mem_pool;
+
+/* basic implementation of a memory pool for TUI framebuffer.  This
+ * implementation is using kmalloc, for the purpose of demonstration only.
+ * A real implementation might prefer using more advanced allocator, like ION,
+ * in order not to exhaust memory available to kmalloc
+ */
+static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
+{
+       void *mem;
+
+       pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
+       if (!size) {
+               pr_debug("TUI frame buffer: nothing to allocate.");
+               return true;
+       }
+
+       mem = kmalloc(size, GFP_KERNEL);
+       if (!mem)
+               return false;
+
+       if (ksize(mem) < size) {
+               pr_err("TUI mem pool size too small: req'd=%zu alloc'd=%zu",
+                      size, ksize(mem));
+               kfree(mem);
+               return false;
+       }
+
+       /* Record the pool; expose the usable (ksize) length, not 'size' */
+       pool->va = mem;
+       pool->pa = virt_to_phys(mem);
+       pool->size = ksize(mem);
+       return true;
+}
+
+/* Release the pool's backing allocation and reset the descriptor. */
+static void free_tui_memory_pool(struct tui_mempool *pool)
+{
+       kfree(pool->va);
+       memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+u32 hal_tui_init(void)
+{
+       /* Allocate memory pool for the framebuffer.
+        * TUI_MEMPOOL_SIZE is 0 in this integration, so this is currently
+        * a no-op that always succeeds (the allocator returns true for 0).
+        */
+       if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
+               return TUI_DCI_ERR_INTERNAL_ERROR;
+
+       return TUI_DCI_OK;
+}
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exit. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void)
+{
+       /* Nothing to do unless a pool was actually allocated */
+       if (!g_tui_mem_pool.va)
+               return;
+
+       free_tui_memory_pool(&g_tui_mem_pool);
+}
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer:    input parameter that the allocator fills with the physical
+ *                  addresses of the allocated buffers
+ * @allocsize:      size of the buffer to allocate.  All the buffer are of the
+ *                  same size
+ * @number:         Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver.  The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes.  The physical address of each buffer must be
+ * stored in the array of structure 'allocbuffer' which is provided as
+ * arguments.
+ *
+ * Physical address of the first buffer must be put in allocate[0].pa , the
+ * second one on allocbuffer[1].pa, and so on.  The function must return 0 on
+ * success, non-zero on error.  For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ * If the working buffer allocation is different from framebuffers, ensure that
+ * the physical address of the working buffer is at index 0 of the allocbuffer
+ * table (allocbuffer[0].pa).
+ */
+u32 hal_tui_alloc(
+       struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+       size_t allocsize, u32 number)
+{
+       u32 ret = TUI_DCI_ERR_INTERNAL_ERROR;
+
+       if (!allocbuffer) {
+               pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
+               return TUI_DCI_ERR_INTERNAL_ERROR;
+       }
+
+       pr_debug("%s(%d): Requested size=0x%zx x %u chunks\n",
+                __func__, __LINE__, allocsize, number);
+
+       if (allocsize == 0) {
+               pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
+               return TUI_DCI_OK;
+       }
+
+       if (number != 2) {
+               pr_debug("%s(%d): Unexpected number of buffers requested\n",
+                        __func__, __LINE__);
+               return TUI_DCI_ERR_INTERNAL_ERROR;
+       }
+
+       /*
+        * Compare via division instead of 'allocsize * number' so a huge
+        * allocsize cannot wrap around and spuriously pass the pool check.
+        * (number is known non-zero here.)
+        */
+       if (allocsize <= g_tui_mem_pool.size / number) {
+               /* requested buffers fit in the memory pool: working buffer
+                * in the first half, framebuffer in the second half
+                */
+               allocbuffer[0].pa = (u64)g_tui_mem_pool.pa;
+               allocbuffer[1].pa = (u64)(g_tui_mem_pool.pa +
+                                              g_tui_mem_pool.size / 2);
+               pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+                        allocbuffer[0].pa);
+               pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+                        allocbuffer[1].pa);
+               ret = TUI_DCI_OK;
+       } else {
+               /*
+                * requested buffer is bigger than the memory pool, return an
+                * error
+                */
+               pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
+               ret = TUI_DCI_ERR_INTERNAL_ERROR;
+       }
+
+       return ret;
+}
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void)
+{
+       /* No per-session buffers are allocated in this integration */
+}
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+u32 hal_tui_deactivate(void)
+{
+       /* Set linux TUI flag so the rest of the kernel knows a session runs */
+       trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+       /*
+        * Stop NWd display here.  After this function returns, SWd will take
+        * control of the display and input.  Therefore the NWd should no longer
+        * access it
+        * This can be done by calling the fb_blank(FB_BLANK_POWERDOWN) function
+        * on the appropriate framebuffer device
+        */
+       trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED |
+                          TRUSTEDUI_MODE_INPUT_SECURED);
+
+       /* Always succeeds: only mode flags are set in this integration */
+       return TUI_DCI_OK;
+}
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+u32 hal_tui_activate(void)
+{
+       /* Drop the secured-video/input bits so the NWd may use them again */
+       trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED |
+                            TRUSTEDUI_MODE_INPUT_SECURED);
+       /*
+        * Restart NWd display here.  TUI session has ended, and therefore the
+        * SWd will no longer use display and input.
+        * This can be done by calling the fb_blank(FB_BLANK_UNBLANK) function
+        * on the appropriate framebuffer device
+        */
+       /* Clear linux TUI flag */
+       trustedui_set_mode(TRUSTEDUI_MODE_OFF);
+       /* Always succeeds: only mode flags are touched in this integration */
+       return TUI_DCI_OK;
+}
+
+/* Do nothing it's only use for QC */
+u32 hal_tui_process_cmd(struct tui_hal_cmd_t *cmd, struct tui_hal_rsp_t *rsp)
+{
+       return TUI_DCI_OK;
+}
+
+/* Do nothing it's only use for QC */
+u32 hal_tui_notif(void)
+{
+       return TUI_DCI_OK;
+}
+
+/* Do nothing it's only use for QC */
+void hal_tui_post_start(struct tlc_tui_response_t *rsp)
+{
+       pr_info("%s(%d)\n", __func__, __LINE__);
+}
diff --git a/drivers/gud/gud-exynos9610/TlcTui/tui-hal.h b/drivers/gud/gud-exynos9610/TlcTui/tui-hal.h
new file mode 100755 (executable)
index 0000000..13c1b51
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2014-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TUI_HAL_H_
+#define _TUI_HAL_H_
+
+#include <linux/types.h>
+#include "tui_ioctl.h"
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+uint32_t hal_tui_init(void);
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exit. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void);
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer:    input parameter that the allocator fills with the physical
+ *                  addresses of the allocated buffers
+ * @allocsize:      size of the buffer to allocate.  All the buffer are of the
+ *                  same size
+ * @number:         Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver.  The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes.  The physical address of each buffer must be
+ * stored in the array of structure 'allocbuffer' which is provided as
+ * arguments.
+ *
+ * Physical address of the first buffer must be put in allocate[0].pa , the
+ * second one on allocbuffer[1].pa, and so on.  The function must return 0 on
+ * success, non-zero on error.  For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ * If the working buffer allocation is different from framebuffers, ensure that
+ * the physical address of the working buffer is at index 0 of the allocbuffer
+ * table (allocbuffer[0].pa).
+ */
+uint32_t hal_tui_alloc(
+       struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+       size_t allocsize, uint32_t number);
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void);
+
+/* Integrator hook invoked after user space answers TLC_TUI_CMD_ALLOC_FB */
+void hal_tui_post_start(struct tlc_tui_response_t *rsp);
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_deactivate(void);
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_activate(void);
+
+/**
+ * hal_tui_process_cmd() - handle a command from the secure driver HAL
+ *
+ * This function is called when kernel module receives a command from the
+ * secure driver HAL, ie when drTuiCoreDciSendAndWait() is called.
+ * (Previously declared three times in this header; one declaration kept.)
+ */
+uint32_t hal_tui_process_cmd(struct tui_hal_cmd_t *cmd,
+                            struct tui_hal_rsp_t *rsp);
+
+/**
+ * hal_tui_notif() - handle an answer from the secure driver HAL
+ *
+ * This function is called when kernel module receives an answer from the
+ * secure driver HAL (the hal_rsp field of the world shared memory struct is
+ * not null).
+ * This should be the way to get an answer from the secure driver after a
+ * command has been sent to it (the hal_cmd field of the world shared memory
+ * struct has been set and a notification has been raised).
+ * (Previously declared three times in this header; one declaration kept.)
+ */
+uint32_t hal_tui_notif(void);
+
+#endif
diff --git a/drivers/gud/gud-exynos9610/sec-os-booster/Kconfig b/drivers/gud/gud-exynos9610/sec-os-booster/Kconfig
new file mode 100755 (executable)
index 0000000..d92aac2
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Secure OS control configuration
+#
+config SECURE_OS_BOOSTER_API
+       bool "Secure OS booster API"
+       depends on TRUSTONIC_TEE
+       ---help---
+         The secure OS booster API is used for secure OS performance
+         enhancement. It can migrate a core that executes secure OS tasks
+         and lock CPU frequency.
+
+config SECURE_OS_SUPPORT_MCT_DISABLE
+       bool "Secure OS booster API supports MCT disable"
+       depends on TRUSTONIC_TEE
+       default n
+       ---help---
+          When secure OS boosting is active, the MCT is disabled to
+          prevent its interrupts from preempting the secure OS.
diff --git a/drivers/gud/gud-exynos9610/sec-os-booster/Makefile b/drivers/gud/gud-exynos9610/sec-os-booster/Makefile
new file mode 100755 (executable)
index 0000000..8d87a9e
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for Secure OS booster API
+#
+obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec_os_booster.o
+
+# MobiCore kernel driver path
+GUD_ROOT_FOLDER := drivers/gud/gud-exynos9610
+
+ccflags-y += -Iinclude/soc/samsung/
+# Includes MobiCore kernel driver
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
diff --git a/drivers/gud/gud-exynos9610/sec-os-booster/sec_os_booster.c b/drivers/gud/gud-exynos9610/sec-os-booster/sec_os_booster.c
new file mode 100755 (executable)
index 0000000..827eadc
--- /dev/null
@@ -0,0 +1,328 @@
+/* drivers/gud/sec-os-ctrl/secos_booster.c
+ *
+ * Secure OS booster driver for Samsung Exynos
+ *
+ * Copyright (c) 2014 Samsung Electronics
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/of_gpio.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/suspend.h>
+
+#include "secos_booster.h"
+#include <linux/cpufreq.h>
+
+#include "platform.h"
+
+#define BOOST_POLICY_OFFSET    0
+#define BOOST_TIME_OFFSET      16
+
+#define NS_DIV_MS (1000ull * 1000ull)
+#define WAIT_TIME (10ull * NS_DIV_MS)
+
+int nq_switch_core(uint32_t cpu);
+void mc_set_schedule_policy(int core);
+uint32_t mc_active_core(void);
+
+int mc_boost_usage_count;
+struct mutex boost_lock;
+
+unsigned int current_core;
+
+unsigned int is_suspend_prepared;
+
+struct timer_work {
+       struct kthread_work work;
+};
+
+static struct pm_qos_request secos_booster_cluster1_qos;
+static struct hrtimer timer;
+static int max_cpu_freq;
+
+static struct task_struct *mc_timer_thread;    /* Timer Thread task structure */
+static DEFINE_KTHREAD_WORKER(mc_timer_worker);
+static struct hrtimer mc_hrtimer;
+
+/*
+ * Hrtimer callback: re-enable the local timer IRQ if it is still disabled
+ * (only built when MCT-disable support is configured).  One-shot timer.
+ */
+static enum hrtimer_restart mc_hrtimer_func(struct hrtimer *timer)
+{
+#ifdef CONFIG_SECURE_OS_SUPPORT_MCT_DISABLE
+       struct irq_desc *desc = irq_to_desc(MC_INTR_LOCAL_TIMER);
+
+       /* depth != 0 means the IRQ is currently disabled */
+       if (desc->depth != 0)
+               enable_irq(MC_INTR_LOCAL_TIMER);
+#endif
+
+       return HRTIMER_NORESTART;
+}
+
+/* Worker body: arm the one-shot local-timer hrtimer one period from now. */
+static void mc_timer_work_func(struct kthread_work *work)
+{
+       ktime_t period;
+
+       period = ns_to_ktime((u64)LOCAL_TIMER_PERIOD * NSEC_PER_MSEC);
+       hrtimer_start(&mc_hrtimer, period, HRTIMER_MODE_REL);
+}
+
+/*
+ * Update the PM QoS request, rate-limited: if the previous request was made
+ * less than WAIT_TIME (10 ms) ago, sleep out the remainder first.
+ * Always returns 0.
+ * NOTE(review): the static timestamp is shared state without locking -
+ * presumably callers are serialized by boost_lock; confirm.
+ */
+int secos_booster_request_pm_qos(struct pm_qos_request *req, s32 freq)
+{
+       static ktime_t recent_qos_req_time;
+       ktime_t current_time;
+       unsigned long long ns;
+
+       current_time = ktime_get();
+
+       /* Nanoseconds elapsed since the previous request */
+       ns = ktime_to_ns(ktime_sub(current_time, recent_qos_req_time));
+
+       if (ns > 0 && WAIT_TIME > ns) {
+               pr_info("%s: recalling time is too short. wait %lldms\n", __func__, (WAIT_TIME - ns) / NS_DIV_MS + 1);
+               msleep((WAIT_TIME - ns) / NS_DIV_MS + 1);
+       }
+
+       pm_qos_update_request(req, freq);
+
+       recent_qos_req_time = ktime_get();
+
+       return 0;
+}
+
+/*
+ * Queue the timer work on the mc_timer worker and wait for it to complete.
+ * Returns true once the work has run, false if it could not be queued.
+ */
+int mc_timer(void)
+{
+       struct timer_work t_work = {
+               KTHREAD_WORK_INIT(t_work.work, mc_timer_work_func),
+       };
+
+       if (!kthread_queue_work(&mc_timer_worker, &t_work.work))
+               return false;
+
+       /* t_work lives on this stack frame; must not return before it runs */
+       kthread_flush_work(&t_work.work);
+       return true;
+}
+
+/*
+ * Create the mc_timer worker thread, pin it off the secure-OS migration
+ * core, and initialize the local-timer hrtimer.
+ * Returns 0 on success, negative errno from kthread_create() on failure.
+ */
+static int mc_timer_init(void)
+{
+       cpumask_t cpu;
+
+       mc_timer_thread = kthread_create(kthread_worker_fn, &mc_timer_worker, "mc_timer");
+       if (IS_ERR(mc_timer_thread)) {
+               /* Propagate the real error instead of a generic -EFAULT */
+               int rc = PTR_ERR(mc_timer_thread);
+
+               mc_timer_thread = NULL;
+               pr_err("%s: timer thread creation failed!", __func__);
+               return rc;
+       }
+
+       wake_up_process(mc_timer_thread);
+
+       /* Keep the timer thread off the core the secure OS migrates to */
+       cpumask_setall(&cpu);
+       cpumask_clear_cpu(MIGRATE_TARGET_CORE, &cpu);
+       /* ExySp */
+       set_cpus_allowed_ptr(mc_timer_thread, &cpu);
+
+       hrtimer_init(&mc_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       mc_hrtimer.function = mc_hrtimer_func;
+
+       return 0;
+}
+
+/* Workqueue body: stop the booster, logging (not propagating) failure. */
+static void stop_wq(struct work_struct *work)
+{
+       int err = secos_booster_stop();
+
+       if (err)
+               pr_err("%s: secos_booster_stop failed. err:%d\n", __func__,
+                      err);
+}
+
+static DECLARE_WORK(stopwq, stop_wq);
+
+/*
+ * Boost-timeout callback: defer the actual stop to process context on CPU 0
+ * via stopwq (hrtimer callbacks must not sleep).  One-shot timer.
+ */
+static enum hrtimer_restart secos_booster_hrtimer_fn(struct hrtimer *timer)
+{
+       schedule_work_on(0, &stopwq);
+
+       return HRTIMER_NORESTART;
+}
+
+/*
+ * secos_booster_start() - boost secure OS performance.
+ * @policy: two packed 16-bit fields: a secos_boost_policy level at
+ *          BOOST_POLICY_OFFSET and a boost time in ms at
+ *          BOOST_TIME_OFFSET (time is used only for STB_PERFORMANCE).
+ *
+ * Raises the cluster-1 frequency floor and migrates secure OS to the
+ * big core. Calls nest: a usage count ensures only the first caller
+ * performs the boost. Returns 0 on success or a negative errno.
+ */
+int secos_booster_start(enum secos_boost_policy policy)
+{
+       int ret = 0;
+       int freq;
+       uint32_t boost_time;    /* milli second */
+       enum secos_boost_policy boost_policy;
+
+       mutex_lock(&boost_lock);
+       mc_boost_usage_count++;
+
+       if (mc_boost_usage_count > 1) {
+               /* Nested call: boost already active, just keep the count. */
+               goto out;
+       } else if (mc_boost_usage_count <= 0) {
+               pr_err("boost usage count sync error. count : %d\n", mc_boost_usage_count);
+               mc_boost_usage_count = 0;
+               ret = -EINVAL;
+               goto error;
+       }
+
+       /* Remember where secure OS was, to restore it on stop. */
+       current_core = mc_active_core();
+
+       boost_time = (((uint32_t)policy) >> BOOST_TIME_OFFSET) & 0xFFFF;
+       boost_policy = (((uint32_t)policy) >> BOOST_POLICY_OFFSET) & 0xFFFF;
+
+       /* migrate to big Core */
+       /* boost_policy is masked with 0xFFFF so it can never be negative;
+        * only the upper bound needs checking.
+        */
+       if (boost_policy >= PERFORMANCE_MAX_CNT) {
+               pr_err("%s: wrong secos boost policy:%d\n", __func__, boost_policy);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       /* cpufreq configuration: every level except MIN pins max freq */
+       if (boost_policy == MAX_PERFORMANCE ||
+           boost_policy == MID_PERFORMANCE ||
+           boost_policy == STB_PERFORMANCE)
+               freq = max_cpu_freq;
+       else
+               freq = 0;
+
+       /* Target core must be online; give hotplug a short grace period. */
+       if (!cpu_online(MIGRATE_TARGET_CORE)) {
+               pr_debug("%s: %d core is offline\n", __func__, MIGRATE_TARGET_CORE);
+               udelay(100);
+               if (!cpu_online(MIGRATE_TARGET_CORE)) {
+                       pr_debug("%s: %d core is offline\n", __func__, MIGRATE_TARGET_CORE);
+                       ret = -EPERM;
+                       goto error;
+               }
+               pr_debug("%s: %d core is online\n", __func__, MIGRATE_TARGET_CORE);
+       }
+
+       if (secos_booster_request_pm_qos(&secos_booster_cluster1_qos, freq)) { /* KHz */
+               ret = -EPERM;
+               goto error;
+       }
+
+       ret = nq_switch_core(MIGRATE_TARGET_CORE);
+       if (ret) {
+               pr_err("%s: mc switch failed : err:%d\n", __func__, ret);
+               /* Roll back the frequency floor before failing. */
+               secos_booster_request_pm_qos(&secos_booster_cluster1_qos, 0);
+               ret = -EPERM;
+               goto error;
+       }
+
+       if (boost_policy == STB_PERFORMANCE) {
+               /* Restore origin performance policy after spend default boost time */
+               if (boost_time == 0)
+                       boost_time = DEFAULT_SECOS_BOOST_TIME;
+
+               hrtimer_cancel(&timer);
+               hrtimer_start(&timer, ns_to_ktime((u64)boost_time * NSEC_PER_MSEC),
+                               HRTIMER_MODE_REL);
+       } else {
+               /* Change schedule policy */
+               mc_set_schedule_policy(MIGRATE_TARGET_CORE);
+       }
+
+out:
+       mutex_unlock(&boost_lock);
+       return ret;
+
+error:
+       /* Undo the reference taken above; the boost was not applied. */
+       mc_boost_usage_count--;
+       mutex_unlock(&boost_lock);
+       return ret;
+}
+
+/*
+ * secos_booster_stop() - release one boost reference.
+ *
+ * When the last reference is dropped: cancel the STB timer, migrate
+ * secure OS back to the core recorded at boost start, and release the
+ * cluster-1 frequency floor. Returns 0 or the nq_switch_core() error.
+ */
+int secos_booster_stop(void)
+{
+       int ret = 0;
+
+       mutex_lock(&boost_lock);
+       mc_boost_usage_count--;
+       mc_set_schedule_policy(DEFAULT_LITTLE_CORE);
+
+       if (mc_boost_usage_count > 0) {
+               /* Other callers still hold the boost. */
+               goto out;
+       } else if (mc_boost_usage_count == 0) {
+               hrtimer_cancel(&timer);
+               pr_debug("%s: mc switch to little core \n", __func__);
+               ret = nq_switch_core(current_core);
+               if (ret)
+                       pr_err("%s: mc switch core failed. err:%d\n", __func__, ret);
+
+               /* Drop the frequency floor (request 0). */
+               secos_booster_request_pm_qos(&secos_booster_cluster1_qos, 0);
+       } else {
+               /* mismatched usage count */
+               pr_warn("boost usage count sync mismatched. count : %d\n", mc_boost_usage_count);
+               mc_boost_usage_count = 0;
+       }
+
+out:
+       mutex_unlock(&boost_lock);
+       return ret;
+}
+
+/* Track system suspend state under boost_lock for the booster paths. */
+static int secos_booster_pm_notifier(struct notifier_block *notifier,
+               unsigned long pm_event, void *dummy)
+{
+       mutex_lock(&boost_lock);
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               is_suspend_prepared = true;
+               break;
+       case PM_POST_SUSPEND:
+               is_suspend_prepared = false;
+               break;
+       default:
+               break;
+       }
+       mutex_unlock(&boost_lock);
+
+       return NOTIFY_OK;
+}
+
+/* Registered in secos_booster_init() to follow suspend/resume events. */
+static struct notifier_block secos_booster_pm_notifier_block = {
+       .notifier_call = secos_booster_pm_notifier,
+};
+
+/*
+ * Module init: set up the boost lock, the mc timer thread, the STB
+ * timeout hrtimer, the cluster-1 PM QoS request and the PM notifier.
+ */
+static int __init secos_booster_init(void)
+{
+       int err;
+
+       mutex_init(&boost_lock);
+
+       err = mc_timer_init();
+       if (err) {
+               pr_err("%s: mc timer init error :%d\n", __func__, err);
+               return err;
+       }
+
+       /* One-shot timer that ends an STB boost after its boost time. */
+       hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       timer.function = secos_booster_hrtimer_fn;
+
+       max_cpu_freq = cpufreq_quick_get_max(MIGRATE_TARGET_CORE);
+
+       pm_qos_add_request(&secos_booster_cluster1_qos, PM_QOS_CLUSTER1_FREQ_MIN, 0);
+
+       register_pm_notifier(&secos_booster_pm_notifier_block);
+
+       return 0;
+}
+late_initcall(secos_booster_init);
diff --git a/drivers/gud/gud-exynos9610/sec-os-booster/secos_booster.h b/drivers/gud/gud-exynos9610/sec-os-booster/secos_booster.h
new file mode 100755 (executable)
index 0000000..c9e2324
--- /dev/null
@@ -0,0 +1,31 @@
+/* drivers/gud/gud-exynos9610/sec-os-booster/secos_booster.h
+*
+* Copyright (c) 2014 Samsung Electronics Co., Ltd.
+*              http://www.samsung.com/
+*
+* Header file for secure OS booster API
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*/
+
+
+#ifndef __LINUX_SECOS_BOOST_H__
+#define __LINUX_SECOS_BOOST_H__
+
+/*
+ * Secure OS Boost Policy
+ *
+ * Levels passed to secos_booster_start(). NOTE(review): in the
+ * exynos9610 booster implementation, MAX/MID/STB all request the
+ * maximum big-cluster frequency and MIN requests no floor; STB
+ * additionally auto-stops after the boost time packed into the
+ * request — confirm against secos_booster.c if behavior changes.
+ */
+enum secos_boost_policy {
+       MAX_PERFORMANCE,
+       MID_PERFORMANCE,
+       MIN_PERFORMANCE,
+       STB_PERFORMANCE,
+       PERFORMANCE_MAX_CNT,    /* upper bound sentinel, not a policy */
+};
+
+/* Both return 0 on success or a negative errno; calls may nest. */
+int secos_booster_start(enum secos_boost_policy policy);
+int secos_booster_stop(void);
+
+#endif
diff --git a/drivers/gud/gud-exynos9610/sec-os-ctrl/Kconfig b/drivers/gud/gud-exynos9610/sec-os-ctrl/Kconfig
new file mode 100755 (executable)
index 0000000..958a71d
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Secure OS control configuration
+#
+config SECURE_OS_CONTROL
+       bool "Secure OS control"
+       depends on TRUSTONIC_TEE
+       ---help---
+         Enable the Secure OS control sysfs interface.
+         It can migrate the core that executes secure OS tasks
+         and query which core secure OS is currently running on.
diff --git a/drivers/gud/gud-exynos9610/sec-os-ctrl/Makefile b/drivers/gud/gud-exynos9610/sec-os-ctrl/Makefile
new file mode 100755 (executable)
index 0000000..b7827ac
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for Secure OS control sysfs
+#
+obj-$(CONFIG_SECURE_OS_CONTROL) += sec_os_ctrl.o
+
+# MobiCore kernel driver path
+GUD_ROOT_FOLDER := drivers/gud/gud-exynos9610/
+
+# Includes MobiCore kernel driver
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
diff --git a/drivers/gud/gud-exynos9610/sec-os-ctrl/sec_os_ctrl.c b/drivers/gud/gud-exynos9610/sec-os-ctrl/sec_os_ctrl.c
new file mode 100755 (executable)
index 0000000..f434f8c
--- /dev/null
@@ -0,0 +1,119 @@
+/* drivers/gud/gud-exynos9610/sec-os-ctrl/sec_os_ctrl.c
+ *
+ * Secure OS control driver for Samsung Exynos
+ *
+ * Copyright (c) 2014 Samsung Electronics
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* Default cores chosen when the user writes a bare "L" or "b". */
+#define DEFAULT_LITTLE_CORE    1
+#define DEFAULT_BIG_CORE       6
+/* Convert one ASCII digit character to its numeric value. */
+#define ASCII_TO_DIGIT_NUM(ascii)      (ascii - '0')
+
+/* Last core reported by / requested from the secure OS driver. */
+static unsigned int current_core, new_core;
+static DEFINE_MUTEX(sec_os_ctrl_lock);
+
+/* Provided by the MobiCore TEE driver; no shared header, so declared here. */
+int nq_switch_core(uint32_t cpu);
+uint32_t mc_active_core(void);
+
+/* Backs /sys/devices/system/sec_os_ctrl */
+static struct bus_type sec_os_ctrl_subsys = {
+       .name = "sec_os_ctrl",
+       .dev_name = "sec_os_ctrl",
+};
+
+/* Migrate Secure OS */
+static ssize_t migrate_os_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int ret = 0;
+       unsigned int core_num = 0;
+
+       /* Select only big or LITTLE */
+       if ((buf[0] != 'L') && (buf[0] != 'b')) {
+               pr_err("Invalid core number\n");
+               return count;
+       }
+
+       /* Derive core number */
+       core_num = ASCII_TO_DIGIT_NUM(buf[1]);
+       if (buf[0] == 'L') {
+               if ((buf[1] == 0xA) || (buf[1] == 0x0)) {       /* if LF(Line Feed, 0xA) or NULL(0x0) */
+                       new_core = DEFAULT_LITTLE_CORE;
+               } else if (core_num < 4) {      /* From core 0 to core 3 */
+                       new_core = core_num;
+               } else {
+                       pr_err("[LITTLE] Enter correct core number(0~3)\n");
+                       return count;
+               }
+       } else if (buf[0] == 'b') {
+               if ((buf[1] == 0xA) || (buf[1] == 0x0)) {       /* if LF(Line Feed, 0xA) or NULL(0x0) */
+                       new_core = DEFAULT_BIG_CORE;
+               } else if (core_num < 4) {      /* From core 0 to core 3 */
+                       new_core = core_num + 4;
+               } else {
+                       pr_err("[big] Enter correct core number(0~3)\n");
+                       return count;
+               }
+       }
+       pr_info("Secure OS will be migrated into core [%d]\n", new_core);
+
+       if (mutex_lock_interruptible(&sec_os_ctrl_lock)) {
+               pr_err("Fail to get lock\n");
+               return count;
+       }
+       ret = nq_switch_core(new_core);
+       mutex_unlock(&sec_os_ctrl_lock);
+       if (ret != 0) {
+               pr_err("Secure OS migration is failed!\n");
+               pr_err("Return value = %d\n", ret);
+               return count;
+       }
+
+       return count;
+}
+
+/* The current core where Secure OS is on */
+static ssize_t current_core_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       current_core = mc_active_core();
+
+       return sprintf(buf, "Secure OS is on core [%c%d]\n",
+                       (current_core < 4) ? 'L' : 'b', (current_core & 3));
+}
+
+/* Write-only: echo "L<n>"/"b<n>" to migrate secure OS (root only). */
+static struct kobj_attribute migrate_os_attr =
+       __ATTR(migrate_os, 0600, NULL, migrate_os_store);
+
+/* Read-only: shows the core secure OS currently runs on (root only). */
+static struct kobj_attribute current_core_attr =
+       __ATTR(current_core, 0600, current_core_show, NULL);
+
+static struct attribute *sec_os_ctrl_sysfs_attrs[] = {
+       &migrate_os_attr.attr,
+       &current_core_attr.attr,
+       NULL,
+};
+
+static struct attribute_group sec_os_ctrl_sysfs_group = {
+       .attrs = sec_os_ctrl_sysfs_attrs,
+};
+
+static const struct attribute_group *sec_os_ctrl_sysfs_groups[] = {
+       &sec_os_ctrl_sysfs_group,
+       NULL,
+};
+
+/* Register the sec_os_ctrl subsystem and its sysfs attribute groups. */
+static int __init sec_os_ctrl_init(void)
+{
+       return subsys_system_register(&sec_os_ctrl_subsys, sec_os_ctrl_sysfs_groups);
+}
+late_initcall(sec_os_ctrl_init);