---help---
Blank the framebuffer before starting a TUI session
+#ExySp
source "drivers/gud/gud-exynos9610/sec-os-ctrl/Kconfig"
source "drivers/gud/gud-exynos9610/sec-os-booster/Kconfig"
obj-$(CONFIG_TRUSTONIC_TEE) := MobiCoreDriver/
obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui/
+#ExySp
obj-$(CONFIG_SECURE_OS_CONTROL) += sec-os-ctrl/
obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec-os-booster/
return ret;
}
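+/*
+ * Map the user-supplied key SO buffer into a temporary MMU descriptor
+ * (input only) and push it to the TEE via the MCP LOAD_SYSENC_KEY_SO
+ * command; the mapping is released as soon as the command has been sent.
+ */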
+static inline int load_key_so(struct mc_admin_load_info *key_so)
+{
+ struct tee_mmu *mmu;
+ struct mcp_buffer_map map;
+ struct mc_ioctl_buffer buf;
+ int ret;
+
+ buf.va = (uintptr_t)key_so->address;
+ buf.len = key_so->length;
+ buf.flags = MC_IO_MAP_INPUT;
+ mmu = tee_mmu_create(current->mm, &buf);
+ if (IS_ERR(mmu))
+ return PTR_ERR(mmu);
+
+ tee_mmu_buffer(mmu, &map);
+ ret = mcp_load_key_so(key_so->address, &map);
+ tee_mmu_put(mmu);
+ return ret;
+}
+
static ssize_t admin_write(struct file *file, const char __user *user,
size_t len, loff_t *off)
{
ret = load_check(&info);
break;
}
+ case MC_ADMIN_IO_LOAD_KEY_SO: {
+ struct mc_admin_load_info info;
+
+ if (copy_from_user(&info, uarg, sizeof(info))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = load_key_so(&info);
+ break;
+ }
default:
ret = -ENOIOCTLCMD;
}
/*
- * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
*/
#ifndef MOBICORE_COMPONENT_BUILD_TAG
#define MOBICORE_COMPONENT_BUILD_TAG \
- "t-base-Exynos-Android-410a-v001-20180329_190510_46723_76543"
+ "t-base-Exynos-Android-410a-V003-20180920_200500_55985_85552"
#endif
struct mutex sessions_lock; /* sessions list + closing */
/* Client lock for quick WSMs and operations changes */
struct mutex quick_lock;
+ /* Client lock for CWSMs release functions */
+ struct mutex cwsm_release_lock;
/* List of WSMs for a client */
struct list_head cwsms;
/* List of GP operation for a client */
void client_put_cwsm_sva(struct tee_client *client, u32 sva)
{
- struct cwsm *cwsm = cwsm_find_by_sva(client, sva);
+	struct cwsm *cwsm;
+
+	mutex_lock(&client->cwsm_release_lock);
+ cwsm = cwsm_find_by_sva(client, sva);
if (!cwsm)
- return;
+ goto end;
- /* Release reference taken by cwsm_find */
+ /* Release reference taken by cwsm_find_by_sva */
cwsm_put(cwsm);
cwsm_put(cwsm);
+end:
+ mutex_unlock(&client->cwsm_release_lock);
}
/*
mutex_init(&client->sessions_lock);
INIT_LIST_HEAD(&client->list);
mutex_init(&client->quick_lock);
+ mutex_init(&client->cwsm_release_lock);
INIT_LIST_HEAD(&client->cwsms);
INIT_LIST_HEAD(&client->operations);
/* Add client to list of clients */
{
struct cwsm *cwsm = NULL;
+
+	if (memref->size > BUFFER_LENGTH_MAX) {
+ mc_dev_err(-EINVAL, "buffer size %llu too big", memref->size);
+ return -EINVAL;
+ }
+
if (!mmu)
/* cwsm_find automatically takes a reference */
cwsm = cwsm_find(client, memref);
int client_gp_release_shared_mem(struct tee_client *client,
const struct gp_shared_memory *memref)
{
- struct cwsm *cwsm = cwsm_find(client, memref);
+ struct cwsm *cwsm;
+	int ret = 0;
+
- if (!cwsm)
- return -ENOENT;
+ mutex_lock(&client->cwsm_release_lock);
+ cwsm = cwsm_find(client, memref);
+ if (!cwsm) {
+ ret = -ENOENT;
+ goto end;
+ }
/* Release reference taken by cwsm_find */
cwsm_put(cwsm);
cwsm_put(cwsm);
- return 0;
+end:
+ mutex_unlock(&client->cwsm_release_lock);
+ return ret;
}
/*
if (!client)
return -EINVAL;
- if (!len || len > BUFFER_LENGTH_MAX)
+ if (!len) {
+ mc_dev_err(-EINVAL, "buffer size 0 not supported");
return -EINVAL;
+ }
+
+ if (len > BUFFER_LENGTH_MAX) {
+ mc_dev_err(-EINVAL, "buffer size %u too big", len);
+ return -EINVAL;
+ }
order = get_order(len);
if (order > MAX_ORDER) {
/*
* Remove a cbuf object from client, and mark it for freeing.
* Freeing will happen once all current references are released.
+ *
+ * Note: this function could be subject to the same race condition as
+ * client_gp_release_shared_mem() and client_put_cwsm_sva(), but it is
+ * trusted because it can only be called by kernel drivers, so no lock is
+ * taken around cbuf_get_by_addr() and the two tee_cbuf_put() calls.
*/
int client_cbuf_free(struct tee_client *client, uintptr_t addr)
{
return -EINVAL;
}
- /* Two references to put: the caller's and the one we just took */
+ /* Release reference taken by cbuf_get_by_addr */
tee_cbuf_put(cbuf);
mutex_lock(&client->cbufs_lock);
cbuf->api_freed = true;
} out;
};
-union fc_switch_core {
- union fc_common common;
-
- struct {
- u32 cmd;
- u32 core_id;
- } in;
-
- struct {
- u32 resp;
- u32 ret;
- u32 state;
- u32 ext_info;
- } out;
-};
-
union fc_nsiq {
union fc_common common;
/* Structure to log SMC calls */
struct smc_log_entry {
u64 cpu_clk;
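+	/* CPU that issued the fastcall, shown in the smclog debugfs dump */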
+ int cpu_id;
union fc_common fc;
};
/* Log SMC call */
smc_log[smc_log_index].cpu_clk = local_clock();
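+	/* raw_ variant: the recorded CPU is informational only */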
+ smc_log[smc_log_index].cpu_id = raw_smp_processor_id();
smc_log[smc_log_index].fc = *fc;
if (++smc_log_index >= SMC_LOG_SIZE)
smc_log_index = 0;
fc.in.nq_info = (u32)(((addr_high & 0xFFFF) << 16) | (q_len & 0xFFFF));
/* mcp buffer start/length [16:16] [start, length] */
fc.in.mcp_info = (u32)((off << 16) | (buf_len & 0xFFFF));
- mc_dev_devel("cmd=%d, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
+ mc_dev_devel("cmd=0x%08x, base=0x%08x, nq_info=0x%08x, mcp_info=0x%08x",
fc.in.cmd, fc.in.base, fc.in.nq_info,
fc.in.mcp_info);
return smc(&fc);
return smc(&fc);
}
-int fc_switch_core(int core_id)
-{
- union fc_switch_core fc;
-
- memset(&fc, 0, sizeof(fc));
- fc.in.cmd = MC_FC_SWAP_CPU;
- fc.in.core_id = core_id;
- return smc(&fc);
-}
-
static int show_smc_log_entry(struct kasnprintf_buf *buf,
struct smc_log_entry *entry)
{
- return kasnprintf(buf, "%20llu %10d 0x%08x 0x%08x 0x%08x\n",
- entry->cpu_clk, (s32)entry->fc.in.cmd,
+	return kasnprintf(buf, "%10d %20llu 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			  entry->cpu_id, entry->cpu_clk, entry->fc.in.cmd,
entry->fc.in.param[0], entry->fc.in.param[1],
entry->fc.in.param[2]);
}
{
int i, ret = 0;
- ret = kasnprintf(buf, "%20s %10s %-10s %-10s %-10s\n",
+ ret = kasnprintf(buf, "%10s %20s %10s %-10s %-10s %-10s\n", "CPU id",
"CPU clock", "command", "param1", "param2", "param3");
if (ret < 0)
return ret;
int fc_trace_deinit(void);
int fc_nsiq(u32 session_id, u32 payload);
int fc_yield(u32 timeslice);
-int fc_switch_core(int core_id);
int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf);
case TEEC_MEMREF_TEMP_OUTPUT:
case TEEC_MEMREF_TEMP_INOUT:
if (operation->params[i].tmpref.buffer) {
+ struct gp_temp_memref *tmpref;
+
+ tmpref = &operation->params[i].tmpref;
/* Prepare buffer to map */
- bufs[i].va = operation->params[i].tmpref.buffer;
- bufs[i].len = operation->params[i].tmpref.size;
+ bufs[i].va = tmpref->buffer;
+ if (tmpref->size > BUFFER_LENGTH_MAX) {
+ mc_dev_err(-EINVAL,
+ "buffer size %llu too big",
+ tmpref->size);
+ return -EINVAL;
+ }
+
+ bufs[i].len = tmpref->size;
if (param_type == TEEC_MEMREF_TEMP_INPUT)
bufs[i].flags = MC_IO_MAP_INPUT;
else if (param_type == TEEC_MEMREF_TEMP_OUTPUT)
#define LOG_INTEGER_DECIMAL (0x0200)
#define LOG_INTEGER_SIGNED (0x0400)
+/* Active CPU id, carried in bits [15:12] of the ctrl word */
+#define LOG_CPUID_MASK (0xF000)
+#define LOG_CPUID_SHIFT 12
+
struct mc_logmsg {
u16 ctrl; /* Type and format of data */
u16 source; /* Unique value for each event source */
bool dead;
} log_ctx;
-static inline void log_eol(u16 source)
+static inline void log_eol(u16 source, u32 cpuid)
{
if (!log_ctx.line_len)
return;
if (log_ctx.prev_source)
/* TEE user-space */
- dev_info(g_ctx.mcd, "%03x|%s\n", log_ctx.prev_source,
- log_ctx.line);
+ dev_info(g_ctx.mcd, "%03x(%u)|%s\n", log_ctx.prev_source,
+ cpuid, log_ctx.line);
else
/* TEE kernel */
- dev_info(g_ctx.mcd, "mtk|%s\n", log_ctx.line);
-
+ dev_info(g_ctx.mcd, "mtk(%u)|%s\n", cpuid, log_ctx.line);
log_ctx.line[0] = '\0';
log_ctx.line_len = 0;
}
* Collect chars in log_ctx.line buffer and output the buffer when it is full.
* No locking needed because only "mobicore_log" thread updates this buffer.
*/
-static inline void log_char(char ch, u16 source)
+static inline void log_char(char ch, u16 source, u32 cpuid)
{
if (ch == '\0')
return;
if (ch == '\n' || ch == '\r') {
- log_eol(source);
+ log_eol(source, cpuid);
return;
}
- if (log_ctx.line_len >= LOG_LINE_SIZE ||
- source != log_ctx.prev_source)
- log_eol(source);
+ if (log_ctx.line_len >= LOG_LINE_SIZE || source != log_ctx.prev_source)
+ log_eol(source, cpuid);
log_ctx.line[log_ctx.line_len++] = ch;
log_ctx.line[log_ctx.line_len] = 0;
log_ctx.prev_source = source;
}
-static inline void log_string(u32 ch, u16 source)
+static inline void log_string(u32 ch, u16 source, u32 cpuid)
{
while (ch) {
- log_char(ch & 0xFF, source);
+ log_char(ch & 0xFF, source, cpuid);
ch >>= 8;
}
}
-static inline void log_number(u32 format, u32 value, u16 source)
+static inline void log_number(u32 format, u32 value, u16 source, u32 cpuid)
{
int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
char fmt[16];
snprintf(buffer, sizeof(buffer), fmt, value);
while (*reader)
- log_char(*reader++, source);
+ log_char(*reader++, source, cpuid);
}
static inline int log_msg(void *data)
{
struct mc_logmsg *msg = (struct mc_logmsg *)data;
int log_type = msg->ctrl & LOG_TYPE_MASK;
+ int cpuid = ((msg->ctrl & LOG_CPUID_MASK) >> LOG_CPUID_SHIFT);
switch (log_type) {
case LOG_TYPE_CHAR:
- log_string(msg->log_data, msg->source);
+ log_string(msg->log_data, msg->source, cpuid);
break;
case LOG_TYPE_INTEGER:
- log_number(msg->ctrl, msg->log_data, msg->source);
+ log_number(msg->ctrl, msg->log_data, msg->source, cpuid);
break;
}
if (msg->ctrl & LOG_EOL)
- log_eol(msg->source);
+ log_eol(msg->source, cpuid);
return sizeof(*msg);
}
/*
* Setup MobiCore kernel log. It assumes it's running on CORE 0!
- * The fastcall will complain is that is not the case!
+ * The fastcall will complain if that is not the case!
*/
int logging_init(phys_addr_t *buffer, u32 *size)
{
#include "public/mc_user.h"
#include "public/mc_admin.h" /* MC_ADMIN_DEVNODE */
-#include "public/mc_linux_api.h" /* mc_switch_core */
#include "platform.h" /* MC_PM_RUNTIME */
#include "main.h"
#include "xen_fe.h"
#include "build_tag.h"
+/* ExySp */
#define MC_DEVICE_PROPNAME "samsung,exynos-tee"
/* Default entry for our driver in device tree */
.llseek = default_llseek,
};
-static ssize_t debug_coreswitch_write(struct file *file,
- const char __user *buffer,
- size_t buffer_len, loff_t *ppos)
-{
- int new_cpu = 0;
-
- /* Invalid data, nothing to do */
- if (buffer_len < 1)
- return -EINVAL;
-
- if (kstrtoint_from_user(buffer, buffer_len, 0, &new_cpu))
- return -EINVAL;
-
- mc_dev_devel("set active cpu to %d", new_cpu);
- mc_switch_core(new_cpu);
- return buffer_len;
-}
-
-static ssize_t debug_coreswitch_read(struct file *file, char __user *buffer,
- size_t buffer_len, loff_t *ppos)
-{
- char cpu_str[8];
- int ret = 0;
-
- ret = snprintf(cpu_str, sizeof(cpu_str), "%d\n", mc_active_core());
- if (ret < 0)
- return -EINVAL;
-
- return simple_read_from_buffer(buffer, buffer_len, ppos,
- cpu_str, ret);
-}
-
-static const struct file_operations debug_coreswitch_ops = {
- .write = debug_coreswitch_write,
- .read = debug_coreswitch_read,
-};
-
static inline int device_user_init(void)
{
struct device *dev;
/* Create debugfs info entries */
debugfs_create_file("structs_counters", 0400, g_ctx.debug_dir, NULL,
&debug_struct_counters_ops);
- debugfs_create_file("active_cpu", 0600, g_ctx.debug_dir, NULL,
- &debug_coreswitch_ops);
/* Initialize common API layer */
client_init();
ret = device_admin_init();
if (ret)
goto err_admin;
- }
#ifndef MC_DELAYED_TEE_START
- ret = mobicore_start();
+ ret = mobicore_start();
#endif
- if (ret)
- goto err_start;
+ if (ret)
+ goto err_start;
+ }
return 0;
err_start:
- if (!is_xen_domu())
- device_admin_exit();
+ device_admin_exit();
err_admin:
device_common_exit();
err_common:
#define MC_FC_INIT MC_FC_STD32(1) /**< Initializing FastCall. */
#define MC_FC_INFO MC_FC_STD32(2) /**< Info FastCall. */
#define MC_FC_MEM_TRACE MC_FC_STD32(10) /**< Enable SWd tracing via memory */
-#define MC_FC_SWAP_CPU MC_FC_STD32(54) /**< Change new active Core */
#else
#define MC_FC_INIT ((u32)(-1)) /**< Initializing FastCall. */
#define MC_FC_INFO ((u32)(-2)) /**< Info FastCall. */
#define MC_FC_MEM_TRACE ((u32)(-31)) /**< Enable SWd tracing via memory */
-#define MC_FC_SWAP_CPU ((u32)(0x84000005)) /**< Change new active Core */
#endif
#define MC_EXT_INFO_ID_MC_EXC_UUID1 24
#define MC_EXT_INFO_ID_MC_EXC_UUID2 25
#define MC_EXT_INFO_ID_MC_EXC_UUID3 26
+/**< MobiCore exception handler last crashing task offset */
+#define MC_EXT_INFO_ID_TASK_OFFSET 27
+/**< MobiCore exception handler last crashing task's mclib offset */
+#define MC_EXT_INFO_ID_MCLIB_OFFSET 28
/** @} */
/*
- * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
MC_MCP_CMD_LOAD_TOKEN = 0x0B,
/** Check that TA can be loaded */
MC_MCP_CMD_CHECK_LOAD_TA = 0x0C,
+ /** Load a decryption key */
+ MC_MCP_CMD_LOAD_SYSENC_KEY_SO = 0x0D,
};
/*
struct rsp_header rsp_header; /** Response header */
};
+/** @defgroup MCPLOADKEYSO
+ * Load a key SO from the normal world and share it with the TEE.
+ * If anything fails, the device attestation functionality is disabled.
+ */
+
+/** Load key SO */
+struct cmd_load_key_so {
+ struct cmd_header cmd_header; /** Command header */
+ u32 wsm_data_type; /** Type of MMU */
+ u64 adr_load_data; /** Physical address of the MMU */
+ u64 ofs_load_data; /** Offset to the data */
+ u64 len_load_data; /** Length of the data */
+};
+
+/** Load key SO Command Response */
+struct rsp_load_key_so {
+ struct rsp_header rsp_header; /** Response header */
+};
+
/** Structure of the MCP buffer */
union mcp_message {
struct init_values init_values; /** Initialisation values */
struct rsp_load_token rsp_load_token;
struct cmd_check_load cmd_check_load; /** TA load check */
struct rsp_check_load rsp_check_load;
+ struct cmd_load_key_so cmd_load_key_so;/** Load key SO */
+ struct rsp_load_key_so rsp_load_key_so;
};
-/** Minimum MCP buffer length (in bytes) */
-#define MIN_MCP_LEN sizeof(mcp_message_t)
-
#define MC_FLAG_NO_SLEEP_REQ 0
#define MC_FLAG_REQ_TO_SLEEP 1
/** MobiCore status flags */
struct mcp_flags {
- /** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needsscheduling */
+ /** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needs scheduling */
u32 schedule;
struct sleep_mode sleep_mode;
/** Secure-world sleep timeout in milliseconds */
return "load token";
case MC_MCP_CMD_CHECK_LOAD_TA:
return "check load TA";
+ case MC_MCP_CMD_LOAD_SYSENC_KEY_SO:
+ return "load Key SO";
}
return "unknown";
}
return mcp_cmd(&cmd, 0, NULL, &cmd.cmd_check_load.uuid);
}
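+/* Send the key SO buffer description to the TEE. The 'data' argument
+ * mirrors mcp_load_token() and is not dereferenced here: the TEE reads
+ * the SO through the supplied buffer map.
+ */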
+int mcp_load_key_so(uintptr_t data, const struct mcp_buffer_map *map)
+{
+ union mcp_message cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd_header.cmd_id = MC_MCP_CMD_LOAD_SYSENC_KEY_SO;
+ cmd.cmd_load_key_so.wsm_data_type = map->type;
+ cmd.cmd_load_key_so.adr_load_data = map->addr;
+ cmd.cmd_load_key_so.ofs_load_data = map->offset;
+ cmd.cmd_load_key_so.len_load_data = map->length;
+ return mcp_cmd(&cmd, 0, NULL, NULL);
+}
+
int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
bool *tci_in_use)
{
int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *buffer_map);
int mcp_load_check(const struct tee_object *obj,
const struct mcp_buffer_map *buffer_map);
+int mcp_load_key_so(uintptr_t data, const struct mcp_buffer_map *buffer_map);
int mcp_open_session(struct mcp_session *session, struct mcp_open_info *info,
bool *tci_in_use);
int mcp_close_session(struct mcp_session *session);
}
#endif
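+/*
+ * gup_local() may transiently return -EBUSY; retry a bounded number of
+ * times so short-lived contention does not fail the whole mapping.
+ */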
+static inline long gup_local_repeat(struct mm_struct *mm, uintptr_t start,
+ unsigned long nr_pages, int write,
+ struct page **pages)
+{
+ int retries = 10;
+ long ret = 0;
+
+ while (retries--) {
+ ret = gup_local(mm, start, nr_pages, write, pages);
+
+		if (ret != -EBUSY)
+ break;
+ }
+
+ return ret;
+}
+
/*
* A table that could be either a pmd or pte
*/
* Linux creates (page faults) the underlying pages if
* missing.
*/
- gup_ret = gup_local(mm, (uintptr_t)reader,
- nr_pages, 1, pages);
+ gup_ret = gup_local_repeat(mm, (uintptr_t)reader,
+ nr_pages, 1, pages);
if ((gup_ret == -EFAULT) && !writeable) {
/*
* If mapping read/write fails, and the buffer
* is to be shared as input only, try to map
* again read-only.
*/
- gup_ret = gup_local(mm, (uintptr_t)reader,
- nr_pages, 0, pages);
+ gup_ret = gup_local_repeat(mm,
+ (uintptr_t)reader,
+ nr_pages, 0, pages);
}
up_read(&mm->mmap_sem);
if (gup_ret < 0) {
* GNU General Public License for more details.
*/
-#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include "platform.h" /* CPU-related information */
#include "public/mc_user.h"
-#include "public/mc_linux_api.h" /* mc_switch_core */
#include "mci/mcifc.h"
#include "mci/mciiwp.h"
#define SCHEDULING_FREQ 5 /**< N-SIQ every n-th time */
#define DEFAULT_TIMEOUT_MS 20000 /* We do nothing on timeout anyway */
-/* If not forced by platform header, use defaults below */
-
-#ifndef CPU_IDS
-#define CPU_IDS { 0x0000, 0x0001, 0x0002, 0x0003, \
- 0x0100, 0x0101, 0x0102, 0x0103, \
- 0x0200, 0x0201, 0x0202, 0x0203 }
-#endif
-
-static const u32 cpu_ids[] = CPU_IDS;
-
static struct {
struct mutex buffer_mutex; /* Lock on SWd communication buffer */
struct mcp_buffer *mcp_buffer;
struct mcp_time *time;
/* Scheduler */
- int active_cpu; /* We always start on CPU #0 */
- int next_cpu; /* If core switch required */
struct task_struct *tee_scheduler_thread;
bool tee_scheduler_run;
bool tee_hung;
bool log_buffer_busy;
} l_ctx;
-#ifdef MC_SMC_FASTCALL
-static inline int nq_set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
- return 0;
-}
-#else /* MC_SMC_FASTCALL */
-static inline int nq_set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
- return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif /* ! MC_SMC_FASTCALL */
-
-static inline int switch_to_online_core(int dying_cpu)
-{
- int cpu;
-
- if (l_ctx.active_cpu != dying_cpu) {
- mc_dev_devel("not active CPU, no action taken");
- return 0;
- }
-
- /* Chose the first online CPU and switch! */
- for_each_online_cpu(cpu) {
- if (cpu != dying_cpu) {
- mc_dev_info("CPU #%d is dying, switching to CPU #%d",
- dying_cpu, cpu);
- return mc_switch_core(cpu);
- }
-
- mc_dev_devel("skipping CPU #%d", dying_cpu);
- }
-
- return 0;
-}
-
-#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
-static int cpu_notifer_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- int cpu = (int)(uintptr_t)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- mc_dev_devel("CPU #%d is going to die", cpu);
- switch_to_online_core(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_notifer = {
- .notifier_call = cpu_notifer_callback,
-};
-#else
-static int nq_cpu_down_prep(unsigned int cpu)
-{
- mc_dev_devel("CPU #%d is going to die", cpu);
- return switch_to_online_core(cpu);
-}
-#endif
-
static inline bool is_iwp_id(u32 id)
{
return (id & SID_IWP_NOTIFICATION) != 0;
{ MC_EXT_INFO_ID_MC_EXC_IPCMSG, "mcExcep.cause"},
/**< MobiCore exception handler last IPC data */
{MC_EXT_INFO_ID_MC_EXC_IPCDATA, "mcExcep.meta"},
+ /**< MobiCore last crashing task offset */
+ {MC_EXT_INFO_ID_TASK_OFFSET,
+ "faultRec.offset.task"},
+ /**< MobiCore last crashing task's mcLib offset */
+ {MC_EXT_INFO_ID_MCLIB_OFFSET,
+ "faultRec.offset.mclib"},
};
char uuid_str[33];
if (fc_info(status_map[i].index, NULL, &info))
return;
- mc_dev_info(" %-20s= 0x%08x", status_map[i].msg, info);
+ mc_dev_info(" %-22s= 0x%08x", status_map[i].msg, info);
if (ret >= 0)
- ret = kasnprintf(&l_ctx.dump, "%-20s= 0x%08x\n",
+ ret = kasnprintf(&l_ctx.dump, "%-22s= 0x%08x\n",
status_map[i].msg, info);
}
}
}
- mc_dev_info(" %-20s= 0x%s", "mcExcep.uuid", uuid_str);
+ mc_dev_info(" %-22s= 0x%s", "mcExcep.uuid", uuid_str);
if (ret >= 0)
- ret = kasnprintf(&l_ctx.dump, "%-20s= 0x%s\n", "mcExcep.uuid",
+ ret = kasnprintf(&l_ctx.dump, "%-22s= 0x%s\n", "mcExcep.uuid",
uuid_str);
if (ret < 0) {
return timeout_ms == 0;
}
-static inline int nq_switch_core(void)
-{
- int cpu = l_ctx.next_cpu;
- int core_id;
- int ret;
-
- if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu))
- return -EINVAL;
-
- core_id = cpu_ids[cpu];
- ret = fc_switch_core(core_id);
- logging_run();
- if (ret) {
- mc_dev_err(ret, "failed to switch core from %d to %d",
- l_ctx.active_cpu, cpu);
- return ret;
- }
-
- mc_dev_devel("switched core from %d to %d", l_ctx.active_cpu, cpu);
- l_ctx.active_cpu = cpu;
- return ret;
-}
-
/*
* This thread, and only this thread, schedules the SWd. Hence, reading the idle
* status and its associated timeout is safe from race conditions.
*/
static int tee_scheduler(void *arg)
{
- int timeslice = 0; /* Actually scheduling period */
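+	/* true: send an N-SIQ to the SWd scheduler; false: just yield */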
+ bool swd_notify = false;
int ret = 0;
/* Enable TEE clock */
case NONE:
break;
case YIELD:
- /* Yield forced: increment timeslice */
- timeslice++;
+ swd_notify = false;
break;
case NSIQ:
- timeslice = 0;
+ swd_notify = true;
break;
case SUSPEND:
/* Force N_SIQ */
- timeslice = 0;
+ swd_notify = true;
set_sleep_mode_rq(MC_FLAG_REQ_TO_SLEEP);
pm_request = true;
break;
case RESUME:
/* Force N_SIQ */
- timeslice = 0;
+ swd_notify = true;
set_sleep_mode_rq(MC_FLAG_NO_SLEEP_REQ);
pm_request = true;
break;
}
- /* Switch core */
- if (l_ctx.next_cpu != l_ctx.active_cpu && !nq_switch_core()) {
- cpumask_t cpu_mask;
-
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(l_ctx.active_cpu, &cpu_mask);
- nq_set_cpus_allowed(l_ctx.tee_scheduler_thread,
- cpu_mask);
- }
-
l_ctx.request = NONE;
nq_update_time();
mutex_unlock(&l_ctx.request_mutex);
l_ctx.mcp_buffer->flags.timeout_ms = -1;
mutex_unlock(&l_ctx.buffer_mutex);
- if (timeslice--) {
- /* Resume SWd from where it was */
- fc_yield(timeslice);
- } else {
+ if (swd_notify) {
u32 session_id = 0;
u32 payload = 0;
retrieve_last_session_payload(&session_id, &payload);
- timeslice = SCHEDULING_FREQ;
+ swd_notify = false;
/* Call SWd scheduler */
fc_nsiq(session_id, payload);
+ } else {
+ /* Resume SWd from where it was */
+ fc_yield(0);
}
/* Always flush log buffer after the SWd has run */
return ret;
}
- /* The scheduler/fastcall thread MUST run on CPU 0 at startup */
- nq_set_cpus_allowed(l_ctx.tee_scheduler_thread, CPU_MASK_CPU0);
wake_up_process(l_ctx.tee_scheduler_thread);
wait_for_completion(&l_ctx.boot_complete);
unsigned long mci;
int ret;
- if (nr_cpu_ids) {
-#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
- ret = register_cpu_notifier(&cpu_notifer);
-#else
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "tee/trustonic:online",
- NULL, nq_cpu_down_prep);
-#endif
- /* ExySp : Kinibi 410 */
- if (ret < 0) {
- mc_dev_err(ret, "cpu online callback setup failed");
- goto err_register;
- }
- }
-
ret = mc_clock_init();
if (ret)
goto err_clock;
err_logging:
mc_clock_exit();
err_clock:
- if (nr_cpu_ids)
-#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
- unregister_cpu_notifier(&cpu_notifer);
-#else
- cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
-#endif
-err_register:
return ret;
}
free_pages((unsigned long)l_ctx.mci, l_ctx.order);
logging_exit(l_ctx.log_buffer_busy);
- if (nr_cpu_ids)
-#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
- unregister_cpu_notifier(&cpu_notifer);
-#else
- cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
-#endif
mc_clock_exit();
}
-
-int mc_active_core(void)
-{
- return l_ctx.active_cpu;
-}
-
-int mc_switch_core(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- if (!cpu_online(cpu))
- return -EPERM;
-
- l_ctx.next_cpu = cpu;
- /* Ping the tee_scheduler thread to update */
- nq_scheduler_command(YIELD);
-
- return 0;
-}
/*
- * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS7870) || \
defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS7880) || defined(CONFIG_SOC_EXYNOS8895)
#define MC_INTR_SSIQ 255
+#elif defined(CONFIG_SOC_EXYNOS7885)
+#define MC_INTR_SSIQ 97
#elif defined(CONFIG_SOC_EXYNOS7420) || defined(CONFIG_SOC_EXYNOS7580)
#define MC_INTR_SSIQ 246
#endif
_IOW(MC_IOC_MAGIC, 3, struct mc_admin_load_info)
#define MC_ADMIN_IO_LOAD_CHECK \
_IOW(MC_IOC_MAGIC, 4, struct mc_admin_load_info)
+#define MC_ADMIN_IO_LOAD_KEY_SO \
+ _IOW(MC_IOC_MAGIC, 5, struct mc_admin_load_info)
#ifdef __cplusplus
}
+++ /dev/null
-/*
- * Copyright (c) 2013-2017 TRUSTONIC LIMITED
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef _MC_LINUX_API_H_
-#define _MC_LINUX_API_H_
-
-#include <linux/types.h>
-
-/*
- * Switch TEE active core to core_num, defined as linux
- * core id
- */
-int mc_switch_core(int core_num);
-
-/*
- * Return TEE active core as Linux core id
- */
-int mc_active_core(void);
-
-#endif /* _MC_LINUX_API_H_ */
return -EINVAL;
}
+ if (buf->len > BUFFER_LENGTH_MAX) {
+ mc_dev_err(-EINVAL, "buffer size %u too big", buf->len);
+ return -EINVAL;
+ }
+
wsm->mmu = client_mmu_create(session->client, buf, &wsm->cbuf);
if (IS_ERR(wsm->mmu))
return PTR_ERR(wsm->mmu);
struct mc_identity *mcp_id = (struct mc_identity *)mcp_identity;
u8 hash[SHA1_HASH_SIZE] = { 0 };
bool application = false;
+ bool supplied_ca_identity = false;
const void *data;
unsigned int data_len;
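+	/* All-zero pattern used to detect an unset login_data */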
+ static const u8 zero_buffer[sizeof(identity->login_data)] = { 0 };
/* Copy login type */
mcp_identity->login_type = identity->login_type;
switch (identity->login_type) {
case LOGIN_PUBLIC:
- case LOGIN_USER:
case LOGIN_GROUP:
break;
+ case LOGIN_USER:
+ data = NULL;
+ data_len = 0;
+ break;
case LOGIN_APPLICATION:
application = true;
+ supplied_ca_identity = true;
data = NULL;
data_len = 0;
break;
case LOGIN_USER_APPLICATION:
application = true;
+ supplied_ca_identity = true;
data = &mcp_id->uid;
data_len = sizeof(mcp_id->uid);
break;
return -EINVAL;
}
- if (application) {
+	/* Let the supplied login_data pass through when the login type is
+	 * LOGIN_APPLICATION or LOGIN_USER_APPLICATION and the buffer is not
+	 * zero-filled: it is then expected to contain an NWd-computed hash
+	 * of the CA identity.
+	 */
+ if (supplied_ca_identity &&
+ memcmp(identity->login_data, zero_buffer,
+ sizeof(identity->login_data)) != 0) {
+ memcpy(&mcp_id->login_data, identity->login_data,
+ sizeof(mcp_id->login_data));
+ } else if (application) {
int ret = hash_path_and_data(task, hash, data, data_len);
if (ret) {
u32 *return_origin)
{
struct mc_uuid_t uuid;
- struct mc_identity identity;
+ struct mc_identity identity = {0};
struct tee_client *client = NULL;
struct gp_operation gp_op;
struct gp_return gp_ret;
{
struct tee_client *client = get_client(file);
- if ((vmarea->vm_end - vmarea->vm_start) > BUFFER_LENGTH_MAX)
+ if ((vmarea->vm_end - vmarea->vm_start) > BUFFER_LENGTH_MAX) {
+ mc_dev_err(-EINVAL, "buffer size %lu too big",
+ vmarea->vm_end - vmarea->vm_start);
return -EINVAL;
+ }
/* Alloc contiguous buffer for this client */
return client_cbuf_create(client,
/* GP operations */
struct mutex gp_operations_lock;
struct list_head gp_operations;
+	/* Last back-end state, kept to work around duplicate state
+	 * notifications seen on some Xen implementations
+	 */
+ int last_be_state;
} l_ctx;
struct xen_fe_mc_session {
struct tee_xfe *xfe = l_ctx.xfe;
mc_dev_devel("be state changed to %d", be_state);
+
+ if (be_state == l_ctx.last_be_state) {
+ /* Protection against duplicated notifications (TBUG-1387) */
+ mc_dev_devel("be state (%d) already set... ignoring", be_state);
+ return;
+ }
+
switch (be_state) {
case XenbusStateUnknown:
case XenbusStateInitialising:
case XenbusStateReconfigured:
break;
}
+
+ /* Refresh last back-end state */
+ l_ctx.last_be_state = be_state;
}
static struct xenbus_driver xen_fe_driver = {
/*
- * Copyright (c) 2013-2017 TRUSTONIC LIMITED
+ * Copyright (c) 2013-2018 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
*/
#ifndef MOBICORE_COMPONENT_BUILD_TAG
#define MOBICORE_COMPONENT_BUILD_TAG \
- "t-base-Exynos-Android-410a-v001-20180329_190510_46723_76543"
+ "t-base-Exynos-Android-410a-V003-20180920_200500_55985_85552"
#endif
pr_info("t-base-tui module: ioctl 0x%x ", cmd);
switch (cmd) {
+ case TUI_IO_SET_RESOLUTION:
+		/* TLC_TUI_CMD_SET_RESOLUTION is for specific platforms
+		 * that rely on onConfigurationChanged to set the resolution;
+		 * it has no effect on the Trustonic reference implementation.
+		 */
+ pr_info("TLC_TUI_CMD_SET_RESOLUTION\n");
+ /* NOT IMPLEMENTED */
+ ret = 0;
+ break;
case TUI_IO_NOTIFY:
pr_info("TUI_IO_NOTIFY\n");
u32 screen_metrics[3];
};
+/* Resolution */
+struct tlc_tui_resolution_t {
+ u32 width;
+ u32 height;
+};
+
/* Command IDs */
/* */
#define TLC_TUI_CMD_NONE 0
#define TLC_TUI_CMD_HIDE_SURFACE 7
#define TLC_TUI_CMD_GET_RESOLUTION 8
+/* TLC_TUI_CMD_SET_RESOLUTION is for specific platforms
+ * that rely on onConfigurationChanged to set the resolution;
+ * it has no effect on the Trustonic reference implementation.
+ */
+#define TLC_TUI_CMD_SET_RESOLUTION 9
+
/* Return codes */
#define TLC_TUI_OK 0
#define TLC_TUI_ERROR 1
#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, struct tlc_tui_command_t)
#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
#define TUI_IO_INIT_DRIVER _IO(TUI_IO_MAGIC, 4)
+#define TUI_IO_SET_RESOLUTION _IOW(TUI_IO_MAGIC, 9, struct tlc_tui_resolution_t)
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
TLC_TUI_CMD_START_ACTIVITY,
dci->cmd_nwd.payload.alloc_data.num_of_buff,
dci->cmd_nwd.payload.alloc_data.alloc_size);
- if (ret != TUI_DCI_OK)
+ if (ret != TUI_DCI_OK) {
+ pr_debug("%s:%d return value is 0x%x.\n", __func__,
+ __LINE__, ret);
break;
+ }
/*****************************************************************************/