int reserved[2];
};
+/*
+ * Compat (32-bit userspace) layout for VIPX_IOC_UNLOAD_KERNEL_BINARY32.
+ * Mirrors struct vipx_ioc_unload_kernel_binary, with compat_timespec so
+ * the field sizes match a 32-bit ABI.
+ */
+struct vipx_ioc_unload_kernel_binary32 {
+	/* presumably the size of this structure — TODO confirm against caller */
+	unsigned int size;
+	/* model identifier; only the model-id bits are used (see
+	 * vipx_kernel_binary_unload) */
+	unsigned int global_id;
+	/* driver result code, written back to userspace */
+	int ret;
+	/* written back to userspace by the put helper */
+	struct compat_timespec timestamp[4];
+	int reserved[2];
+};
+
struct vipx_ioc_load_graph_info32 {
unsigned int size;
struct vipx_common_graph_info graph_info;
#define VIPX_IOC_LOAD_KERNEL_BINARY32 \
_IOWR('V', 0, struct vipx_ioc_load_kernel_binary32)
+#define VIPX_IOC_UNLOAD_KERNEL_BINARY32 \
+ _IOWR('V', 1, struct vipx_ioc_unload_kernel_binary32)
#define VIPX_IOC_LOAD_GRAPH_INFO32 \
- _IOWR('V', 1, struct vipx_ioc_load_graph_info32)
+ _IOWR('V', 2, struct vipx_ioc_load_graph_info32)
#define VIPX_IOC_UNLOAD_GRAPH_INFO32 \
- _IOWR('V', 2, struct vipx_ioc_unload_graph_info32)
+ _IOWR('V', 3, struct vipx_ioc_unload_graph_info32)
#define VIPX_IOC_EXECUTE_SUBMODEL32 \
- _IOWR('V', 3, struct vipx_ioc_execute_submodel32)
+ _IOWR('V', 4, struct vipx_ioc_execute_submodel32)
static int __vipx_ioctl_get_graph32(struct vs4l_graph *karg,
struct vs4l_graph32 __user *uarg)
vipx_leave();
}
+/*
+ * Fetch VIPX_IOC_UNLOAD_KERNEL_BINARY32 arguments from 32-bit userspace.
+ * Only size and global_id are inputs; ret/timestamp/reserved are
+ * output-only and are cleared here before the driver op runs.
+ * Returns 0 on success or -EFAULT if the user copy faults.
+ */
+static int __vipx_ioctl_get_unload_kernel_binary32(
+		struct vipx_ioc_unload_kernel_binary *karg,
+		struct vipx_ioc_unload_kernel_binary32 __user *uarg)
+{
+	int ret;
+
+	vipx_enter();
+	if (get_user(karg->size, &uarg->size) ||
+			get_user(karg->global_id, &uarg->global_id)) {
+		ret = -EFAULT;
+		/* message casing matches the other unload-kernel-binary logs */
+		vipx_err("Copy failed [Unload kernel binary(32)]\n");
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Write the driver result back to 32-bit userspace: the ret code and the
+ * four profiling timestamps, copied field-by-field because the compat
+ * struct layout differs from the native one.  A faulting put_user is only
+ * logged; the ioctl result itself is carried in karg->ret (see the core
+ * layer, which folds errors into args->ret).
+ */
+static void __vipx_ioctl_put_unload_kernel_binary32(
+		struct vipx_ioc_unload_kernel_binary *karg,
+		struct vipx_ioc_unload_kernel_binary32 __user *uarg)
+{
+	vipx_enter();
+	if (put_user(karg->ret, &uarg->ret) ||
+			put_user(karg->timestamp[0].tv_sec,
+				&uarg->timestamp[0].tv_sec) ||
+			put_user(karg->timestamp[0].tv_nsec,
+				&uarg->timestamp[0].tv_nsec) ||
+			put_user(karg->timestamp[1].tv_sec,
+				&uarg->timestamp[1].tv_sec) ||
+			put_user(karg->timestamp[1].tv_nsec,
+				&uarg->timestamp[1].tv_nsec) ||
+			put_user(karg->timestamp[2].tv_sec,
+				&uarg->timestamp[2].tv_sec) ||
+			put_user(karg->timestamp[2].tv_nsec,
+				&uarg->timestamp[2].tv_nsec) ||
+			put_user(karg->timestamp[3].tv_sec,
+				&uarg->timestamp[3].tv_sec) ||
+			put_user(karg->timestamp[3].tv_nsec,
+				&uarg->timestamp[3].tv_nsec)) {
+		vipx_err("Copy failed to user [Unload kernel binary(32)]\n");
+	}
+	vipx_leave();
+}
+
static int __vipx_ioctl_get_load_graph_info32(
struct vipx_ioc_load_graph_info *karg,
struct vipx_ioc_load_graph_info32 __user *uarg)
&uarg->timestamp[3].tv_sec) ||
put_user(karg->timestamp[3].tv_nsec,
&uarg->timestamp[3].tv_nsec)) {
- vipx_err("Copy failed to user [Load kernel binary(32)]\n");
+ vipx_err("Copy failed to user [Unload graph_info(32)]\n");
}
vipx_leave();
}
ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
__vipx_ioctl_put_load_kernel_binary32(&karg.kernel_bin, uarg);
break;
+	/* compat path: fetch args, run the driver op, copy results back */
+	case VIPX_IOC_UNLOAD_KERNEL_BINARY32:
+		ret = __vipx_ioctl_get_unload_kernel_binary32(&karg.unload_kbin,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->unload_kernel_binary(vctx, &karg.unload_kbin);
+		__vipx_ioctl_put_unload_kernel_binary32(&karg.unload_kbin,
+				uarg);
+		break;
case VIPX_IOC_LOAD_GRAPH_INFO32:
ret = __vipx_ioctl_get_load_graph_info32(&karg.load_ginfo,
uarg);
return ret;
}
+/*
+ * Context-layer unload op: drop every kernel binary belonging to the
+ * model identified by unload_kbin->global_id.
+ * Returns 0 on success or a negative errno from
+ * vipx_kernel_binary_unload.
+ */
+static int vipx_context_unload_kernel_binary(struct vipx_context *vctx,
+		struct vipx_ioc_unload_kernel_binary *unload_kbin)
+{
+	int ret;
+
+	vipx_enter();
+	vipx_dbg("[%s] unload kernel binary (framework)\n", __func__);
+	vipx_dbg("model_id : %#x\n", unload_kbin->global_id);
+
+	ret = vipx_kernel_binary_unload(vctx, unload_kbin->global_id);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
static int vipx_context_load_graph_info(struct vipx_context *vctx,
struct vipx_ioc_load_graph_info *ginfo)
{
static const struct vipx_context_ops vipx_context_ops = {
.load_kernel_binary = vipx_context_load_kernel_binary,
+ .unload_kernel_binary = vipx_context_unload_kernel_binary,
.load_graph_info = vipx_context_load_graph_info,
.unload_graph_info = vipx_context_unload_graph_info,
.execute_submodel = vipx_context_execute_submodel
struct vipx_context_ops {
int (*load_kernel_binary)(struct vipx_context *vctx,
struct vipx_ioc_load_kernel_binary *kernel_bin);
+ int (*unload_kernel_binary)(struct vipx_context *vctx,
+ struct vipx_ioc_unload_kernel_binary *unload_kbin);
int (*load_graph_info)(struct vipx_context *vctx,
struct vipx_ioc_load_graph_info *ginfo);
int (*unload_graph_info)(struct vipx_context *vctx,
return 0;
}
+/*
+ * Core-layer unload op: serialize against other context operations via
+ * vctx->lock, then delegate to the context ops.  Errors are reported
+ * through args->ret (copied back to userspace by the ioctl put helper);
+ * the function itself always returns 0 so the ioctl path still writes
+ * the result struct back.
+ */
+static int vipx_core_unload_kernel_binary(struct vipx_context *vctx,
+		struct vipx_ioc_unload_kernel_binary *args)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		/* was "loading" — copy-paste from the load path */
+		vipx_err("Failed to lock for unloading kernel binary (%d)\n",
+				ret);
+		goto p_err_lock;
+	}
+
+	ret = vctx->vops->unload_kernel_binary(vctx, args);
+	if (ret)
+		goto p_err_vops;
+
+	mutex_unlock(&vctx->lock);
+	args->ret = 0;
+	vipx_leave();
+	return 0;
+p_err_vops:
+	mutex_unlock(&vctx->lock);
+p_err_lock:
+	args->ret = ret;
+	/* return value is included in args->ret */
+	return 0;
+}
+
static int vipx_core_load_graph_info(struct vipx_context *vctx,
struct vipx_ioc_load_graph_info *args)
{
.streamoff = vipx_core_streamoff,
.load_kernel_binary = vipx_core_load_kernel_binary,
+ .unload_kernel_binary = vipx_core_unload_kernel_binary,
.load_graph_info = vipx_core_load_graph_info,
.unload_graph_info = vipx_core_unload_graph_info,
.execute_submodel = vipx_core_execute_submodel,
vipx_leave();
}
+/*
+ * Fetch VIPX_IOC_UNLOAD_KERNEL_BINARY arguments from userspace and clear
+ * the output-only fields (timestamp, reserved) before the driver op runs.
+ * Returns 0 on success or -EFAULT if the user copy faults.
+ */
+static int __vipx_ioctl_get_unload_kernel_binary(
+		struct vipx_ioc_unload_kernel_binary *karg,
+		struct vipx_ioc_unload_kernel_binary __user *uarg)
+{
+	int ret;
+
+	vipx_enter();
+	ret = copy_from_user(karg, uarg, sizeof(*uarg));
+	if (ret) {
+		/*
+		 * copy_from_user() returns the number of bytes NOT copied,
+		 * not an errno; log it, but return -EFAULT like the compat
+		 * variant so callers never see a positive "error" code.
+		 */
+		vipx_err("Copy failed [Unload kernel binary] (%d)\n", ret);
+		ret = -EFAULT;
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Copy the completed argument struct (ret code, timestamps) back to
+ * userspace.  A faulting copy_to_user is only logged; the ioctl result
+ * is carried in karg->ret, which the core layer has already set.
+ */
+static void __vipx_ioctl_put_unload_kernel_binary(
+		struct vipx_ioc_unload_kernel_binary *karg,
+		struct vipx_ioc_unload_kernel_binary __user *uarg)
+{
+	int ret;
+
+	vipx_enter();
+	ret = copy_to_user(uarg, karg, sizeof(*karg));
+	if (ret)
+		vipx_err("Copy failed to user [Unload kernel binary]\n");
+
+	vipx_leave();
+}
+
static int __vipx_ioctl_get_load_graph_info(
struct vipx_ioc_load_graph_info *karg,
struct vipx_ioc_load_graph_info __user *uarg)
ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
__vipx_ioctl_put_load_kernel_binary(&karg.kernel_bin, uarg);
break;
+	/* native path: fetch args, run the driver op, copy results back */
+	case VIPX_IOC_UNLOAD_KERNEL_BINARY:
+		ret = __vipx_ioctl_get_unload_kernel_binary(&karg.unload_kbin,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->unload_kernel_binary(vctx, &karg.unload_kbin);
+		__vipx_ioctl_put_unload_kernel_binary(&karg.unload_kbin, uarg);
+		break;
case VIPX_IOC_LOAD_GRAPH_INFO:
ret = __vipx_ioctl_get_load_graph_info(&karg.load_ginfo,
uarg);
int reserved[2];
};
+/*
+ * Native userspace argument struct for VIPX_IOC_UNLOAD_KERNEL_BINARY.
+ * The compat (32-bit) variant mirrors this layout with compat_timespec.
+ */
+struct vipx_ioc_unload_kernel_binary {
+	/* presumably the size of this structure — TODO confirm against caller */
+	unsigned int size;
+	/* model identifier; only the model-id bits are used (see
+	 * vipx_kernel_binary_unload) */
+	unsigned int global_id;
+	/* driver result code, written back to userspace */
+	int ret;
+	/* written back to userspace by the put helper */
+	struct timespec timestamp[4];
+	int reserved[2];
+};
+
struct vipx_ioc_load_graph_info {
unsigned int size;
struct vipx_common_graph_info graph_info;
#define VIPX_IOC_LOAD_KERNEL_BINARY \
_IOWR('V', 0, struct vipx_ioc_load_kernel_binary)
+#define VIPX_IOC_UNLOAD_KERNEL_BINARY \
+ _IOWR('V', 1, struct vipx_ioc_unload_kernel_binary)
#define VIPX_IOC_LOAD_GRAPH_INFO \
- _IOWR('V', 1, struct vipx_ioc_load_graph_info)
+ _IOWR('V', 2, struct vipx_ioc_load_graph_info)
#define VIPX_IOC_UNLOAD_GRAPH_INFO \
- _IOWR('V', 2, struct vipx_ioc_unload_graph_info)
+ _IOWR('V', 3, struct vipx_ioc_unload_graph_info)
#define VIPX_IOC_EXECUTE_SUBMODEL \
- _IOWR('V', 3, struct vipx_ioc_execute_submodel)
+ _IOWR('V', 4, struct vipx_ioc_execute_submodel)
union vipx_ioc_arg {
struct vs4l_graph graph;
struct vs4l_container_list clist;
struct vipx_ioc_load_kernel_binary kernel_bin;
+ struct vipx_ioc_unload_kernel_binary unload_kbin;
struct vipx_ioc_load_graph_info load_ginfo;
struct vipx_ioc_unload_graph_info unload_ginfo;
struct vipx_ioc_execute_submodel exec;
/* dal */
int (*load_kernel_binary)(struct vipx_context *vctx,
struct vipx_ioc_load_kernel_binary *args);
+ int (*unload_kernel_binary)(struct vipx_context *vctx,
+ struct vipx_ioc_unload_kernel_binary *args);
int (*load_graph_info)(struct vipx_context *vctx,
struct vipx_ioc_load_graph_info *args);
int (*unload_graph_info)(struct vipx_context *vctx,
return ret;
}
+/*
+ * Remove every kernel binary on vctx->binary_list whose model id matches
+ * the model id embedded in global_id.  Uses the _safe iterator because
+ * vipx_kernel_binary_remove unlinks entries while walking.
+ *
+ * Always returns 0, even when no entry matched — presumably a deliberate
+ * best-effort semantic; verify against callers.
+ * NOTE(review): no lock is taken here; looks like binary_list is
+ * protected by vctx->lock held in the core layer — confirm.
+ */
+int vipx_kernel_binary_unload(struct vipx_context *vctx, unsigned int global_id)
+{
+	struct vipx_kernel_binary *kbin, *temp;
+	unsigned int kid, mid;
+
+	vipx_enter();
+	mid = GET_COMMON_GRAPH_MODEL_ID(global_id);
+	list_for_each_entry_safe(kbin, temp, &vctx->binary_list, clist) {
+		kid = GET_COMMON_GRAPH_MODEL_ID(kbin->global_id);
+		if (kid == mid)
+			vipx_kernel_binary_remove(kbin);
+	}
+	vipx_leave();
+	return 0;
+}
+
void vipx_kernel_binary_remove(struct vipx_kernel_binary *kbin)
{
struct vipx_context *vctx;
struct vipx_graph_model *gmodel);
int vipx_kernel_binary_add(struct vipx_context *vctx, unsigned int id,
int fd, unsigned int size);
+int vipx_kernel_binary_unload(struct vipx_context *vctx,
+ unsigned int global_id);
void vipx_kernel_binary_remove(struct vipx_kernel_binary *kbin);
void vipx_kernel_binary_all_remove(struct vipx_context *vctx);