iommu-domain_vipx {
compatible = "samsung,exynos-iommu-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges;
- domain-clients = <&vipx>;
+ /* #address-cells = <2>; */
+ /* #size-cells = <1>; */
+ /* ranges; */
+ #dma-address-cells = <1>;
+ #dma-size-cells = <1>;
+ dma-window = <0x30000000 0xB0000000>;
+
+ domain-clients = <&vipx>, <&vipx_vertex>;
};
iommu-domain_abox {
"GATE_VIPX2_QCH",
"GATE_VIPX2_QCH_LOCAL";
- /*samsung,power-domain = <&pd_vipx2>;*/
interrupts = <0 129 0>,
<0 130 0>;
iommus = <&sysmmu_vipx1>, <&sysmmu_vipx2>;
status = "ok";
};
+ vipx_vertex: vipx_vertex@10D60000 {
+ compatible = "samsung,exynos-vipx-vertex";
+ id = <0>;
+ reg = <0x0 0x10D60000 0x10000>, /* VIPX_CPU_SS1 */
+ <0x0 0x10F60000 0x10000>, /* VIPX_CPU_SS2 */
+ <0x0 0x10D90000 0x2000>, /* ITCM(8K) */
+ <0x0 0x10DA0000 0x4000>; /* DTCM(16K) */
+ pinctrl-names = "default","release";
+ pinctrl-0 = <>;
+ pinctrl-1 = <>;
+ clocks = <&clock UMUX_CLKCMU_VIPX1_BUS>,
+ <&clock GATE_VIPX1_QCH>,
+ <&clock UMUX_CLKCMU_VIPX2_BUS>,
+ <&clock GATE_VIPX2_QCH>,
+ <&clock GATE_VIPX2_QCH_LOCAL>;
+ clock-names = "UMUX_CLKCMU_VIPX1_BUS",
+ "GATE_VIPX1_QCH",
+ "UMUX_CLKCMU_VIPX2_BUS",
+ "GATE_VIPX2_QCH",
+ "GATE_VIPX2_QCH_LOCAL";
+
+ interrupts = <0 129 0>,
+ <0 130 0>;
+ iommus = <&sysmmu_vipx1>, <&sysmmu_vipx2>;
+ status = "ok";
+ };
};
CONFIG_VISION_SUPPORT=y
CONFIG_EXYNOS_VIPX=y
CONFIG_EXYNOS_VIPX_EXYNOS9610=y
-CONFIG_EXYNOS_VIPX_HARDWARE=y
+CONFIG_EXYNOS_VIPX_VERTEX=y
+CONFIG_EXYNOS_VIPX_VERTEX_EXYNOS9610=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
menuconfig VISION_SUPPORT
- bool "Vision Support"
- select VISION_CORE
- help
- If you want to use hardware acceleration for vision
- enable this option and other options below.
+ bool "Vision Support"
+ help
+ If you want to use hardware acceleration for vision
+ enable this option and other options below.
if VISION_SUPPORT
-source "drivers/vision/vision-core/Kconfig"
+
source "drivers/vision/vipx/Kconfig"
+
+source "drivers/vision/vipx_vertex/Kconfig"
+
endif # VISION_SUPPORT
-obj-$(CONFIG_VISION_CORE) += vision-core/
-obj-$(CONFIG_EXYNOS_VIPX) += vipx/
+obj-$(CONFIG_EXYNOS_VIPX) += vipx/
+obj-$(CONFIG_EXYNOS_VIPX_VERTEX) += vipx_vertex/
+++ /dev/null
-/* include/media/videobuf2-ion.h
- *
- * Copyright 2011-2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Definition of Android ION memory allocator for videobuf2
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _MEDIA_VIDEOBUF2_ION_H
-#define _MEDIA_VIDEOBUF2_ION_H
-
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-//#include <linux/ion.h>
-//#include <linux/exynos_ion.h>
-#include <linux/err.h>
-#include <linux/dma-buf.h>
-
-/* flags to vb2_ion_create_context
- * These flags are dependet upon heap flags in ION.
- *
- * bit 0 ~ ION_NUM_HEAPS: ion heap flags
- * bit ION_NUM_HEAPS+1 ~ 20: non-ion flags (cached, iommu)
- * bit 21 ~ BITS_PER_INT - 1: ion specific flags
- */
-
-/* DMA of the client device is coherent with CPU */
-#define VB2ION_CTX_COHERENT_DMA (1 << (ION_NUM_HEAPS + 3))
-/* DMA should read from memory instead of CPU cache even if DMA is coherent */
-#define VB2ION_CTX_UNCACHED_READ_DMA (1 << (ION_NUM_HEAPS + 4))
-
-#define VB2ION_CONTIG_ID_NUM 16
-#define VB2ION_NUM_HEAPS 8
-/* below 6 is the above vb2-ion flags (ION_NUM_HEAPS + 1 ~ 6) */
-
-
-struct device;
-struct vb2_buffer;
-
-/* - vb2_ion_set_noncoherent_dma_read(): Forces the DMA to read from system
- * memory even though it is capable of snooping CPU caches.
- */
-void vb2_ion_set_noncoherent_dma_read(struct device *dev, bool noncoherent);
-
-/* Data type of the cookie returned by vb2_plane_cookie() function call.
- * The drivers do not need the definition of this structure. The only reason
- * why it is defined outside of videobuf2-ion.c is to make some functions
- * inline.
- */
-struct vb2_ion_cookie {
- dma_addr_t ioaddr;
- dma_addr_t paddr;
- struct sg_table *sgt;
- off_t offset;
-};
-
-/* vb2_ion_buffer_offset - return the mapped offset of the buffer
- * - cookie: pointer returned by vb2_plane_cookie()
- *
- * Returns offset value that the mapping starts from.
- */
-
-static inline off_t vb2_ion_buffer_offset(void *cookie)
-{
- return IS_ERR_OR_NULL(cookie) ?
- -EINVAL : ((struct vb2_ion_cookie *)cookie)->offset;
-}
-
-/* vb2_ion_phys_address - returns the physical address of the given buffer
- * - cookie: pointer returned by vb2_plane_cookie()
- * - phys_addr: pointer to the store of the physical address of the buffer
- * specified by cookie.
- *
- * Returns -EINVAL if the buffer does not have nor physically contiguous memory.
- */
-static inline int vb2_ion_phys_address(void *cookie, phys_addr_t *phys_addr)
-{
- struct vb2_ion_cookie *vb2cookie = cookie;
-
- if (WARN_ON(!phys_addr || IS_ERR_OR_NULL(cookie)))
- return -EINVAL;
-
- if (vb2cookie->paddr) {
- *phys_addr = vb2cookie->paddr;
- } else {
- if (vb2cookie->sgt && vb2cookie->sgt->nents == 1) {
- *phys_addr = sg_phys(vb2cookie->sgt->sgl) +
- vb2cookie->offset;
- } else {
- *phys_addr = 0;
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/* vb2_ion_dma_address - returns the DMA address that device can see
- * - cookie: pointer returned by vb2_plane_cookie()
- * - dma_addr: pointer to the store of the address of the buffer specified
- * by cookie. It can be either IO virtual address or physical address
- * depending on the specification of allocation context which allocated
- * the buffer.
- *
- * Returns -EINVAL if the buffer has neither IO virtual address nor physically
- * contiguous memory
- */
-static inline int vb2_ion_dma_address(void *cookie, dma_addr_t *dma_addr)
-{
- struct vb2_ion_cookie *vb2cookie = cookie;
-
- if (WARN_ON(!dma_addr || IS_ERR_OR_NULL(cookie)))
- return -EINVAL;
-
- if (vb2cookie->ioaddr == 0)
- return vb2_ion_phys_address(cookie, (phys_addr_t *)dma_addr);
-
- *dma_addr = vb2cookie->ioaddr;
-
- return 0;
-}
-
-/* vb2_ion_get_sg - returns scatterlist of the given cookie.
- * - cookie: pointer returned by vb2_plane_cookie()
- * - nents: pointer to the store of number of elements in the returned
- * scatterlist
- *
- * Returns the scatterlist of the buffer specified by cookie.
- * If the arguments are not correct, returns NULL.
- */
-static inline struct scatterlist *vb2_ion_get_sg(void *cookie, int *nents)
-{
- struct vb2_ion_cookie *vb2cookie = cookie;
-
- if (WARN_ON(!nents || IS_ERR_OR_NULL(cookie)))
- return NULL;
-
- *nents = vb2cookie->sgt->nents;
- return vb2cookie->sgt->sgl;
-}
-
-/* vb2_ion_get_dmabuf - returns dmabuf of the given cookie
- * - cookie: pointer returned by vb2_plane_cookie()
- *
- * Returns dma-buf descriptor with a reference held.
- * NULL if the cookie is invalid or the buffer is not dmabuf.
- * The caller must put the dmabuf when it is no longer need the dmabuf
- */
-struct dma_buf *vb2_ion_get_dmabuf(void *cookie);
-
-/***** Cache mainatenance operations *****/
-void vb2_ion_sync_for_device(void *cookie, off_t offset, size_t size,
- enum dma_data_direction dir);
-void vb2_ion_sync_for_cpu(void *cookie, off_t offset, size_t size,
- enum dma_data_direction dir);
-int vb2_ion_buf_prepare(struct vb2_buffer *vb);
-void vb2_ion_buf_finish(struct vb2_buffer *vb);
-int vb2_ion_buf_prepare_exact(struct vb2_buffer *vb);
-int vb2_ion_buf_finish_exact(struct vb2_buffer *vb);
-
-extern const struct vb2_mem_ops vb2_ion_memops;
-extern struct ion_device *ion_exynos; /* drivers/gpu/ion/exynos/exynos-ion.c */
-
-#endif /* _MEDIA_VIDEOBUF2_ION_H */
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VISION_BUFFER_H_
-#define VISION_BUFFER_H_
-
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/poll.h>
-#include <linux/dma-buf.h>
-#include <linux/time.h>
-
-#include <media/videobuf2-core.h>
-#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
-#include <media/videobuf2-cma-phys.h>
-#elif defined(CONFIG_VIDEOBUF2_DMA_SG)
-//#include <media/videobuf2-dma-sg.h>
-#endif
-
-#include "vision-config.h"
-#include "vs4l.h"
-
-#define VB_MAX_BUFFER VISION_MAX_BUFFER
-#define VB_MAX_PLANES VISION_MAX_PLANE
-
-struct vb_queue;
-
-struct vb_fmt {
- char *name;
- u32 colorspace;
- u32 planes;
- u32 bitsperpixel[VB_MAX_PLANES];
-};
-
-struct vb_format {
- u32 target;
- struct vb_fmt *fmt;
- u32 colorspace;
- u32 plane;
- u32 width;
- u32 height;
- u32 size[VB_MAX_PLANES];
-};
-
-struct vb_format_list {
- u32 count;
- struct vb_format *formats;
-};
-
-struct vb_buffer {
- struct vs4l_roi roi;
- union {
- unsigned long userptr;
- __s32 fd;
- } m;
- struct dma_buf *dbuf;
- void *mem_priv;
- void *cookie;
- void *kvaddr;
- dma_addr_t dvaddr;
- ulong reserved;
-#if 1 // MODERN_ION
- struct dma_buf *dma_buf;
- struct dma_buf_attachment *attachment;
- struct sg_table *sgt;
- dma_addr_t daddr;
- void *vaddr;
- size_t size;
-#endif
-};
-
-struct vb_container {
- u32 type;
- u32 target;
- u32 memory;
- u32 reserved[4];
- u32 count;
- struct vb_buffer *buffers;
- struct vb_format *format;
-};
-
-struct vb_container_list {
- u32 direction;
- u32 id;
- u32 index;
- unsigned long flags;
- struct timeval timestamp[6];
- u32 count;
- u32 user_params[_MAX_NUM_OF_USER_PARAMS];
- struct vb_container *containers;
-};
-
-enum vb_bundle_state {
- VB_BUF_STATE_DEQUEUED,
- VB_BUF_STATE_QUEUED,
- VB_BUF_STATE_PROCESS,
- VB_BUF_STATE_DONE,
-};
-
-struct vb_bundle {
- /* this flag is for internal state */
- unsigned long flags;
- enum vb_bundle_state state;
- struct list_head queued_entry;
- struct list_head process_entry;
- struct list_head done_entry;
-
- struct vb_container_list clist;
-};
-
-struct vb_ops {
- int (*buf_prepare)(struct vb_queue *q, struct vb_container_list *clist);
- int (*buf_unprepare)(struct vb_queue *q, struct vb_container_list *clist);
-};
-
-enum vb_queue_state {
- VB_QUEUE_STATE_FORMAT,
- VB_QUEUE_STATE_START
-};
-
-struct vb_queue {
- u32 direction;
- const char *name;
- unsigned long state;
- unsigned int streaming:1;
- struct mutex *lock;
-
- struct list_head queued_list;
- atomic_t queued_count;
- struct list_head process_list;
- atomic_t process_count;
- struct list_head done_list;
- atomic_t done_count;
-
- spinlock_t done_lock;
- wait_queue_head_t done_wq;
-
- struct vb_format_list format;
- struct vb_bundle *bufs[VB_MAX_BUFFER];
- unsigned int num_buffers;
-
- void *alloc_dev;
- const struct vb2_mem_ops *mem_ops;
- const struct vb_ops *ops;
- void *private_data;
-};
-
-int vb_queue_init(struct vb_queue *q, void *alloc_ctx, const struct vb2_mem_ops *mem_ops, const struct vb_ops *ops, struct mutex *lock, u32 direction);
-int vb_queue_s_format(struct vb_queue *q, struct vs4l_format_list *f);
-int vb_queue_start(struct vb_queue *q);
-int vb_queue_stop(struct vb_queue *q);
-int vb_queue_qbuf(struct vb_queue *q, struct vs4l_container_list *c);
-int vb_queue_dqbuf(struct vb_queue *q, struct vs4l_container_list *c, bool nonblocking);
-void vb_queue_process(struct vb_queue *q, struct vb_bundle *vb);
-void vb_queue_done(struct vb_queue *q, struct vb_bundle *vb);
-
-#define call_memop(q, op, args...) (((q)->mem_ops->op) ? ((q)->mem_ops->op(args)) : 0)
-#define call_op(q, op, args...) (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VISION_CONFIG_H_
-#define VISION_CONFIG_H_
-
-#define VISION_MAX_BUFFER 16
-#define VISION_MAX_PLANE 3
-#define VISION_MAP_KVADDR
-
-/*
-#define probe_info(fmt, ...) pr_info("[V]" fmt, ##__VA_ARGS__)
-#define probe_warn(fmt, args...) pr_warning("[V][WRN]" fmt, ##args)
-#define probe_err(fmt, args...) pr_err("[V][ERR]%s:%d:" fmt, __func__, __LINE__, ##args)
-*/
-#define DISABLE_VISION_LOG 1
-
-#if DISABLE_VISION_LOG
-#define vision_err_target(fmt, ...)
-#define vision_warn_target(fmt, ...)
-#define vision_info_target(fmt, ...)
-#define vision_dbg_target(fmt, ...)
-
-#else
-
-#ifdef DEBUG_LOG_MEMORY
-#define vision_err_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#define vision_warn_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#define vision_info_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#define vision_dbg_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#else
-#define vision_err_target(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
-#define vision_warn_target(fmt, ...) pr_warning(fmt, ##__VA_ARGS__)
-#define vision_info_target(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
-#define vision_dbg_target(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
-#endif
-
-#endif
-#define vision_err(fmt, args...) \
- vision_err_target("[V][ERR]%s:%d:" fmt, __func__, __LINE__, ##args)
-
-#define vision_info(fmt, args...) \
- vision_info_target("[V]" fmt, ##args)
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VISION_DEV_H_
-#define VISION_DEV_H_
-
-#include <linux/fs.h>
-#include <linux/device.h>
-
-#define VISION_NUM_DEVICES 256
-#define VISION_MAJOR 82
-#define VISION_NAME "vision4linux"
-
-enum VISION_DEVICE_TYPE {
- VISION_DEVICE_TYPE_VERTEX
-};
-
-struct vision_file_ops {
- struct module *owner;
- int (*open) (struct file *);
- int (*release) (struct file *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
- long (*ioctl) (struct file *, unsigned int, unsigned long);
- long (*compat_ioctl)(struct file *file, unsigned int, unsigned long);
-};
-
-enum VISION_FLAGS {
- VISION_FL_REGISTERED
-};
-
-struct vision_device {
- int minor;
- char name[32];
- unsigned long flags;
- int index;
- u32 type;
-
- /* device ops */
- const struct vision_file_ops *fops;
-
- /* sysfs */
- struct device dev;
- struct cdev *cdev;
- struct device *parent;
-
- /* vb2_queue associated with this device node. May be NULL. */
- //struct vb2_queue *queue;
-
-
- int debug; /* Activates debug level*/
-
- /* callbacks */
- void (*release)(struct vision_device *vdev);
-
- /* ioctl callbacks */
- const struct vertex_ioctl_ops *ioctl_ops;
- //DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
-
- /* serialization lock */
- //DECLARE_BITMAP(disable_locking, BASE_VIDIOC_PRIVATE);
- struct mutex *lock;
-};
-
-struct vision_device *vision_devdata(struct file *file);
-int vision_register_device(struct vision_device *vdev, int minor, struct module *owner);
-
-#endif
\ No newline at end of file
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "vs4l.h"
-
-#ifndef VISION_IOCTL_H_
-#define VISION_IOCTL_H_
-
-struct vertex_ioctl_ops {
- int (*vertexioc_s_graph)(struct file *file, struct vs4l_graph *graph);
- int (*vertexioc_s_format)(struct file *file, struct vs4l_format_list *flist);
- int (*vertexioc_s_param)(struct file *file, struct vs4l_param_list *plist);
- int (*vertexioc_s_ctrl)(struct file *file, struct vs4l_ctrl *ctrl);
- int (*vertexioc_qbuf)(struct file *file, struct vs4l_container_list *clist);
- int (*vertexioc_dqbuf)(struct file *file, struct vs4l_container_list *clist);
- int (*vertexioc_streamon)(struct file *file);
- int (*vertexioc_streamoff)(struct file *file);
-};
-
-long vertex_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-long vertex_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg);
-
-#endif
-
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VISION_FOR_LINUX_H_
-#define VISION_FOR_LINUX_H_
-
-#define VS4L_VERSION 5
-#define VS4L_TARGET_SC 0xFFFF
-#define VS4L_TARGET_SC_SHIFT 16
-#define VS4L_TARGET_PU 0xFFFF
-#define VS4L_TARGET_PU_SHIFT 0
-#define _MAX_NUM_OF_USER_PARAMS 180
-
-
-enum vs4l_graph_flag {
- VS4L_GRAPH_FLAG_FIXED,
- VS4L_GRAPH_FLAG_SHARED_AMONG_SUBCHAINS,
- VS4L_GRAPH_FLAG_SHARING_AMONG_SUBCHAINS_PREFERRED,
- VS4L_GRAPH_FLAG_SHARED_AMONG_TASKS,
- VS4L_GRAPH_FLAG_SHARING_AMONG_TASKS_PREFERRED,
- VS4L_GRAPH_FLAG_DSBL_LATENCY_BALANCING,
- VS4L_STATIC_ALLOC_LARGE_MPRB_INSTEAD_SMALL_FLAG,
- VS4L_STATIC_ALLOC_PU_INSTANCE_LSB,
- VS4L_GRAPH_FLAG_PRIMITIVE,
- VS4L_GRAPH_FLAG_EXCLUSIVE,
- VS4L_GRAPH_FLAG_PERIODIC,
- VS4L_GRAPH_FLAG_ROUTING,
- VS4L_GRAPH_FLAG_BYPASS,
- VS4L_GRAPH_FLAG_END
-};
-
-struct vs4l_graph {
- __u32 id;
- __u32 priority;
- __u32 time; /* in millisecond */
- __u32 flags;
- __u32 size;
- unsigned long addr;
-};
-
-struct vs4l_format {
- __u32 target;
- __u32 format;
- __u32 plane;
- __u32 width;
- __u32 height;
-};
-
-struct vs4l_format_list {
- __u32 direction;
- __u32 count;
- struct vs4l_format *formats;
-};
-
-struct vs4l_param {
- __u32 target;
- unsigned long addr;
- __u32 offset;
- __u32 size;
-};
-
-struct vs4l_param_list {
- __u32 count;
- struct vs4l_param *params;
-};
-
-struct vs4l_ctrl {
- __u32 ctrl;
- __u32 value;
-};
-
-struct vs4l_roi {
- __u32 x;
- __u32 y;
- __u32 w;
- __u32 h;
-};
-
-struct vs4l_buffer {
- struct vs4l_roi roi;
- union {
- unsigned long userptr;
- __s32 fd;
- } m;
- unsigned long reserved;
-};
-
-enum vs4l_buffer_type {
- VS4L_BUFFER_LIST,
- VS4L_BUFFER_ROI,
- VS4L_BUFFER_PYRAMID
-};
-
-enum vs4l_memory {
- VS4L_MEMORY_USERPTR = 1,
- VS4L_MEMORY_VIRTPTR,
- VS4L_MEMORY_DMABUF
-};
-
-struct vs4l_container {
- __u32 type;
- __u32 target;
- __u32 memory;
- __u32 reserved[4];
- __u32 count;
- struct vs4l_buffer *buffers;
-};
-
-enum vs4l_direction {
- VS4L_DIRECTION_IN = 1,
- VS4L_DIRECTION_OT
-};
-
-enum vs4l_cl_flag {
- VS4L_CL_FLAG_TIMESTAMP,
- VS4L_CL_FLAG_PREPARE = 8,
- VS4L_CL_FLAG_INVALID,
- VS4L_CL_FLAG_DONE
-};
-
-struct vs4l_container_list {
- __u32 direction;
- __u32 id;
- __u32 index;
- __u32 flags;
- struct timeval timestamp[6];
- __u32 count;
- __u32 user_params[_MAX_NUM_OF_USER_PARAMS];
- struct vs4l_container *containers;
-};
-
-/*
- * F O U R C C C O D E S F O R V I S I O N
- *
- */
-
-#define VS4L_DF_IMAGE(a, b, c, d) ((a) | (b << 8) | (c << 16) | (d << 24))
-#define VS4L_DF_IMAGE_RGB VS4L_DF_IMAGE('R', 'G', 'B', '2')
-#define VS4L_DF_IMAGE_RGBX VS4L_DF_IMAGE('R', 'G', 'B', 'A')
-#define VS4L_DF_IMAGE_NV12 VS4L_DF_IMAGE('N', 'V', '1', '2')
-#define VS4L_DF_IMAGE_NV21 VS4L_DF_IMAGE('N', 'V', '2', '1')
-#define VS4L_DF_IMAGE_YV12 VS4L_DF_IMAGE('Y', 'V', '1', '2')
-#define VS4L_DF_IMAGE_I420 VS4L_DF_IMAGE('I', '4', '2', '0')
-#define VS4L_DF_IMAGE_I422 VS4L_DF_IMAGE('I', '4', '2', '2')
-#define VS4L_DF_IMAGE_YUYV VS4L_DF_IMAGE('Y', 'U', 'Y', 'V')
-#define VS4L_DF_IMAGE_YUV4 VS4L_DF_IMAGE('Y', 'U', 'V', '4')
-#define VS4L_DF_IMAGE_U8 VS4L_DF_IMAGE('U', '0', '0', '8')
-#define VS4L_DF_IMAGE_U16 VS4L_DF_IMAGE('U', '0', '1', '6')
-#define VS4L_DF_IMAGE_U32 VS4L_DF_IMAGE('U', '0', '3', '2')
-#define VS4L_DF_IMAGE_S16 VS4L_DF_IMAGE('S', '0', '1', '6')
-#define VS4L_DF_IMAGE_S32 VS4L_DF_IMAGE('S', '0', '3', '2')
-
-/*
- * I O C T L C O D E S F O R V E R T E X D E V I C E
- *
- */
-
-#define VS4L_VERTEXIOC_S_GRAPH _IOW('V', 0, struct vs4l_graph)
-#define VS4L_VERTEXIOC_S_FORMAT _IOW('V', 1, struct vs4l_format_list)
-#define VS4L_VERTEXIOC_S_PARAM _IOW('V', 2, struct vs4l_param_list)
-#define VS4L_VERTEXIOC_S_CTRL _IOW('V', 3, struct vs4l_ctrl)
-#define VS4L_VERTEXIOC_STREAM_ON _IO('V', 4)
-#define VS4L_VERTEXIOC_STREAM_OFF _IO('V', 5)
-#define VS4L_VERTEXIOC_QBUF _IOW('V', 6, struct vs4l_container_list)
-#define VS4L_VERTEXIOC_DQBUF _IOW('V', 7, struct vs4l_container_list)
-
-#endif
menuconfig EXYNOS_VIPX
- bool "Exynos VIPx driver"
- select EXYNOS_VIPX_PLATFORM
- select EXYNOS_VIPX_INTERFACE
- help
- This is a vision image processing unit
+ bool "Exynos VIPx driver"
+ select EXYNOS_VIPX_PLATFORM
+ help
+ This is a vision image processing unit
if EXYNOS_VIPX
source "drivers/vision/vipx/platform/Kconfig"
-source "drivers/vision/vipx/interface/Kconfig"
endif
-obj-y += vipx-debug.o
+#
+# Makefile for the VIPx common driver
+#
+
obj-y += vipx-binary.o
-obj-y += vipx-time.o
-obj-y += vipx-taskmgr.o
+obj-y += vipx-compat-ioctl.o
+obj-y += vipx-ioctl.o
+obj-y += vipx-debug.o
+obj-y += vipx-device.o
obj-y += vipx-graph.o
obj-y += vipx-graphmgr.o
-obj-y += vipx-queue.o
-obj-y += vipx-vertex.o
-obj-y += vipx-device.o
+obj-y += vipx-interface.o
+obj-y += vipx-io.o
+obj-y += vipx-mailbox.o
obj-y += vipx-memory.o
+obj-y += vipx-queue.o
+obj-y += vipx-slab.o
obj-y += vipx-system.o
-obj-y += vipx-io.o
-obj-$(CONFIG_EXYNOS_VIPX_INTERFACE) += interface/
+obj-y += vipx-taskmgr.o
+obj-y += vipx-time.o
+obj-y += vipx-core.o
+obj-y += vipx-context.o
+obj-y += vipx-kernel-binary.o
+obj-y += vipx-util.o
+
obj-$(CONFIG_EXYNOS_VIPX_PLATFORM) += platform/
-EXTRA_CFLAGS += -Idrivers/vision/include -Idrivers/vision/vipx/include
+ccflags-y += -Idrivers/vision/vipx
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * Version history
- * 2017.06.27 : Initial draft
- * 2017.08.25 : Released
- * 2017.10.12 : Add BOOTUP_RSP
- */
-
-#ifndef AP_VIP_IF_H_
-#define AP_VIP_IF_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// temp for previous host ver
-#if 0
-#define AP_FOCAL_ZONE_SETTING 0
-#else
-#define AP_FOCAL_ZONE_SETTING 1
-#endif
-/**
- Typedefs
-*/
-typedef u32 dram_addr_t;
-typedef u32 graph_id_t;
-typedef u32 job_id_t;
-
-/**
- Message type
-*/
-enum
-{
- BOOTUP_RSP = 1,
- INIT_REQ,
- INIT_RSP,
- CREATE_GRAPH_REQ,
- CREATE_GRAPH_RSP,
- SET_GRAPH_REQ,
- SET_GRAPH_RSP,
- INVOKE_GRAPH_REQ,
- INVOKE_GRAPH_ACK,
- INVOKE_GRAPH_RSP,
- DESTROY_GRAPH_REQ,
- DESTROY_GRAPH_RSP,
- ABORT_GRAPH_REQ,
- ABORT_GRAPH_RSP,
- POWER_DOWN_REQ,
- POWER_DOWN_RSP,
- MAX_MSG_TYPE,
-};
-
-typedef u32 vipx_msgid_e;
-
-/**
- Constants
-*/
-enum
-{
-// temp for previous host ver
-#if 0
-MAX_NUM_OF_INPUTS = 16,
-#else
- MAX_NUM_OF_INPUTS = 24,
-#endif
-#if 0
- MAX_NUM_OF_OUTPUTS = 12,
-#else
- MAX_NUM_OF_OUTPUTS = 8,
-#endif
-// temp for previous host ver
-#if 0
- MAX_NUM_OF_USER_PARAMS = 8,
-#else
- MAX_NUM_OF_USER_PARAMS = 180,
-#endif
-};
-
-/**
- Error indications
-*/
-enum
-{
- SUCCESS = 0,
- ERROR_INVALID_GRAPH_ID = -1,
- ERROR_EXEC_PARAM_CORRUPTION = -2,
- ERROR_GRAPH_DATA_CORRUPTION = -3,
- ERROR_COMPILED_GRAPH_ENABLED = -4,
- ERROR_INVALID_MSG_TYPE = -5,
- ERROR_UNKNOWN = -6,
- ERROR_INVALID = -7,
- ERROR_MBX_BUSY = -8,
-};
-
-
-typedef s32 vipx_error_e;
-
-/**
- Predefined builtin algorithms.
-*/
-
-enum
-{
- SCENARIO_DE = 1,
- SCENARIO_SDOF_FULL,
- SCENARIO_DE_SDOF,
- SCENARIO_DE_CAPTURE,
- SCENARIO_SDOF_CAPTURE,
- SCENARIO_DE_SDOF_CAPTURE,
- SCENARIO_GDC,
- SCENARIO_ENF,
- SCENARIO_ENF_UV,
- SCENARIO_ENF_YUV,
- SCENARIO_BLEND,
- SCENARIO_AP_MAX,
-};
-
-#define SCENARIO_MAX SCENARIO_AP_MAX
-
-typedef u32 vipx_builtin_graph_id_e;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_bootup_rsp_t;
-
-typedef struct
-{
- dram_addr_t p_cc_heap; //< pointer to CC heap region
- u32 sz_cc_heap; //< size of CC heap region
-} __attribute__((__packed__)) vipx_init_req_t;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_init_rsp_t;
-
-typedef struct
-{
- dram_addr_t p_graph; //< pointer to compiled graph
- u32 sz_graph; //< size of compiled graph
-} __attribute__((__packed__)) vipx_create_graph_req_t;
-
-typedef struct
-{
- vipx_error_e error;
- graph_id_t graph_id; //< graph id that should be used for invocation
-} __attribute__((__packed__)) vipx_create_graph_rsp_t;
-
-typedef struct
-{
- graph_id_t graph_id;
-
- dram_addr_t p_lll_bin; //< pointer to VIP binary in DRAM
- u32 sz_lll_bin; //< size of VIP binary in DRAM
-
- int num_inputs; //< number of inputs for the graph
- int num_outputs; //< number of outputs for the graph
-
- u32 input_width[MAX_NUM_OF_INPUTS]; //< array of input width sizes
- u32 input_height[MAX_NUM_OF_INPUTS]; //< array of input height sizes
- u32 input_depth[MAX_NUM_OF_INPUTS]; //< array of input depth sizes
-
- u32 output_width[MAX_NUM_OF_OUTPUTS]; //< array of output width sizes
- u32 output_height[MAX_NUM_OF_OUTPUTS]; //< array of output height sizes
- u32 output_depth[MAX_NUM_OF_OUTPUTS]; //< array of output depth sizes
-
- dram_addr_t p_temp; //< pointer to DRAM area for temporary buffers
- u32 sz_temp; //< size of temporary buffer area
-} __attribute__((__packed__)) vipx_set_graph_req_t;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_set_graph_rsp_t;
-
-typedef struct
-{
- graph_id_t graph_id; //< graph id that should be used for invocation
- int num_inputs; //< number of inputs for the graph
- int num_outputs; //< number of outputs for the graph
- dram_addr_t p_input[MAX_NUM_OF_INPUTS]; //< array of input buffers
- dram_addr_t p_output[MAX_NUM_OF_OUTPUTS]; //< array of output buffers
-#if AP_FOCAL_ZONE_SETTING
- u32 user_params[MAX_NUM_OF_USER_PARAMS]; //< array of user parameters
-#endif
-} __attribute__((__packed__)) vipx_invoke_graph_req_t;
-
-typedef struct
-{
- vipx_error_e error;
- job_id_t job_id;
-} __attribute__((__packed__)) vipx_invoke_graph_ack_t;
-
-typedef struct
-{
- vipx_error_e error;
- job_id_t job_id;
-} __attribute__((__packed__)) vipx_invoke_graph_rsp_t;
-
-typedef struct
-{
- job_id_t job_id;
-} __attribute__((__packed__)) vipx_abort_graph_req_t;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_abort_graph_rsp_t;
-
-typedef struct
-{
- graph_id_t graph_id;
-} __attribute__((__packed__)) vipx_destroy_graph_req_t;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_destroy_graph_rsp_t;
-
-typedef struct
-{
- uint32_t valid;
-} __attribute__((__packed__)) vipx_powerdown_req_t;
-
-typedef struct
-{
- vipx_error_e error;
-} __attribute__((__packed__)) vipx_powerdown_rsp_t;
-
-
-union __attribute__((__packed__)) vipx_messages_u
-{
- vipx_bootup_rsp_t bootup_rsp;
- vipx_init_req_t init_req;
- vipx_init_rsp_t init_rsp;
- vipx_create_graph_req_t create_graph_req;
- vipx_create_graph_rsp_t create_graph_rsp;
- vipx_set_graph_req_t set_graph_req;
- vipx_set_graph_rsp_t set_graph_rsp;
- vipx_invoke_graph_req_t invoke_graph_req;
- vipx_invoke_graph_ack_t invoke_graph_ack;
- vipx_invoke_graph_rsp_t invoke_graph_rsp;
- vipx_destroy_graph_req_t destroy_graph_req;
- vipx_destroy_graph_rsp_t destroy_graph_rsp;
- vipx_abort_graph_req_t abort_graph_req;
- vipx_abort_graph_rsp_t abort_graph_rsp;
- vipx_powerdown_req_t powerdown_req;
- vipx_powerdown_rsp_t powerdown_rsp;
-};
-
-typedef struct
-{
- // TODO: valid flag to be removed.
- uint32_t valid;
- uint32_t transId;
- vipx_msgid_e type;
- union vipx_messages_u msg_u;
-} __attribute__((__packed__)) vipx_msg_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // AP_VIP_IF_H_
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_BINARY_H_
-#define VIPX_BINARY_H_
-
-#include <linux/device.h>
-#include <linux/firmware.h>
-
-#define VIPX_FW_PATH1 "/data/"
-#define VIPX_FW_PATH2 "/vendor/firmware/"
-
-#define VIPX_FW_DRAM_NAME "CC_DRAM_CODE_FLASH.bin"
-#define VIPX_FW_ITCM_NAME "CC_ITCM_CODE_FLASH.bin"
-#define VIPX_FW_DTCM_NAME "CC_DTCM_CODE_FLASH.bin"
-
-#define VIPX_FW_NAME_LEN 100
-#define VIPX_VERSION_SIZE 42
-
-struct vipx_binary {
- struct device *dev;
-};
-
-int vipx_binary_init(struct vipx_binary *binary, struct device *dev);
-
-int vipx_binary_read(struct vipx_binary *binary,
- char *path,
- char *name,
- void *target,
- size_t target_size);
-int vipx_binary_write(struct vipx_binary *binary,
- char *path,
- char *name,
- void *target,
- size_t target_size);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VIPX_CONFIG_H_
-#define VIPX_CONFIG_H_
-
-/*
- * =================================================================================================
- * CONFIG - GLOBAL OPTIONS
- * =================================================================================================
- */
-
-#define VIPX_MAX_BUFFER 16
-#define VIPX_MAX_PLANE 3
-
-#define VIPX_MAX_GRAPH 32
-#define VIPX_MAX_TASK VIPX_MAX_BUFFER
-
-/* this macro determines schedule period, the unit is mile second */
-#define VIPX_TIME_TICK 5
-/*
- * =================================================================================================
- * CONFIG -PLATFORM CONFIG
- * =================================================================================================
- */
-#define VIPX_AHB_BASE_ADDR 0x20200000
-#define VIPX_STOP_WAIT_COUNT 200
-
-
-/*
- * =================================================================================================
- * CONFIG - FEATURE ENABLE
- * =================================================================================================
- */
-
-/* #define VIPX_DYNAMIC_RESOURCE */
-
-/*
- * =================================================================================================
- * CONFIG - DEBUG OPTIONS
- * =================================================================================================
- */
-
-/* #define DBG_STREAMING */
-#define DBG_HISTORY
-/* #define DBG_INTERFACE_ISR */
-/* #define DBG_TIMEMEASURE */
-#define DBG_MAP_KVADDR
-/* #define DBG_RESOURCE */
-#define DBG_MARKING
-/* #define DBG_HW_SFR */
-/* #define DBG_PRINT_TASK */
-/* #define DBG_VERBOSE_IO */
-
-#if 0
-//#define DUMP_DEBUG_LOG_REGION
-#define DEBUG_LOG_MEMORY
-//#define PRINT_DBG
-#endif
-#define DISABLE_VIPX_LOG 0
-
-#if DISABLE_VIPX_LOG
-#define probe_info(fmt, ...)
-#define probe_warn(fmt, args...)
-#define probe_err(fmt, args...)
-#define vipx_err_target(fmt, ...)
-#define vipx_warn_target(fmt, ...)
-#define vipx_info_target(fmt, ...)
-#define vipx_dbg_target(fmt, ...)
-
-#else
-
-#define probe_info(fmt, ...) pr_info("[V]" fmt, ##__VA_ARGS__)
-#define probe_warn(fmt, args...) pr_warning("[V][WRN]" fmt, ##args)
-#define probe_err(fmt, args...) pr_err("[V][ERR]%s:%d:" fmt, __func__, __LINE__, ##args)
-
-#ifdef DEBUG_LOG_MEMORY
-#define vipx_err_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#define vipx_warn_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#define vipx_info_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#ifdef PRINT_DBG
-#define vipx_dbg_target(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
-#else
-#define vipx_dbg_target(fmt, ...)
-#endif
-#else
-#define vipx_err_target(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
-#define vipx_warn_target(fmt, ...) pr_warning(fmt, ##__VA_ARGS__)
-#define vipx_info_target(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
-#define vipx_dbg_target(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
-#endif
-
-#endif // DISABLE_ALL_LOG
-
-
-#define vipx_err(fmt, args...) \
- vipx_err_target("[V][ERR]%s:%d:" fmt, __func__, __LINE__, ##args)
-
-#define vipx_ierr(fmt, vctx, args...) \
- vipx_err_target("[V][I%d][ERR]%s:%d:" fmt, vctx->idx, __func__, __LINE__, ##args)
-
-#define vipx_irerr(fmt, vctx, task, args...) \
- vipx_err_target("[V][I%d][F%d][ERR]%s:%d:" fmt, vctx->idx, task->id, __func__, __LINE__, ##args)
-
-#define vipx_warn(fmt, args...) \
- vipx_warn_target("[V][WRN]%s:%d:" fmt, __func__, __LINE__, ##args)
-
-#define vipx_iwarn(fmt, vctx, args...) \
- vipx_warn_target("[V][I%d][WRN]%s:%d:" fmt, vctx->idx, __func__, __LINE__, ##args)
-
-#define vipx_irwarn(fmt, vctx, task, args...) \
- vipx_warn_target("[V][I%d][F%d][WRN]%s:%d:" fmt, vctx->idx, task->id, __func__, __LINE__, ##args)
-
-#define vipx_info(fmt, args...) \
- vipx_info_target("[V]%s:%d:" fmt, __func__, __LINE__, ##args)
-
-#define vipx_iinfo(fmt, vctx, args...) \
- vipx_info_target("[V][I%d]" fmt, vctx->idx, ##args)
-
-#define vipx_irinfo(fmt, vctx, task, args...) \
- vipx_info_target("[V][I%d][F%d]" fmt, vctx->idx, task->id, ##args)
-
-#define vipx_dbg(fmt, args...) \
- vipx_dbg_target("[V]" fmt, ##args)
-
-#define vipx_idbg(fmt, vctx, args...) \
- vipx_dbg_target("[V][I%d]" fmt, vctx->idx, ##args)
-
-#define vipx_irdbg(fmt, vctx, task, args...) \
- vipx_dbg_target("[V][I%d][F%d]" fmt, vctx->idx, task->id, ##args)
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_CONTROL_H_
-#define VIPX_CONTROL_H_
-
-#define VISION_CTRL_VIPX_BASE 0x00010000
-
-#define VIPX_CTRL_DUMP (VISION_CTRL_VIPX_BASE + 1)
-#define VIPX_CTRL_MODE (VISION_CTRL_VIPX_BASE + 2)
-#define VIPX_CTRL_TEST (VISION_CTRL_VIPX_BASE + 3)
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_DEBUG_H_
-#define VIPX_DEBUG_H_
-
-#include <linux/types.h>
-#include <linux/timer.h>
-
-#include "vipx-config.h"
-
-#define DEBUG_SENTENCE_MAX 300
-#define DEBUG_MONITORING_PERIOD (HZ * 5)
-
-struct vipx_debug_imgdump {
- struct dentry *file;
- u32 target_graph;
- u32 target_chain;
- u32 target_pu;
- u32 target_index;
- void *kvaddr;
- void *cookie;
- size_t length;
- size_t offset;
-};
-
-struct vipx_debug_monitor {
- struct timer_list timer;
- u32 time_cnt;
- u32 tick_cnt;
- u32 sched_cnt;
- u32 done_cnt;
-};
-
-enum vipx_debug_state {
- VIPX_DEBUG_STATE_START
-};
-
-struct vipx_debug {
- unsigned long state;
-
- struct dentry *root;
- struct dentry *logfile;
- struct dentry *grpfile;
- struct dentry *buffile;
-
- /* graph */
- void *graphmgr_data;
- void *system_data;
-
- struct vipx_debug_imgdump imgdump;
- struct vipx_debug_monitor monitor;
-};
-
-struct vipx_debug_log {
- size_t dsentence_pos;
- char dsentence[DEBUG_SENTENCE_MAX];
-};
-
-s32 atoi(const char *psz_buf);
-int bitmap_scnprintf(char *buf, unsigned int buflen,
- const unsigned long *maskp, int nmaskbits);
-
-int vipx_debug_probe(struct vipx_debug *debug, void *graphmgr_data, void *interface_data);
-int vipx_debug_open(struct vipx_debug *debug);
-int vipx_debug_close(struct vipx_debug *debug);
-int vipx_debug_start(struct vipx_debug *debug);
-int vipx_debug_stop(struct vipx_debug *debug);
-
-void vipx_dmsg_concate(struct vipx_debug_log *log, const char *fmt, ...);
-char * vipx_dmsg_print(struct vipx_debug_log *log);
-int vipx_debug_memdump8(u8 *start, u8 *end);
-int vipx_debug_memdump16(u16 *start, u16 *end);
-int vipx_debug_memdump32(u32 *start, u32 *end);
-
-#ifdef DBG_HISTORY
-#define DLOG_INIT() struct vipx_debug_log vipx_debug_log = { .dsentence_pos = 0 }
-#define DLOG(fmt, ...) vipx_dmsg_concate(&vipx_debug_log, fmt, ##__VA_ARGS__)
-#define DLOG_OUT() vipx_dmsg_print(&vipx_debug_log)
-#else
-#define DLOG_INIT()
-#define DLOG(fmt, ...)
-#define DLOG_OUT() "FORBIDDEN HISTORY"
-#endif
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPX driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_EXYNOS_H_
-#define VIPX_EXYNOS_H_
-
-#include <linux/clk.h>
-#include <linux/device.h>
-
-struct vipx_exynos;
-
-enum {
- VIPX_REG_CPU_SS1 = 0,
- VIPX_REG_CPU_SS2 = 1,
- VIPX_REG_MAX_CNT,
-};
-
-struct vipx_regblock {
- unsigned int offset;
- unsigned int blocks;
- char *name;
-};
-
-enum regdata_type {
- /* read write */
- RW = 0,
- /* read only */
- RO = 1,
- /* write only */
- WO = 2,
- /* write input */
- WI = 2,
- /* clear after read */
- RAC = 3,
- /* write 1 -> clear */
- W1C = 4,
- /* write read input */
- WRI = 5,
- /* write input */
- RWI = 5,
- /* only scaler */
- R_W = 6,
- /* read & write for clear */
- RWC = 7,
- /* read & write as dual setting */
- RWS
-};
-
-struct vipx_reg {
- unsigned int offset;
- char *name;
-};
-
-struct vipx_field {
- char *name;
- unsigned int bit_start;
- unsigned int bit_width;
- enum regdata_type type;
-};
-
-struct vipx_clk {
- const char *name;
- struct clk *clk;
-};
-
-struct vipx_clk_ops {
- int (*clk_cfg)(struct vipx_exynos *exynos);
- int (*clk_on)(struct vipx_exynos *exynos);
- int (*clk_off)(struct vipx_exynos *exynos);
- int (*clk_dump)(struct vipx_exynos *exynos);
-};
-
-struct vipx_ctl_ops {
- int (*ctl_reset)(struct vipx_exynos *exynos, bool hold);
- int (*ctl_dump)(struct vipx_exynos *exynos, u32 instance);
- void * (*ctl_remap)(struct vipx_exynos *exynos, u32 instance);
- int (*ctl_unmap)(struct vipx_exynos *exynos, u32 instance, void *base);
- int (*ctl_trigger)(struct vipx_exynos *exynos, u32 chain_id);
- int (*ctl_start)(struct vipx_exynos *exynos, u32 chain_id);
-};
-
-struct vipx_exynos {
- struct device *dev;
- void **regbase;
- void *ram0base;
- void *ram1base;
- struct pinctrl *pinctrl;
- const struct vipx_clk_ops *clk_ops;
- const struct vipx_ctl_ops *ctl_ops;
-};
-
-int vipx_exynos_probe(struct vipx_exynos *exynos, struct device *dev,
- void **regs, void *ram0, void *ram1);
-
-void vipx_readl(void __iomem *base_addr, struct vipx_reg *reg, u32 *val);
-void vipx_writel(void __iomem *base_addr, struct vipx_reg *reg, u32 val);
-
-#define CLK_OP(exynos, op) (exynos->clk_ops ? exynos->clk_ops->op(exynos) : 0)
-#define CTL_OP(exynos, op, ...) (exynos->ctl_ops ? exynos->ctl_ops->op(exynos, ##__VA_ARGS__) : 0)
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VIPX_INTERFACE_H_
-#define VIPX_INTERFACE_H_
-
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/timer.h>
-
-#include "vipx-taskmgr.h"
-#include "vipx-slab.h"
-
-#define VIPX_WORK_MAX_COUNT 20
-#define VIPX_WORK_MAX_DATA 24
-#define VIPX_COMMAND_TIMEOUT (3 * HZ)
-
-typedef enum {
- VIPX_WORK_MTYPE_BLK,
- VIPX_WORK_MTYPE_NBLK,
-} vipx_work_msg_t;
-
-struct vipx_work {
- struct list_head list;
- u32 valid:1;
- u32 id;
- vipx_work_msg_t message;
- u32 work_param0;
- u32 work_param1;
- u32 work_param2;
- u32 work_param3;
- u8 data[VIPX_WORK_MAX_DATA];
-};
-
-struct vipx_work_list {
- struct vipx_work work[VIPX_WORK_MAX_COUNT];
- spinlock_t slock;
- struct list_head free_head;
- u32 free_cnt;
- struct list_head reply_head;
- u32 reply_cnt;
- wait_queue_head_t wait_queue;
-};
-
-enum vipx_interface_state {
- VIPX_ITF_STATE_OPEN,
- VIPX_ITF_STATE_BOOTUP,
- VIPX_ITF_STATE_ENUM,
- VIPX_ITF_STATE_START
-};
-
-struct vipx_interface {
- void *mbox;
- size_t mbox_size;
- void __iomem *regs;
- resource_size_t regs_size;
- unsigned long state;
-
- struct vipx_slab_allocator slab;
- struct vipx_taskmgr taskmgr;
- void *cookie;
- u32 done_cnt;
-
- struct vipx_task *request[VIPX_MAX_GRAPH];
- struct mutex request_barrier;
- struct vipx_work reply[VIPX_MAX_GRAPH];
- wait_queue_head_t reply_queue;
- struct vipx_task *process;
- spinlock_t process_barrier;
-
- struct vipx_work_list work_list;
- struct work_struct work_queue;
-
-#if defined(CONFIG_EXYNOS_EMUL_MBOX)
- struct timer_list timer;
-#endif
- void *private_data;
-};
-
-int vipx_interface_probe(struct vipx_interface *interface,
- struct device *dev,
- void __iomem *regs,
- resource_size_t regs_size,
- u32 irq0, u32 irq1);
-int vipx_interface_open(struct vipx_interface *interface,
- void *mbox, size_t mbox_size);
-int vipx_interface_close(struct vipx_interface *interface);
-int vipx_interface_start(struct vipx_interface *interface);
-int vipx_interface_stop(struct vipx_interface *interface);
-void vipx_interface_print(struct vipx_interface *interface);
-
-int vipx_hw_wait_bootup(struct vipx_interface *interface);
-
-int vipx_hw_enum(struct vipx_interface *interface);
-int vipx_hw_init(struct vipx_interface *interface, struct vipx_task *itask);
-int vipx_hw_deinit(struct vipx_interface *interface, struct vipx_task *itask);
-int vipx_hw_create(struct vipx_interface *interface, struct vipx_task *itask);
-int vipx_hw_destroy(struct vipx_interface *interface, struct vipx_task *itask);
-int vipx_hw_config(struct vipx_interface *interface, struct vipx_task *itask);
-int vipx_hw_process(struct vipx_interface *interface, struct vipx_task *itask);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2016 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __VIPX_IO_H__
-#define __VIPX_IO_H__
-
-#include <linux/io.h>
-#include "vipx-config.h"
-
-#define IOR8(port) readb((const volatile void *)&port)
-#define IOR16(port) readw((const volatile void *)&port)
-#define IOR32(port) readl((const volatile void *)&port)
-#define IOR64(port) readq((const volatile void *)&port)
-
-#ifdef DBG_VERBOSE_IO
-#define IOW8(port, val) \
- do { pr_info("ADDR: %p, VAL: 0x%02x\r\n", \
- &port, val); writeb(val, &port);\
- } while (0)
-#define IOW16(port, val) \
- do { pr_info("ADDR: %p, VAL: 0x%04x\r\n", \
- &port, val); writew(val, &port);\
- } while (0)
-#define IOW32(port, val) \
- do { pr_info("ADDR: %p, VAL: 0x%08x\r\n", \
- &port, val); writel(val, &port);\
- } while (0)
-#define IOW64(port, val) \
- do { pr_info("ADDR: %p, VAL: 0x%016llx\r\n", \
- &port, val); writeq(val, &port);\
- } while (0)
-
-#else /* not VERBOSE_WRITE */
-#define IOW8(port, val) writeb(val, &port)
-#define IOW16(port, val) writew(val, &port)
-#define IOW32(port, val) writel(val, &port)
-#define IOW64(port, val) writeq(val, &port)
-#endif /* not VERBOSE_WRITE */
-
-void *mem2iocpy(void *dst,void *src, u32 size);
-void *io2memcpy(void *dst,void *src, u32 size);
-
-#endif /* __VIPXL_IO_H__ */
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VIPX_MAILBOX_H_
-#define VIPX_MAILBOX_H_
-
-#include <interface/ap_vip_if.h>
-
-#define VIPX_MAILBOX_SIGNATURE1 0xCAFE
-#define VIPX_MAILBOX_SIGNATURE2 0xEFAC
-#define VIPX_MAILBOX_BASEOFFSET 16
-
-#define VIPX_CMD_SIGNATURE 0x1234
-#define VIPX_DUM_SIGNATURE 0x5678
-
-#define MAX_MESSAGE_CNT 8
-
-#define DMESG() printk("[%s:%d]\n", __func__, __LINE__)
-
-enum {
- VIPX_MTYPE_H2F_NORMAL = 0,
- VIPX_MTYPE_H2F_URGENT = 1,
- VIPX_MTYPE_F2H_NORMAL = 0,
- VIPX_MTYPE_F2H_URGENT = 1,
- VIPX_MTYPE_MAX,
-};
-
-struct vipx_mailbox_h2f {
- volatile u32 wmsg_idx; /* Host permission is RW */
- volatile u32 rmsg_idx; /* Host permission is R_ONLY */
- vipx_msg_t msg[MAX_MESSAGE_CNT];
-};
-
-struct vipx_mailbox_f2h {
- volatile u32 wmsg_idx; /* Host permission is R_ONLY */
- volatile u32 rmsg_idx; /* Host permission is RW */
- vipx_msg_t msg[MAX_MESSAGE_CNT];
-};
-
-struct vipx_mailbox_stack {
- struct vipx_mailbox_h2f h2f;
- struct vipx_mailbox_f2h f2h;
-};
-
-struct vipx_mailbox_ctrl {
- struct vipx_mailbox_stack *stack;
- struct vipx_mailbox_stack *urgent_stack;
-};
-
-struct vipx_mailbox_stack * vipx_mbox_g_stack(void *mbox, u32 mbox_size);
-struct vipx_mailbox_stack * vipx_mbox_g_urgent_stack(void *mbox, u32 mbox_size);
-int vipx_mbox_ready(struct vipx_mailbox_ctrl *mctrl, size_t size, u32 type);
-int vipx_mbox_wait_reply(struct vipx_mailbox_ctrl *mctrl, u32 cmd, u32 type);
-int vipx_mbox_write(struct vipx_mailbox_ctrl *mctrl,
- void *payload, size_t size, u32 type, u32 gid, u32 cmd, u32 cid);
-int vipx_mbox_read(struct vipx_mailbox_ctrl *mctrl,
- void *payload, u32 type, void *debug_data);
-
-int emul_mbox_handler(struct vipx_mailbox_ctrl *mctrl);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos5 SoC series VIPx driver
- *
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_MEM_H
-#define VIPX_MEM_H
-
-#include <linux/platform_device.h>
-#include <media/videobuf2-v4l2.h>
-#include <media/videobuf2-dma-sg.h>
-#include <linux/dma-buf.h>
-#include <linux/ion_exynos.h>
-
-
-#include "vipx-binary.h"
-
-/*
- * TEMP for test
- */
-#define VIPX_CM7_DRAM_BIN_SIZE (1024 * 1024 * 3)
-#define VIPX_MBOX_SIZE (1024 * 24)
-#define VIPX_DEBUG_SIZE (1024 * 1024 * 16)
-#define VIPX_CM7_HEAP_SIZE_PREVIEW (1024 * 1024 * 16)
-#define VIPX_CM7_HEAP_SIZE_ENF (1024 * 1024 * 64)
-#define VIPX_CM7_HEAP_SIZE_CAPTURE (1024 * 1024 * 128)
-
-#define VIPX_IOVA_DRAM_FIRMWARE 0xB8000000
-#define VIPX_IOVA_DRAM_MBOX 0xA9000000
-
-#define ION_FLAG_NON_CACHED 0
-
-
-struct vipx_vb2_buf;
-struct vipx_vb2_buf_ops {
- ulong (*plane_kvaddr)(struct vipx_vb2_buf *vbuf, u32 plane);
- ulong (*plane_cookie)(struct vipx_vb2_buf *vbuf, u32 plane);
- dma_addr_t (*plane_dvaddr)(struct vipx_vb2_buf *vbuf, u32 plane);
- void (*plane_prepare)(struct vipx_vb2_buf *vbuf, u32 plane,
- bool exact);
- void (*plane_finish)(struct vipx_vb2_buf *vbuf, u32 plane,
- bool exact);
- void (*buf_prepare)(struct vipx_vb2_buf *vbuf, bool exact);
- void (*buf_finish)(struct vipx_vb2_buf *vbuf, bool exact);
-};
-
-enum vipx_vbuf_cache_state {
- VIPX_VBUF_CACHE_INVALIDATE,
-};
-
-struct vipx_vb2_buf {
- struct vb2_v4l2_buffer vb;
- ulong kva[VIDEO_MAX_PLANES];
- dma_addr_t dva[VIDEO_MAX_PLANES];
-
- /* for cache operation */
- unsigned long cache_state;
- struct list_head cache_flush_list;
-
- const struct vipx_vb2_buf_ops *ops;
-};
-
-struct vipx_priv_buf;
-struct vipx_priv_buf_ops {
- void (*free)(struct vipx_priv_buf *pbuf);
- void *(*kvaddr)(struct vipx_priv_buf *pbuf);
- dma_addr_t (*dvaddr)(struct vipx_priv_buf *pbuf);
- phys_addr_t (*phaddr)(struct vipx_priv_buf *pbuf);
- void (*sync_for_device)(struct vipx_priv_buf *pbuf,
- off_t offset, size_t size,
- enum dma_data_direction dir);
- void (*sync_for_cpu)(struct vipx_priv_buf *pbuf,
- off_t offset, size_t size,
- enum dma_data_direction dir);
-};
-
-struct vipx_priv_buf {
- size_t size;
- size_t align;
- void *ctx;
- void *kvaddr;
-
- const struct vipx_priv_buf_ops *ops;
- void *priv;
- struct dma_buf *dma_buf;
- struct dma_buf_attachment *attachment;
- enum dma_data_direction direction;
- void *kva;
- dma_addr_t iova;
- struct sg_table *sgt;
-};
-
-#define vb_to_vipx_vb2_buf(x) \
- container_of(x, struct vipx_vb2_buf, vb)
-
-#define CALL_BUFOP(buf, op, args...) \
- ((buf)->ops->op ? (buf)->ops->op(args) : 0)
-
-#define CALL_PTR_BUFOP(buf, op, args...) \
- ((buf)->ops->op ? (buf)->ops->op(args) : NULL)
-
-#define CALL_VOID_BUFOP(buf, op, args...) \
- do { \
- if ((buf)->ops->op) \
- (buf)->ops->op(args); \
- } while (0)
-
-#define call_buf_op(buf, op, args...) \
- ((buf)->ops->op ? (buf)->ops->op((buf), args) : 0)
-
-struct vipx_mem_ops {
- void *(*init)(struct device *dev);
- void (*cleanup)(void *ctx);
- int (*resume)(void *ctx);
- void (*suspend)(void *ctx);
- void (*set_cached)(void *ctx, bool cacheable);
- int (*set_alignment)(void *ctx, size_t alignment);
- struct vipx_priv_buf *(*alloc)(void *ctx, size_t size, size_t align);
-};
-
-struct vipx_ion_ctx {
- struct device *dev;
- unsigned long alignment;
- long flags;
-
- /* protects iommu_active_cnt and protected */
- struct mutex lock;
- int iommu_active_cnt;
-};
-
-struct vipx_minfo {
- struct vipx_priv_buf *pb_debug;
- struct vipx_priv_buf *pb_heap;
-
- ulong kvaddr_debug_cnt;
-
- dma_addr_t paddr_fw;
- dma_addr_t dvaddr_fw;
- void *kvaddr_fw;
-
- dma_addr_t paddr_mbox;
- dma_addr_t dvaddr_mbox;
- void *kvaddr_mbox;
-
- dma_addr_t paddr_debug;
- dma_addr_t dvaddr_debug;
- void *kvaddr_debug;
-
- dma_addr_t paddr_heap;
- dma_addr_t dvaddr_heap;
- void *kvaddr_heap;
-};
-
-struct vipx_memory {
- struct device *dev;
- struct vipx_ion_ctx *default_ctx;
- struct vipx_ion_ctx *phcontig_ctx;
- const struct vipx_mem_ops *vipx_mem_ops;
- const struct vb2_mem_ops *vb2_mem_ops;
- const struct vipx_vb2_buf_ops *vipx_vb2_buf_ops;
- struct iommu_domain *iommu_domain;
-
- struct vipx_minfo info;
- void *priv;
- struct vipx_priv_buf *(*kmalloc)(size_t size, size_t align);
-};
-
-#define CALL_MEMOP(mem, op, args...) \
- ((mem)->vipx_mem_ops->op ? \
- (mem)->vipx_mem_ops->op(args) : 0)
-
-#define CALL_PTR_MEMOP(mem, op, args...) \
- ((mem)->vipx_mem_ops->op ? \
- (mem)->vipx_mem_ops->op(args) : NULL)
-
-#define CALL_VOID_MEMOP(mem, op, args...) \
- do { \
- if ((mem)->vipx_mem_ops->op) \
- (mem)->vipx_mem_ops->op(args); \
- } while (0)
-
-int vipx_memory_probe(struct vipx_memory *mem, struct device *dev);
-int vipx_memory_open(struct vipx_memory *mem);
-int vipx_memory_close(struct vipx_memory *mem);
-dma_addr_t vipx_allocate_heap(struct vipx_memory *mem, u32 size);
-void vipx_free_heap(struct vipx_memory *mem, struct vipx_binary *binary, u32 heap_size);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VIPX_SLAB_H_
-#define VIPX_SLAB_H_
-
-#include <interface/ap_vip_if.h>
-
-#define VIPX_SLAB_MAX_LEVEL 1
-
-struct vipx_slab_allocator {
- u32 cache_size[VIPX_SLAB_MAX_LEVEL];
- struct kmem_cache *cache[VIPX_SLAB_MAX_LEVEL];
-};
-
-int vipx_slab_init(struct vipx_slab_allocator *allocator);
-int vipx_slab_alloc(struct vipx_slab_allocator *allocator, void **target, size_t size);
-int vipx_slab_free(struct vipx_slab_allocator *allocator, void *target, size_t size);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#ifndef VIPX_TASKMGR_H_
-#define VIPX_TASKMGR_H_
-
-#include <linux/types.h>
-#include <linux/kthread.h>
-
-#include "vipx-config.h"
-#include "vipx-time.h"
-#include "vs4l.h"
-
-#define TASKMGR_IDX_0 (1 << 0 ) /* graphmgr thread */
-#define TASKMGR_IDX_1 (1 << 1 )
-#define TASKMGR_IDX_2 (1 << 2 )
-#define TASKMGR_IDX_3 (1 << 3 )
-#define TASKMGR_IDX_4 (1 << 4 )
-#define TASKMGR_IDX_5 (1 << 5 )
-#define TASKMGR_IDX_6 (1 << 6 )
-#define TASKMGR_IDX_7 (1 << 7 )
-#define TASKMGR_IDX_8 (1 << 8 )
-#define TASKMGR_IDX_9 (1 << 9 )
-#define TASKMGR_IDX_10 (1 << 10)
-#define TASKMGR_IDX_11 (1 << 11)
-#define TASKMGR_IDX_12 (1 << 12)
-#define TASKMGR_IDX_13 (1 << 13)
-#define TASKMGR_IDX_14 (1 << 14)
-#define TASKMGR_IDX_15 (1 << 15)
-#define TASKMGR_IDX_16 (1 << 16)
-#define TASKMGR_IDX_17 (1 << 17)
-#define TASKMGR_IDX_18 (1 << 18)
-#define TASKMGR_IDX_19 (1 << 19)
-#define TASKMGR_IDX_20 (1 << 20)
-#define TASKMGR_IDX_21 (1 << 21)
-#define TASKMGR_IDX_22 (1 << 22)
-#define TASKMGR_IDX_23 (1 << 23)
-#define TASKMGR_IDX_24 (1 << 24)
-#define TASKMGR_IDX_25 (1 << 25)
-#define TASKMGR_IDX_26 (1 << 26)
-#define TASKMGR_IDX_27 (1 << 27)
-#define TASKMGR_IDX_28 (1 << 28)
-#define TASKMGR_IDX_29 (1 << 29)
-#define TASKMGR_IDX_30 (1 << 30)
-#define TASKMGR_IDX_31 (1 << 31)
-
-#define taskmgr_e_barrier_irqs(taskmgr, index, flag) \
- taskmgr->sindex |= index; spin_lock_irqsave(&taskmgr->slock, flag)
-#define taskmgr_x_barrier_irqr(taskmgr, index, flag) \
- spin_unlock_irqrestore(&taskmgr->slock, flag); taskmgr->sindex &= ~index
-#define taskmgr_e_barrier_irq(taskmgr, index) \
- taskmgr->sindex |= index; spin_lock_irq(&taskmgr->slock)
-#define taskmgr_x_barrier_irq(taskmgr, index) \
- spin_unlock_irq(&taskmgr->slock); taskmgr->sindex &= ~index
-#define taskmgr_e_barrier(taskmgr, index) \
- taskmgr->sindex |= index; spin_lock(&taskmgr->slock)
-#define taskmgr_x_barrier(taskmgr, index) \
- spin_unlock(&taskmgr->slock); taskmgr->sindex &= ~index
-
-enum vipx_task_state {
- VIPX_TASK_STATE_FREE = 1,
- VIPX_TASK_STATE_REQUEST,
- VIPX_TASK_STATE_PREPARE,
- VIPX_TASK_STATE_PROCESS,
- VIPX_TASK_STATE_COMPLETE,
- VIPX_TASK_STATE_INVALID
-};
-
-enum vipx_task_flag {
- VIPX_TASK_FLAG_IOCPY = 16
-};
-
-enum vipx_task_message {
- VIPX_TASK_INIT = 1,
- VIPX_TASK_DEINIT,
- VIPX_TASK_CREATE,
- VIPX_TASK_DESTROY,
- VIPX_TASK_ALLOCATE,
- VIPX_TASK_PROCESS,
- VIPX_TASK_REQUEST = 10,
- VIPX_TASK_DONE,
- VIPX_TASK_NDONE
-};
-
-enum vipx_control_message {
- VIPX_CTRL_NONE = 100,
- VIPX_CTRL_STOP,
- VIPX_CTRL_STOP_DONE
-};
-
-struct vipx_task {
- struct list_head list;
- struct kthread_work work;
- u32 state;
-
- u32 message;
- ulong param0;
- ulong param1;
- ulong param2;
- ulong param3;
-
- struct vb_container_list *incl;
- struct vb_container_list *otcl;
- ulong flags;
-
- u32 id;
- struct mutex *lock;
- u32 index;
- u32 findex;
- u32 tdindex;
- void *owner;
-
- struct vipx_time time[VIPX_TMP_COUNT];
-};
-
-struct vipx_taskmgr {
- u32 id;
- u32 sindex;
- spinlock_t slock;
- struct vipx_task task[VIPX_MAX_TASK];
-
- struct list_head fre_list;
- struct list_head req_list;
- struct list_head pre_list;
- struct list_head pro_list;
- struct list_head com_list;
-
- u32 tot_cnt;
- u32 fre_cnt;
- u32 req_cnt;
- u32 pre_cnt;
- u32 pro_cnt;
- u32 com_cnt;
-};
-
-void vipx_task_s_free(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_g_free(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_free_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_free_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_free_list(struct vipx_taskmgr *taskmgr);
-
-void vipx_task_s_request(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_g_request(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_request_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_request_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_request_list(struct vipx_taskmgr *taskmgr);
-
-void vipx_task_s_prepare(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_g_prepare(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_prepare_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_prepare_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_prepare_list(struct vipx_taskmgr *taskmgr);
-
-void vipx_task_s_process(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_g_process(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_process_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_process_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_process_list(struct vipx_taskmgr *taskmgr);
-
-void vipx_task_s_complete(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_g_complete(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_complete_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_complete_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_complete_list(struct vipx_taskmgr *taskmgr);
-
-void vipx_task_trans_fre_to_req(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_req_to_pre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_req_to_pro(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_req_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_req_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_pre_to_pro(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_pre_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_pre_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_pro_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_pro_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_com_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-void vipx_task_trans_any_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task);
-
-void vipx_task_pick_fre_to_req(struct vipx_taskmgr *taskmgr, struct vipx_task **task);
-void vipx_task_print_all(struct vipx_taskmgr *taskmgr);
-
-int vipx_task_init(struct vipx_taskmgr *taskmgr, void *owner);
-int vipx_task_deinit(struct vipx_taskmgr *taskmgr);
-void vipx_task_flush(struct vipx_taskmgr *taskmgr);
-
-#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/ktime.h>
-#include <linux/timekeeping.h>
-
-enum vipx_time_measure_point {
- VIPX_TMP_QUEUE,
- VIPX_TMP_REQUEST,
- VIPX_TMP_RESOURCE,
- VIPX_TMP_PROCESS,
- VIPX_TMP_DONE,
- VIPX_TMP_COUNT
-};
-
-struct vipx_time {
- struct timeval time;
-};
-
-void vipx_get_timestamp(struct vipx_time *time);
-
-#define VIPX_TIME_IN_US(v) ((v).time.tv_sec * 1000000 + (v).time.tv_usec)
+++ /dev/null
-config EXYNOS_VIPX_INTERFACE
- bool "Select Interface"
- help
- This is VIPx interface
-
-config EXYNOS_VIPX_HARDWARE
- bool "Use Hardware"
- depends on EXYNOS_VIPX_INTERFACE
- help
- This is hardware device
-
-choice
- prompt "Select Message box type"
- depends on EXYNOS_VIPX_INTERFACE
- help
- Select VIP(x) message box type.
-
-config EXYNOS_VIPX_MBOX
- bool "Use VIPX MBOX"
- depends on EXYNOS_VIPX_INTERFACE
- help
- This is message box device
-
-config EXYNOS_EMUL_MBOX
- bool "Use EMUL MBOX"
- depends on EXYNOS_VIPX_INTERFACE
- help
- This is message box emulator
-endchoice
+++ /dev/null
-obj-$(CONFIG_EXYNOS_VIPX_HARDWARE) += hardware/
-obj-$(CONFIG_EXYNOS_VIPX_HARDWARE) += vipx-slab.o
-
-EXTRA_CFLAGS += -Idrivers/vision/include -Idrivers/vision/vipx/include
+++ /dev/null
-obj-y += vipx-interface.o
-obj-$(CONFIG_EXYNOS_VIPX_MBOX) += vipx-mailbox.o
-obj-$(CONFIG_EXYNOS_EMUL_MBOX) += emul-mailbox.o
-EXTRA_CFLAGS += -Idrivers/vision/include -Idrivers/vision/vipx/include
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-
-#include "vipx-config.h"
-#include "vipx-io.h"
-#include "vipx-mailbox.h"
-#include "vipx-interface.h"
-
-#include <interface/ap_vip_if.h>
-
-#define INDEX(x) (x % MAX_MESSAGE_CNT)
-
-struct vipx_mailbox_stack * vipx_mbox_g_stack(void *mbox, u32 mbox_size)
-{
- vipx_info("sizeof mbox stack (%lx) %p\n", sizeof(struct vipx_mailbox_stack), mbox);
- return (struct vipx_mailbox_stack *)mbox;
-}
-
-struct vipx_mailbox_stack * vipx_mbox_g_urgent_stack(void *mbox, u32 mbox_size)
-{
- vipx_info("%p\n", ((char *)mbox + sizeof(struct vipx_mailbox_stack)));
- return (struct vipx_mailbox_stack *)((char *)mbox + sizeof(struct vipx_mailbox_stack));
-}
-
-static u32 __vipx_mbox_g_freesize(struct vipx_mailbox_h2f *mbox)
-{
- u32 wmsg_idx = 0;
- u32 rmsg_idx = 0;
- u32 free_size = 0;
-
- BUG_ON(!mbox);
-
- wmsg_idx = mbox->wmsg_idx;
- rmsg_idx = mbox->rmsg_idx;
-
- free_size = MAX_MESSAGE_CNT - (wmsg_idx - rmsg_idx);
-
- BUG_ON(free_size < 0);
-
- return free_size;
-}
-
-int vipx_mbox_ready(struct vipx_mailbox_ctrl *mctrl,
- size_t size, u32 type)
-{
- u32 try_count;
- struct vipx_mailbox_h2f *mbox;
- u16 free_size16;
-
- BUG_ON(!mctrl);
- BUG_ON(!IS_ALIGNED(size, 2));
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->h2f;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->h2f;
- } else {
- vipx_err("invalid type(%d)\n", type);
- return -EINVAL;
- }
- try_count = 1000;
-
- free_size16 = __vipx_mbox_g_freesize(mbox);
- while (--try_count && (free_size16 == 0)) {
- vipx_warn("mbox is not ready(freesize %d)...(%d)\n",
- free_size16, try_count);
- udelay(10);
- free_size16 = __vipx_mbox_g_freesize(mbox);
- }
-
- if (try_count)
- return 0;
- else
- return -EBUSY;
-}
-
-int vipx_mbox_wait_reply(struct vipx_mailbox_ctrl *mctrl,
- u32 cmd, u32 type)
-{
- int ret = 0;
- struct vipx_mailbox_f2h *mbox;
- int try_count;
-
- if (type == VIPX_MTYPE_F2H_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_F2H_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("cmd(%d) has invalid type(%d)\n", cmd, type);
- ret = -EINVAL;
- goto p_err;
- }
-
- try_count = 1000;
- while (--try_count && (mbox->wmsg_idx == mbox->rmsg_idx)) {
- vipx_info("waiting vipx reply(%d, %d)...(%d)\n", mbox->wmsg_idx, mbox->rmsg_idx, try_count);
- msleep(1);
- }
-
- if (try_count <= 0) {
- vipx_err("waiting vipx reply is timeout\n");
- ret = -EINVAL;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-int vipx_mbox_write(struct vipx_mailbox_ctrl *mctrl,
- void *payload, size_t size, u32 type, u32 gid, u32 cmd, u32 cid)
-{
- int ret = 0;
- struct vipx_mailbox_h2f *mbox;
- u32 wmsg_idx;
- u32 *wptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
- BUG_ON(!IS_ALIGNED(size, 2));
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->h2f;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->h2f;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
- wmsg_idx = mbox->wmsg_idx;
- wptr = (void *)&mbox->msg[wmsg_idx % MAX_MESSAGE_CNT];
-
- mem2iocpy(wptr, payload, size);
-
-#ifdef DBG_MAILBOX_CORE
- vipx_info("[I%d][MBOX][H2F] %d command(%d, %d, %d)\n", gid,
- cmd, cid, wmsg_idx, mbox->rmsg_idx);
-#endif
-
- /* increment actual write pointer */
- mbox->wmsg_idx++;
-
-p_err:
- return ret;
-}
-
-int vipx_mbox_read(struct vipx_mailbox_ctrl *mctrl,
- void *payload, u32 type, void *debug_data)
-{
- int ret = 0;
- struct vipx_mailbox_f2h *mbox;
- u32 rmsg_idx;
- u32 *rptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
- rmsg_idx = mbox->rmsg_idx;
- rptr = (void *)&mbox->msg[rmsg_idx % MAX_MESSAGE_CNT];
-
- io2memcpy(payload, rptr, sizeof(vipx_msg_t));
-
- mbox->rmsg_idx++;
-
-p_err:
- return ret;
-}
-
-/* emul mbox */
-
-static u32 __emul_mbox_g_freesize(struct vipx_mailbox_f2h *mbox)
-{
- u32 wmsg_idx = 0;
- u32 rmsg_idx = 0;
- u32 free_size = 0;
-
- BUG_ON(!mbox);
-
- wmsg_idx = mbox->wmsg_idx;
- rmsg_idx = mbox->rmsg_idx;
-
- free_size = MAX_MESSAGE_CNT - (wmsg_idx - rmsg_idx);
-
- BUG_ON(free_size < 0);
-
- return free_size;
-}
-
-int emul_mbox_ready(struct vipx_mailbox_ctrl *mctrl, u32 type)
-{
- u32 try_count;
- struct vipx_mailbox_f2h *mbox;
- u32 free_size;
-
- BUG_ON(!mctrl);
-
- vipx_info("________ %d\n", type);
-
- if (type == VIPX_MTYPE_F2H_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_F2H_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("invalid type(%d)\n", type);
- return -EINVAL;
- }
- try_count = 1000;
-
- free_size = __emul_mbox_g_freesize(mbox);
- while (--try_count && (free_size == 0)) {
- vipx_warn("mbox is not ready(freesize %d)...(%d)\n",
- free_size, try_count);
- udelay(10);
- free_size = __emul_mbox_g_freesize(mbox);
- }
-
- if (try_count)
- return 0;
- else
- return -EBUSY;
-}
-
-int emul_mbox_check_message_from_host(struct vipx_mailbox_ctrl *mctrl, u32 type)
-{
- int ret = 0;
- u32 wmsg_idx, rmsg_idx;
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- wmsg_idx = mctrl->stack->h2f.wmsg_idx;
- rmsg_idx = mctrl->stack->h2f.rmsg_idx;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- wmsg_idx = mctrl->urgent_stack->h2f.wmsg_idx;
- rmsg_idx = mctrl->urgent_stack->h2f.rmsg_idx;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
-
- if ((wmsg_idx == rmsg_idx)) {
- ret = -EINVAL;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-int emul_mbox_write(struct vipx_mailbox_ctrl *mctrl,
- void *payload, size_t size, u32 type)
-{
- struct vipx_mailbox_f2h *mbox;
- u32 wmsg_idx;
- u32 *wptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
- BUG_ON(!IS_ALIGNED(size, 2));
-
- if (type == VIPX_MTYPE_F2H_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_F2H_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("invalid type(%d)\n", type);
- return -EINVAL;
- }
- wmsg_idx = mbox->wmsg_idx;
- wptr = (void *)&mbox->msg[wmsg_idx % MAX_MESSAGE_CNT];
-
- mem2iocpy(wptr, payload, size);
-
- /* increment actual write pointer */
- mbox->wmsg_idx++;
-
- return 0;
-}
-
-int emul_mbox_read(struct vipx_mailbox_ctrl *mctrl, void *payload, u32 type)
-{
- int ret = 0;
- struct vipx_mailbox_h2f *mbox;
- u32 rmsg_idx;
- u32 *rptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->h2f;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->h2f;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
- rmsg_idx = mbox->rmsg_idx;
- rptr = (void *)&mbox->msg[rmsg_idx % MAX_MESSAGE_CNT];
-
- io2memcpy(payload, rptr, sizeof(vipx_msg_t));
-
- mbox->rmsg_idx++;
-
-p_err:
- return ret;
-}
-
-int emul_mbox_handler(struct vipx_mailbox_ctrl *mctrl)
-{
- int ret = 0;
- int has_urgent = 0;
- int has_normal = 0;
-
- vipx_msg_t payload;
-
- /*
- * Handle urgent
- */
-
- /* read h2f mbox */
- ret = emul_mbox_check_message_from_host(mctrl, VIPX_MTYPE_H2F_URGENT);
- if (ret)
- goto p_normal;
-
- ret = emul_mbox_read(mctrl, &payload, VIPX_MTYPE_H2F_URGENT);
-
- /* handle message */
-
- /* write f2h mbox */
- ret = emul_mbox_ready(mctrl, VIPX_MTYPE_F2H_URGENT);
- if (ret) {
- vipx_err("emul_mbox_ready is failed(%d)\n", ret);
- goto p_normal;
- }
-
- ret = emul_mbox_write(mctrl, &payload, sizeof(vipx_msg_t), VIPX_MTYPE_F2H_URGENT);
- if (ret) {
- vipx_err("emul_mbox_write is failed(%d)\n", ret);
- goto p_normal;
- }
-
- has_urgent = 1;
-
-p_normal:
- /*
- * Handle normal
- */
-
- /* read h2f mbox */
- ret = emul_mbox_check_message_from_host(mctrl, VIPX_MTYPE_H2F_NORMAL);
- if (ret)
- goto p_err;
-
- ret = emul_mbox_read(mctrl, &payload, VIPX_MTYPE_H2F_NORMAL);
-
- /* handle message */
-
- /* write f2h mbox */
- ret = emul_mbox_ready(mctrl, VIPX_MTYPE_F2H_NORMAL);
- if (ret) {
- vipx_err("emul_mbox_ready is failed(%d)\n", ret);
- goto p_err;
- }
-
- ret = emul_mbox_write(mctrl, &payload, sizeof(vipx_msg_t), VIPX_MTYPE_F2H_NORMAL);
- if (ret) {
- vipx_err("emul_mbox_write is failed(%d)\n", ret);
- goto p_err;
- }
-
- has_normal = 1;
-
-p_err:
- if (has_urgent || has_normal) {
- vipx_info("________ %d %d exit\n", has_urgent, has_normal);
- }
-
- return (has_urgent || has_normal) ? 0 : -1;
-}
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-
-#include "vipx-config.h"
-#include "vipx-interface.h"
-#include "vipx-memory.h"
-#include "vipx-mailbox.h"
-#include "vipx-io.h"
-#include "../vipx-graphmgr.h"
-#include "../vipx-device.h"
-
-#include <interface/ap_vip_if.h>
-
-#define TURN_AROUND_TIME (HZ/20)
-
-#define enter_request_barrier(task) mutex_lock(task->lock);
-#define exit_request_barrier(task) mutex_unlock(task->lock);
-#define init_process_barrier(itf) spin_lock_init(&itf->process_barrier);
-#define enter_process_barrier(itf) spin_lock_irq(&itf->process_barrier);
-#define exit_process_barrier(itf) spin_unlock_irq(&itf->process_barrier);
-
-#ifdef DBG_INTERFACE_ISR
-#define DEBUG_ISR vipx_info
-#else
-#define DEBUG_ISR vipx_dbg
-#endif
-
-/* #define NON_BLOCK_INVOKE */
-
-atomic_t checker;
-
-int __get_size_from(struct vipx_format *format, unsigned int plane, int *width, int *height)
-{
- int ret = 0;
-
- switch (format->format) {
- case VS4L_DF_IMAGE_U8:
- case VS4L_DF_IMAGE_U16:
- case VS4L_DF_IMAGE_RGB:
- case VS4L_DF_IMAGE_U32:
- *width = format->width;
- *height = format->height;
- break;
- case VS4L_DF_IMAGE_NV21:
- if (plane == 0) {
- *width = format->width;
- *height = format->height;
- } else if (plane == 1) {
- *width = format->width;
- *height = format->height / 2;
- } else {
- vipx_err("Invalid plane(%d) for NV21 format\n", plane);
- ret = -1;
- }
- break;
- case VS4L_DF_IMAGE_YV12:
- case VS4L_DF_IMAGE_I420:
- if (plane == 0) {
- *width = format->width;
- *height = format->height;
- } else if (plane == 1) {
- *width = format->width / 2;
- *height = format->height / 2;
- } else if (plane == 2) {
- *width = format->width / 2;
- *height = format->height / 2;
- } else {
- vipx_err("Invalid plane(%d) for YV12/I420 format\n", plane);
- ret = -1;
- }
- break;
- case VS4L_DF_IMAGE_I422:
- if (plane == 0) {
- *width = format->width;
- *height = format->height;
- } else if (plane == 1) {
- *width = format->width / 2;
- *height = format->height;
- } else if (plane == 2) {
- *width = format->width / 2;
- *height = format->height;
- } else {
- vipx_err("Invalid plane(%d) for YV12/I422 format\n", plane);
- ret = -1;
- }
- break;
- default:
- vipx_err("invalid format(%d)\n", format->format);
- ret = -1;
- break;
- }
-
- return ret;
-}
-
-static void WORK_TO_FREE(struct vipx_work_list *work_list, struct vipx_work *work)
-{
- unsigned long flags;
-
- BUG_ON(!work_list);
- BUG_ON(!work);
-
- spin_lock_irqsave(&work_list->slock, flags);
- list_add_tail(&work->list, &work_list->free_head);
- work_list->free_cnt++;
- spin_unlock_irqrestore(&work_list->slock, flags);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static void WORK_FR_FREE(struct vipx_work_list *work_list, struct vipx_work **work)
-{
- unsigned long flags;
-
- BUG_ON(!work_list);
- BUG_ON(!work);
-
- spin_lock_irqsave(&work_list->slock, flags);
- if (work_list->free_cnt) {
- *work = container_of(work_list->free_head.next, struct vipx_work, list);
- list_del(&(*work)->list);
- work_list->free_cnt--;
- } else {
- *work = NULL;
- }
- spin_unlock_irqrestore(&work_list->slock, flags);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static void WORK_TO_REPLY(struct vipx_work_list *work_list, struct vipx_work *work)
-{
- unsigned long flags;
-
- BUG_ON(!work_list);
- BUG_ON(!work);
-
- spin_lock_irqsave(&work_list->slock, flags);
- list_add_tail(&work->list, &work_list->reply_head);
- work_list->reply_cnt++;
- spin_unlock_irqrestore(&work_list->slock, flags);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static void WORK_FR_REPLY(struct vipx_work_list *work_list, struct vipx_work **work)
-{
- unsigned long flags;
-
- BUG_ON(!work_list);
- BUG_ON(!work);
-
- spin_lock_irqsave(&work_list->slock, flags);
- if (work_list->reply_cnt) {
- *work = container_of(work_list->reply_head.next, struct vipx_work, list);
- list_del(&(*work)->list);
- work_list->reply_cnt--;
- } else {
- *work = NULL;
- }
- spin_unlock_irqrestore(&work_list->slock, flags);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static void INIT_WORK_LIST(struct vipx_work_list *work_list, u32 count)
-{
- u32 i;
-
- work_list->free_cnt = 0;
- work_list->reply_cnt = 0;
- INIT_LIST_HEAD(&work_list->free_head);
- INIT_LIST_HEAD(&work_list->reply_head);
- spin_lock_init(&work_list->slock);
- init_waitqueue_head(&work_list->wait_queue);
- for (i = 0; i < count; ++i)
- WORK_TO_FREE(work_list, &work_list->work[i]);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static inline void __set_reply(struct vipx_interface *interface, u32 graph_idx)
-{
- BUG_ON(graph_idx >= VIPX_MAX_GRAPH);
- interface->reply[graph_idx].valid = 1;
- wake_up(&interface->reply_queue);
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static inline void __clr_reply(struct vipx_interface *interface, u32 graph_idx)
-{
- BUG_ON(graph_idx >= VIPX_MAX_GRAPH);
- interface->reply[graph_idx].valid = 0;
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static int __wait_reply(struct vipx_interface *interface, u32 graph_idx)
-{
- int ret = 0;
- int remain_time;
- u32 cmd, type;
-
- BUG_ON(!interface);
- BUG_ON(graph_idx >= VIPX_MAX_GRAPH);
- BUG_ON(!interface->request[graph_idx]);
-
- cmd = interface->request[graph_idx]->message;
- type = interface->request[graph_idx]->param3;
-
-#ifdef NON_BLOCK_INVOKE
- remain_time = wait_event_timeout(interface->reply_queue,
- interface->reply[graph_idx].valid, VIPX_COMMAND_TIMEOUT);
-#else
- remain_time = wait_event_timeout(interface->reply_queue,
- interface->reply[graph_idx].valid, VIPX_COMMAND_TIMEOUT * 2);
-#endif
- if (!remain_time) {
- vipx_err("[GID:%d] %d command[type: %d] : reply is timeout\n", graph_idx, cmd, type);
- ret = -ETIME;
- goto p_err;
- }
-
-p_err:
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return ret;
-}
-
-static void __send_interrupt(struct vipx_interface *interface)
-{
-#ifndef CONFIG_EXYNOS_EMUL_MBOX
- u32 try_count;
- u32 type, offset, val;
-
- BUG_ON(!interface);
- BUG_ON(!interface->process);
-
- type = interface->process->param3;
- offset = (type == VIPX_MTYPE_H2F_NORMAL) ? 0x10 : 0x0C;
-
-
- /* Check interrupt clear */
- try_count = 100;
- val = readl(interface->regs + offset);
- while (--try_count && val) {
- vipx_warn("waiting interrupt clear(%d)...(%d)\n", val, try_count);
- val = readl(interface->regs + offset);
- }
-
- DEBUG_ISR("[DEBUG ISR] interrupt generate (%x)\n", offset);
-
- /* Raise interrupt */
- writel(0x100, interface->regs + offset);
-#endif
-
- vipx_dbg("%s:\n", __func__);
-}
-
-static int __vipx_set_cmd(struct vipx_interface *interface,
- struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- struct vipx_mailbox_ctrl *mctrl;
- void *payload;
- u32 cmd, gidx, cid, size, type;
- ulong flag;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
- BUG_ON(!itask->lock);
- BUG_ON(itask->param1 >= VIPX_MAX_GRAPH);
-
- itaskmgr = &interface->taskmgr;
- mctrl = interface->private_data;
- cmd = itask->message;
- payload = (void *)itask->param0;
- gidx = itask->param1;
- size = itask->param2;
- type = itask->param3;
- cid = itask->index;
-
- enter_request_barrier(itask);
- interface->request[gidx] = itask;
-
- enter_process_barrier(interface);
- interface->process = itask;
-
- ret = vipx_mbox_ready(mctrl, size, type);
- if (ret) {
- interface->process = NULL;
- exit_process_barrier(interface);
- interface->request[gidx] = NULL;
- exit_request_barrier(itask);
- vipx_err("vipx_mbox_ready is fail(%d)", ret);
- goto p_err;
- }
-
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_req_to_pro(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- ret = vipx_mbox_write(mctrl, payload, size, type, gidx, cmd, cid);
- if (ret) {
- interface->process = NULL;
- exit_process_barrier(interface);
- interface->request[gidx] = NULL;
- exit_request_barrier(itask);
- pr_err("vipx_mbox_write_request fail (%d)\n", ret);
- goto p_err;
- }
-
- atomic_inc(&checker);
-
- __send_interrupt(interface);
-
- DEBUG_ISR("[I%ld][MBOX] CMD : %d, ID : %d\n", itask->param1, cmd, cid);
-
- interface->process = NULL;
- exit_process_barrier(interface);
-
- ret = __wait_reply(interface, gidx);
- if (ret) {
- interface->request[gidx] = NULL;
- exit_request_barrier(itask);
- vipx_err("%d command is timeout", cmd);
- ret = -ETIME;
- goto p_err;
- }
-
- if (interface->reply[gidx].work_param1) {
- interface->request[gidx] = NULL;
- exit_request_barrier(itask);
- vipx_err("%d command is error(%d)\n", cmd, interface->reply[gidx].work_param1);
- ret = interface->reply[gidx].work_param1;
- goto p_err;
- }
-
- __clr_reply(interface, gidx);
-
- interface->request[gidx] = NULL;
- exit_request_barrier(itask);
-
-p_err:
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return ret;
-}
-
-#ifdef NON_BLOCK_INVOKE
-static int __vipx_set_cmd_nblk(struct vipx_interface *interface,
- struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- struct vipx_mailbox_ctrl *mctrl;
- void *payload;
- u32 cmd, gidx, cid, size, type;
- ulong flag;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
- mctrl = interface->private_data;
- cmd = itask->message;
- payload = (void *)itask->param0;
- gidx = itask->param1;
- size = itask->param2;
- type = itask->param3;
- cid = itask->index;
-
- enter_process_barrier(interface);
- interface->process = itask;
-
- ret = vipx_mbox_ready(mctrl, size, type);
- if (ret) {
- interface->process = NULL;
- exit_process_barrier(interface);
- vipx_err("vipx_mbox_ready is fail(%d)", ret);
- goto p_err;
- }
-
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_req_to_pro(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- ret = vipx_mbox_write(mctrl, payload, size, type, gidx, cmd, cid);
- if (ret) {
- interface->process = NULL;
- exit_process_barrier(interface);
- pr_err("vipx_mbox_write_request fail (%d)\n", ret);
- goto p_err;
- }
-
- atomic_inc(&checker);
-
- __send_interrupt(interface);
-
- DEBUG_ISR("[I%ld][MBOX] CMD : %d, ID : %d\n", itask->param1, cmd, cid);
-
- interface->process = NULL;
- exit_process_barrier(interface);
-
-p_err:
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return ret;
-}
-#endif
-
-static int __vipx_interface_cleanup(struct vipx_interface *interface)
-{
- int ret = 0;
- struct vipx_taskmgr *taskmgr;
- struct vipx_task *task;
- ulong flags;
- u32 i;
-
- taskmgr = &interface->taskmgr;
-
- if (taskmgr->req_cnt) {
- vipx_err("[ITF] request count is NOT zero(%d)\n", taskmgr->req_cnt);
- ret += taskmgr->req_cnt;
- }
-
- if (taskmgr->pro_cnt) {
- vipx_err("[ITF] process count is NOT zero(%d)\n", taskmgr->pro_cnt);
- ret += taskmgr->pro_cnt;
- }
-
- if (taskmgr->com_cnt) {
- vipx_err("[ITF] complete count is NOT zero(%d)\n", taskmgr->com_cnt);
- ret += taskmgr->com_cnt;
- }
-
- if (ret) {
- taskmgr_e_barrier_irqs(taskmgr, 0, flags);
-
- for (i = 1; i < taskmgr->tot_cnt; ++i) {
- task = &taskmgr->task[i];
- if (task->state == VIPX_TASK_STATE_FREE)
- continue;
-
- vipx_task_trans_any_to_fre(taskmgr, task);
- ret--;
- }
-
- taskmgr_x_barrier_irqr(taskmgr, 0, flags);
- }
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-void vipx_interface_print(struct vipx_interface *interface)
-{
- DLOG_INIT();
- struct vipx_taskmgr *itaskmgr;
- struct vipx_task *itask, *itemp;
-
- BUG_ON(!interface);
-
- itaskmgr = &interface->taskmgr;
-
- DLOG("REQUEST LIST(%d) :", itaskmgr->req_cnt);
- list_for_each_entry_safe(itask, itemp, &itaskmgr->req_list, list) {
- DLOG(" %d(%d, %d)", itask->index, itask->param1, itask->message);
- }
- vipx_info("%s\n", DLOG_OUT());
-
- DLOG("PROCESS LIST(%d) :", itaskmgr->pro_cnt);
- list_for_each_entry_safe(itask, itemp, &itaskmgr->pro_list, list) {
- DLOG(" %d(%d, %d)", itask->index, itask->param1, itask->message);
- }
- vipx_info("%s\n", DLOG_OUT());
-
- DLOG("COMPLETE LIST(%d) :", itaskmgr->com_cnt);
- list_for_each_entry_safe(itask, itemp, &itaskmgr->com_list, list) {
- DLOG(" %d(%d, %d)", itask->index, itask->param1, itask->message);
- }
- vipx_info("%s\n", DLOG_OUT());
-}
-
-static irqreturn_t interface_isr(int irq, void *data)
-{
- int ret = 0;
- struct vipx_interface *interface;
- struct vipx_mailbox_ctrl *mctrl;
- struct vipx_mailbox_f2h *mbox;
- struct work_struct *work_queue;
- struct vipx_work_list *work_list;
- struct vipx_work *work;
- vipx_msg_t msg_rsp;
-
- interface = (struct vipx_interface *)data;
- mctrl = interface->private_data;
- work_queue = &interface->work_queue;
- work_list = &interface->work_list;
-
- mbox = &mctrl->urgent_stack->f2h;
- DEBUG_ISR("[DEBUG_ISR] status of urgent mbox w(%d) r(%d)\n", mbox->wmsg_idx, mbox->rmsg_idx);
- while (mbox->wmsg_idx != mbox->rmsg_idx) {
- ret = vipx_mbox_read(mctrl, &msg_rsp, VIPX_MTYPE_H2F_URGENT, data);
- if (ret) {
- vipx_err("vipx_mbox_read is fail(%d)\n", ret);
- break;
- }
- DEBUG_ISR("[DEBUG_ISR] read urgent mbox transid %d\n", msg_rsp.transId);
-
- if (msg_rsp.type == BOOTUP_RSP) {
- if (msg_rsp.msg_u.bootup_rsp.error == 0) {
- vipx_info("CM7 bootup complete %d\n", msg_rsp.transId);
- set_bit(VIPX_ITF_STATE_BOOTUP, &interface->state);
- }
- break;
- }
-
- atomic_dec(&checker);
-
- WORK_FR_FREE(work_list, &work);
- if (work) {
- /* TODO */
- work->id = msg_rsp.transId;
- work->message = 0;
- work->work_param0 = 0;
- work->work_param1 = 0;
- work->work_param2 = 0;
- work->work_param3 = 0;
-
- WORK_TO_REPLY(work_list, work);
- if (!work_pending(work_queue))
- schedule_work(work_queue);
- } else {
- vipx_err("free work is empty\n");
- break;
- }
- break;
- }
-
- mbox = &mctrl->stack->f2h;
- DEBUG_ISR("[DEBUG_ISR] status of normal mbox w(%d) r(%d)\n", mbox->wmsg_idx, mbox->rmsg_idx);
- while (mbox->wmsg_idx != mbox->rmsg_idx) {
- ret = vipx_mbox_read(mctrl, &msg_rsp, VIPX_MTYPE_H2F_NORMAL, data);
- if (ret) {
- vipx_err("vipx_mbox_read is fail(%d)\n", ret);
- break;
- }
- DEBUG_ISR("[DEBUG_ISR] read normal mbox transid %d\n", msg_rsp.transId);
-
- atomic_dec(&checker);
-
- WORK_FR_FREE(work_list, &work);
- if (work) {
- /* TODO */
- work->id = msg_rsp.transId;
- work->message = 0;
- work->work_param0 = 0;
- work->work_param1 = 0;
- work->work_param2 = 0;
- work->work_param3 = 0;
-
- WORK_TO_REPLY(work_list, work);
- if (!work_pending(work_queue))
- schedule_work(work_queue);
- } else {
- vipx_err("free work is empty\n");
- break;
- }
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t interface_isr0(int irq, void *data)
-{
- struct vipx_interface *interface = (struct vipx_interface *)data;
- u32 val;
-
- val = readl(interface->regs + 0x8);
- if (val & 0x1) {
- val &= ~(0x1);
- writel(val, interface->regs + 0x8);
- interface_isr(irq, data);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t interface_isr1(int irq, void *data)
-{
- struct vipx_interface *interface = (struct vipx_interface *)data;
- u32 val;
-
- val = readl(interface->regs + 0x8);
- if (val & (0x1 << 0x1)) {
- val &= ~(0x1 << 0x1);
- writel(val, interface->regs + 0x8);
- interface_isr(irq, data);
- }
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_EXYNOS_EMUL_MBOX
-static void interface_timer(unsigned long data)
-{
- struct vipx_interface *interface = (struct vipx_interface *)data;
- struct vipx_mailbox_ctrl *mctrl;
- int ret = 0;
- u32 random;
-
- mctrl = interface->private_data;
-
- if (!test_bit(VIPX_ITF_STATE_START, &interface->state))
- goto exit;
-
- ret = emul_mbox_handler(mctrl);
- if (ret)
- {
- if (atomic_read(&checker) != 0) {
- vipx_info("nothing to handle, checker %d\n", atomic_read(&checker));
- }
- goto exit;
- }
-
- interface_isr(0, (void *)data);
-
-exit:
- get_random_bytes(&random, sizeof(random));
- random = random % TURN_AROUND_TIME;
- mod_timer(&interface->timer, jiffies + random);
-}
-#endif
-
-static void vipx_wq_func(struct work_struct *data)
-{
- struct vipx_interface *interface;
- struct vipx_taskmgr *itaskmgr;
- struct vipx_task *itask;
- struct vipx_work_list *work_list;
- struct vipx_work *work;
- u32 graph_idx;
- ulong flags;
-
- interface = container_of(data, struct vipx_interface, work_queue);
- itaskmgr = &interface->taskmgr;
- work_list = &interface->work_list;
-
- WORK_FR_REPLY(work_list, &work);
- while (work) {
- if (work->id >= VIPX_MAX_TASK) {
- vipx_err("work id is invalid(%d)\n", work->id);
- break;
- }
- itask = &itaskmgr->task[work->id];
-
- if (itask->state != VIPX_TASK_STATE_PROCESS) {
- vipx_err("task(%d, %d, %ld) state is invalid(%d), work(%d, %d, %d)\n",
- itask->message, itask->index, itask->param1, itask->state,
- work->id, work->work_param0, work->work_param1);
- vipx_interface_print(interface);
- BUG();
- }
-
- switch (itask->message) {
- case VIPX_TASK_INIT:
- case VIPX_TASK_DEINIT:
- case VIPX_TASK_CREATE:
- case VIPX_TASK_DESTROY:
- case VIPX_TASK_ALLOCATE:
- case VIPX_TASK_REQUEST:
- graph_idx = itask->param1;
- interface->reply[graph_idx] = *work;
- DEBUG_ISR("[DEBUG_ISR] set reply graph idx(%d)\n", graph_idx);
- __set_reply(interface, graph_idx);
- break;
- case VIPX_TASK_PROCESS:
- DEBUG_ISR("[DEBUG_ISR] TASK PROCESS\n");
-
-#ifdef NON_BLOCK_INVOKE
-#else
- graph_idx = itask->param1;
- interface->reply[graph_idx] = *work;
- __set_reply(interface, graph_idx);
-#endif
-
- taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_trans_pro_to_com(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
- itask->param2 = 0;
- itask->param3 = 0;
- vipx_graphmgr_queue(interface->cookie, itask);
- interface->done_cnt++;
- break;
- default:
- vipx_err("unresolved message(%d) have arrived\n", work->message);
- break;
- }
-
- WORK_TO_FREE(work_list, work);
- WORK_FR_REPLY(work_list, &work);
- }
-}
-
-int vipx_interface_probe(struct vipx_interface *interface,
- struct device *dev,
- void __iomem *regs,
- resource_size_t regs_size,
- u32 irq0, u32 irq1)
-{
- int ret = 0;
- struct vipx_device *device;
- struct vipx_system *system;
- struct vipx_taskmgr *taskmgr;
-
- BUG_ON(!interface);
- BUG_ON(!dev);
- BUG_ON(!regs);
-
- system = container_of(interface, struct vipx_system, interface);
- device = container_of(system, struct vipx_device, system);
-
- init_process_barrier(interface);
- init_waitqueue_head(&interface->reply_queue);
-
- interface->regs = regs;
- interface->regs_size = regs_size;
- interface->cookie = (void *)&device->graphmgr;
- clear_bit(VIPX_ITF_STATE_OPEN, &interface->state);
- clear_bit(VIPX_ITF_STATE_BOOTUP, &interface->state);
- clear_bit(VIPX_ITF_STATE_ENUM, &interface->state);
- clear_bit(VIPX_ITF_STATE_START, &interface->state);
- interface->private_data = kmalloc(sizeof(struct vipx_mailbox_ctrl), GFP_KERNEL);
- if (!interface->private_data) {
- probe_err("kmalloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
- }
-
- ret = devm_request_irq(dev, irq0, interface_isr0, 0, dev_name(dev), interface);
- if (ret) {
- probe_err("devm_request_irq(0) is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = devm_request_irq(dev, irq1, interface_isr1, 0, dev_name(dev), interface);
- if (ret) {
- probe_err("devm_request_irq(1) is fail(%d)\n", ret);
- goto p_err;
- }
-
- taskmgr = &interface->taskmgr;
- taskmgr->id = VIPX_MAX_GRAPH;
- taskmgr->sindex = 0;
- spin_lock_init(&taskmgr->slock);
-
- ret = vipx_task_init(taskmgr, interface);
- if (ret) {
- probe_err("vipx_task_init is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = vipx_slab_init(&interface->slab);
- if (ret) {
- probe_err("vipx_slab_init is fail(%d)\n", ret);
- goto p_err;
- }
-
- INIT_WORK(&interface->work_queue, vipx_wq_func);
- INIT_WORK_LIST(&interface->work_list, VIPX_WORK_MAX_COUNT);
-
-p_err:
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_interface_open(struct vipx_interface *interface,
- void *mbox, size_t mbox_size)
-{
- struct vipx_mailbox_ctrl *mctrl;
- int ret = 0;
- u32 i;
-
- BUG_ON(!interface);
- BUG_ON(!mbox);
-
- interface->mbox = mbox;
- interface->mbox_size = mbox_size;
- memset(interface->mbox, 0x0, interface->mbox_size);
-
- mctrl = interface->private_data;
- mctrl->stack = vipx_mbox_g_stack(mbox, mbox_size);
- mctrl->urgent_stack = vipx_mbox_g_urgent_stack(mbox, mbox_size);
-
- ret = vipx_graphmgr_itf_register(interface->cookie, interface);
- if (ret) {
- vipx_err("vipx_graphmgr_itf_register is fail(%d)\n", ret);
- goto p_err;
- }
-
- atomic_set(&checker, 0);
-
- interface->process = NULL;
- for (i = 0; i < VIPX_MAX_GRAPH; ++i) {
- interface->request[i] = NULL;
- interface->reply[i].valid = 0;
- }
-
- interface->done_cnt = 0;
- set_bit(VIPX_ITF_STATE_OPEN, &interface->state);
- clear_bit(VIPX_ITF_STATE_BOOTUP, &interface->state);
- clear_bit(VIPX_ITF_STATE_ENUM, &interface->state);
- clear_bit(VIPX_ITF_STATE_START, &interface->state);
-
-p_err:
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_interface_close(struct vipx_interface *interface)
-{
- int ret = 0;
-
- BUG_ON(!interface);
-
- ret = __vipx_interface_cleanup(interface);
- if (ret)
- vipx_err("__vipx_interface_cleanup is fail(%d)\n", ret);
-
- ret = vipx_graphmgr_itf_unregister(interface->cookie, interface);
- if (ret)
- vipx_err("vipx_graphmgr_itf_unregister is fail(%d)\n", ret);
-
- interface->mbox = 0;
- interface->mbox_size = 0;
-
- clear_bit(VIPX_ITF_STATE_OPEN, &interface->state);
- clear_bit(VIPX_ITF_STATE_BOOTUP, &interface->state);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_interface_start(struct vipx_interface *interface)
-{
- int ret = 0;
-
- set_bit(VIPX_ITF_STATE_START, &interface->state);
-
-#ifdef CONFIG_EXYNOS_EMUL_MBOX
- init_timer(&interface->timer);
- interface->timer.expires = jiffies + TURN_AROUND_TIME;
- interface->timer.data = (unsigned long)interface;
- interface->timer.function = interface_timer;
- add_timer(&interface->timer);
-#endif
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_interface_stop(struct vipx_interface *interface)
-{
- int errcnt = 0;
- struct vipx_taskmgr *itaskmgr;
- u32 retry;
-
- itaskmgr = &interface->taskmgr;
-
- retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && itaskmgr->req_cnt) {
- vipx_warn("waiting %d request completion...(%d)\n", itaskmgr->req_cnt, retry);
- msleep(1);
- }
-
- if (!retry) {
- vipx_err("request completion is fail\n");
- vipx_interface_print(interface);
- errcnt++;
- }
-
- retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && itaskmgr->pro_cnt) {
- vipx_warn("waiting %d process completion...(%d)\n", itaskmgr->pro_cnt, retry);
- msleep(1);
- }
-
- if (!retry) {
- vipx_err("process completion is fail\n");
- vipx_interface_print(interface);
- errcnt++;
- }
-
- retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && itaskmgr->com_cnt) {
- vipx_warn("waiting %d complete completion...(%d)\n", itaskmgr->com_cnt, retry);
- msleep(1);
- }
-
- if (!retry) {
- vipx_err("complete completion is fail\n");
- vipx_interface_print(interface);
- errcnt++;
- }
-
-#ifdef CONFIG_EXYNOS_EMUL_MBOX
- del_timer(&interface->timer);
-#endif
- clear_bit(VIPX_ITF_STATE_START, &interface->state);
-
- vipx_info("%s:\n", __func__);
- return 0;
-}
-
-int vipx_hw_wait_bootup(struct vipx_interface *interface)
-{
- int ret = 0;
- struct vipx_system *system = container_of(interface, struct vipx_system, interface);
-
- BUG_ON(!interface);
-
-#ifdef DUMP_DEBUG_LOG_REGION
- int try_cnt = 10;
- struct vipx_binary *binary = &system->binary;
- while (try_cnt && !test_bit(VIPX_ITF_STATE_BOOTUP, &interface->state)) {
- msleep(1);
- try_cnt--;
- vipx_info("%s(): wait CM7 bootup\n",__func__);
- }
-#endif
-
- vipx_info("debug kva(0x%p), dva(0x%x), size(%ld)\n", system->memory.info.kvaddr_debug, (u32)system->memory.info.dvaddr_debug, system->memory.info.pb_debug->size);
-
-#ifdef DUMP_DEBUG_LOG_REGION
- if (try_cnt == 0)
- {
- if (!IS_ERR_OR_NULL(system->memory.info.kvaddr_debug)) {
- ret = vipx_binary_write(binary, VIPX_FW_PATH1, "vipx_log.bin",
- system->memory.info.kvaddr_debug, VIPX_DEBUG_SIZE);
- if (ret)
- vipx_err("vipx_binary_write is fail(%d)\n", ret);
- }
- ret = -EINVAL;
- }
-#endif
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_hw_enum(struct vipx_interface *interface)
-{
- int ret = 0;
-
- BUG_ON(!interface);
-
- set_bit(VIPX_ITF_STATE_ENUM, &interface->state);
-
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
-}
-
-/* Init Req */
-int vipx_hw_init(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- ulong flag;
- vipx_msg_t payload;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
-
- payload.transId = itask->index;
- payload.type = INIT_REQ;
-
- /* TODO */
- /* fill init_req */
-#if 0
- payload.msg_u.init_req.p_vip_core_bin = 0;
- payload.msg_u.init_req.sz_vip_core_bin = 0;
- payload.msg_u.init_req.p_cc_log = 0;
- payload.msg_u.init_req.sz_cc_log = 0;
-#endif
-
- payload.msg_u.init_req.p_cc_heap = 0;
- payload.msg_u.init_req.sz_cc_heap = 0;
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_URGENT;
-
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-/* Power doen Req */
-int vipx_hw_deinit(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- ulong flag;
- vipx_msg_t payload;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
-
- /* TODO */
- payload.transId = itask->index;
- payload.type = POWER_DOWN_REQ;
-
- /* fill powerdown_graph_req */
- payload.msg_u.powerdown_req.valid = 1;
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_URGENT;
-
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-/* Create graph */
-int vipx_hw_create(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
-
- struct vipx_taskmgr *itaskmgr;
- ulong flag;
- vipx_msg_t payload;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
-
- /* TODO */
- payload.transId = itask->index;
- payload.type = CREATE_GRAPH_REQ;
-
- /* fill create_graph_req */
- payload.msg_u.create_graph_req.p_graph = 0;
- payload.msg_u.create_graph_req.sz_graph = 0;
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_NORMAL;
-
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- vipx_info("%s:%d\n", __func__, ret);
-
- return ret;
-}
-
-/* Destroy graph */
-int vipx_hw_destroy(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- struct vipx_system *system = container_of(interface, struct vipx_system, interface);
- ulong flag;
- vipx_msg_t payload;
- u32 heap_size;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
-
- /* TODO */
- payload.transId = itask->index;
- payload.type = DESTROY_GRAPH_REQ;
-
- /* fill destroy_graph_req */
- payload.msg_u.destroy_graph_req.graph_id = itask->param0;
- vipx_dbg("[%s] graph_id(%d)\n", __func__, payload.msg_u.destroy_graph_req.graph_id);
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_NORMAL;
-
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-
- if (payload.msg_u.destroy_graph_req.graph_id < SCENARIO_DE_CAPTURE)
- heap_size = VIPX_CM7_HEAP_SIZE_PREVIEW;
- else if ((payload.msg_u.destroy_graph_req.graph_id == SCENARIO_ENF) ||
- (payload.msg_u.destroy_graph_req.graph_id == SCENARIO_ENF_YUV))
- heap_size = VIPX_CM7_HEAP_SIZE_ENF;
- else
- heap_size = VIPX_CM7_HEAP_SIZE_CAPTURE;
-
- vipx_free_heap(&system->memory, &system->binary, heap_size);
-
- vipx_info("checker %d\n", atomic_read(&checker));
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_hw_config(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- struct vipx_system *system = container_of(interface, struct vipx_system, interface);
- ulong flag;
- vipx_msg_t payload;
- int i = 0, j = 0;
- int num_inputs = 0;
- int num_outputs = 0;
- int cur_width = 0;
- int cur_height = 0;
- dma_addr_t dvaddr_heap = 0;
- u32 heap_size;
-
- struct vipx_format_list *in_list;
- struct vipx_format_list *out_list;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
- in_list = (void *)itask->param2;
- out_list = (void *)itask->param3;
-
- payload.transId = itask->index;
- payload.type = SET_GRAPH_REQ;
-
- /* fill set_graph_req */
- payload.msg_u.set_graph_req.graph_id = itask->param0;
- vipx_dbg("[%s] graph_id(%d)\n", __func__, payload.msg_u.set_graph_req.graph_id);
-
- /* TODO : How to update depth field */
- for (i = 0; i < in_list->count; i++) {
- vipx_info("in-buf[%d], fmt %d, plane %d, size: %dx%d\n",
- i, in_list->formats[i].format,
- in_list->formats[i].plane,
- in_list->formats[i].width,
- in_list->formats[i].height);
- for (j = 0; j < in_list->formats[i].plane; j++) {
- ret = __get_size_from(&in_list->formats[i], j, &cur_width, &cur_height);
- if (ret) {
- vipx_err("__get_size_from fail (%d)\n", ret);
- goto p_err;
- }
- payload.msg_u.set_graph_req.input_width[num_inputs] = cur_width;
- payload.msg_u.set_graph_req.input_height[num_inputs] = cur_height;
- payload.msg_u.set_graph_req.input_depth[num_inputs] = 0;
- vipx_info("input[%d] WxH(%dx%d), depth(%d)\n", num_inputs,
- payload.msg_u.set_graph_req.input_width[num_inputs],
- payload.msg_u.set_graph_req.input_height[num_inputs],
- payload.msg_u.set_graph_req.input_depth[num_inputs]
- );
- num_inputs++;
- }
- }
-
- for (i = 0; i < out_list->count; i++) {
- vipx_info("out-buf[%d], fmt %d, plane %d, size: %dx%d\n",
- i, out_list->formats[i].format,
- out_list->formats[i].plane,
- out_list->formats[i].width,
- out_list->formats[i].height);
- for (j = 0; j < out_list->formats[i].plane; j++) {
- ret = __get_size_from(&out_list->formats[i], j, &cur_width, &cur_height);
- if (ret) {
- vipx_err("__get_size_from fail (%d)\n", ret);
- goto p_err;
- }
- payload.msg_u.set_graph_req.output_width[num_outputs] = cur_width;
- payload.msg_u.set_graph_req.output_height[num_outputs] = cur_height;
- payload.msg_u.set_graph_req.output_depth[num_outputs] = 0;
- vipx_info("output[%d] WxH(%dx%d), depth(%d)\n", num_outputs,
- payload.msg_u.set_graph_req.output_width[num_outputs],
- payload.msg_u.set_graph_req.output_height[num_outputs],
- payload.msg_u.set_graph_req.output_depth[num_outputs]
- );
- num_outputs++;
- }
- }
- // Allocate and assign heap
- if (payload.msg_u.set_graph_req.graph_id < SCENARIO_DE_CAPTURE)
- heap_size = VIPX_CM7_HEAP_SIZE_PREVIEW;
- else if ((payload.msg_u.destroy_graph_req.graph_id == SCENARIO_ENF_UV) ||
- (payload.msg_u.destroy_graph_req.graph_id == SCENARIO_ENF) ||
- (payload.msg_u.destroy_graph_req.graph_id == SCENARIO_ENF_YUV))
- heap_size = VIPX_CM7_HEAP_SIZE_ENF;
- else
- heap_size = VIPX_CM7_HEAP_SIZE_CAPTURE;
- dvaddr_heap = vipx_allocate_heap(&system->memory, heap_size);
-
- payload.msg_u.set_graph_req.p_temp = dvaddr_heap;
- payload.msg_u.set_graph_req.sz_temp = heap_size;
- vipx_info("CC heap dva(0x%x), size(%d)\n", (u32)dvaddr_heap, heap_size);
-
- payload.msg_u.set_graph_req.num_inputs = num_inputs;
- payload.msg_u.set_graph_req.num_outputs = num_outputs;
- vipx_info("input cnt(%d), output cnt(%d)\n", num_inputs, num_outputs);
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_NORMAL;
-
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_hw_process(struct vipx_interface *interface, struct vipx_task *itask)
-{
- int ret = 0;
- struct vipx_taskmgr *itaskmgr;
- ulong flag;
- vipx_msg_t payload;
- int i = 0, j = 0;
- int num_inputs = 0;
- int num_outputs = 0;
- int num_user_params = 0;
-
- struct vb_container_list *incl;
- struct vb_container_list *otcl;
-
- BUG_ON(!interface);
- BUG_ON(!itask);
-
- itaskmgr = &interface->taskmgr;
- incl = itask->incl;
- otcl = itask->otcl;
-
- payload.transId = itask->index;
- payload.type = INVOKE_GRAPH_REQ;
-
- /* use itask->incl and itask->otcl */
- payload.msg_u.invoke_graph_req.graph_id = itask->param0;
- vipx_dbg("[%s] graph_id(%d)\n", __func__, payload.msg_u.invoke_graph_req.graph_id);
-
- /* TODO : We need to consider to support multi plain buffer */
- for (i = 0; i < incl->count; i++) {
- for (j = 0; j < incl->containers[i].count; j++) {
- payload.msg_u.invoke_graph_req.p_input[num_inputs] = incl->containers[i].buffers[j].dvaddr;
- vipx_dbg("input[%d] buffer dvaddr(%x)\n", num_inputs,
- payload.msg_u.invoke_graph_req.p_input[num_inputs]);
- num_inputs++;
- }
- }
- for (i = 0; i < otcl->count; i++) {
- for (j = 0; j < otcl->containers[i].count; j++) {
- payload.msg_u.invoke_graph_req.p_output[num_outputs] = otcl->containers[i].buffers[j].dvaddr;
- vipx_dbg("output[%d] buffer dvaddr(%x)\n", num_outputs,
- payload.msg_u.invoke_graph_req.p_output[num_outputs]);
- num_outputs++;
- }
- }
- for (num_user_params = 0; num_user_params < MAX_NUM_OF_USER_PARAMS; num_user_params++) {
- payload.msg_u.invoke_graph_req.user_params[num_user_params] = incl->user_params[num_user_params];
- vipx_dbg("user_params[%d] = (%d)\n", num_user_params,
- payload.msg_u.invoke_graph_req.user_params[num_user_params]);
- }
-
- payload.msg_u.invoke_graph_req.num_inputs = num_inputs;
- payload.msg_u.invoke_graph_req.num_outputs = num_outputs;
- vipx_dbg("input cnt(%d), output cnt(%d)\n", num_inputs, num_outputs);
-
- itask->param0 = (ulong)&payload;
- /* Do not update param1. param1 is graph_idx */
- /* itask->param1 = 0; */
- itask->param2 = sizeof(vipx_msg_t);
- itask->param3 = VIPX_MTYPE_H2F_NORMAL;
-
-#ifdef NON_BLOCK_INVOKE
- ret = __vipx_set_cmd_nblk(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd_nblk is fail(%d)\n", ret);
- goto p_err;
- }
-#else
- ret = __vipx_set_cmd(interface, itask);
- if (ret) {
- vipx_err("__vipx_set_cmd is fail(%d)\n", ret);
- goto p_err;
- }
-#endif
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return 0;
-
-p_err:
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
- vipx_task_trans_any_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
-
- return ret;
-}
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-
-#include "vipx-config.h"
-#include "vipx-io.h"
-#include "vipx-mailbox.h"
-#include "vipx-interface.h"
-
-#include <interface/ap_vip_if.h>
-
-#define INDEX(x) (x % MAX_MESSAGE_CNT)
-
-struct vipx_mailbox_stack * vipx_mbox_g_stack(void *mbox, u32 mbox_size)
-{
- vipx_info("sizeof mbox stack (%lx) address of normal mbox (%p)\n",
- sizeof(struct vipx_mailbox_stack), mbox);
- return (struct vipx_mailbox_stack *)mbox;
-}
-
-struct vipx_mailbox_stack * vipx_mbox_g_urgent_stack(void *mbox, u32 mbox_size)
-{
- vipx_info("address of urgent mbox (%p)\n", ((char *)mbox + sizeof(struct vipx_mailbox_stack)));
- return (struct vipx_mailbox_stack *)((char *)mbox + sizeof(struct vipx_mailbox_stack));
-}
-
-static u32 __vipx_mbox_g_freesize(struct vipx_mailbox_h2f *mbox)
-{
- u32 wmsg_idx = 0;
- u32 rmsg_idx = 0;
- u32 free_size = 0;
-
- BUG_ON(!mbox);
-
- wmsg_idx = mbox->wmsg_idx;
- rmsg_idx = mbox->rmsg_idx;
-
- free_size = MAX_MESSAGE_CNT - (wmsg_idx - rmsg_idx);
-
- BUG_ON(free_size < 0);
-
- return free_size;
-}
-
-int vipx_mbox_ready(struct vipx_mailbox_ctrl *mctrl,
- size_t size, u32 type)
-{
- u32 try_count;
- struct vipx_mailbox_h2f *mbox;
- u16 free_size16;
-
- BUG_ON(!mctrl);
- BUG_ON(!IS_ALIGNED(size, 2));
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->h2f;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->h2f;
- } else {
- vipx_err("invalid type(%d)\n", type);
- return -EINVAL;
- }
- try_count = 1000;
-
- free_size16 = __vipx_mbox_g_freesize(mbox);
- while (--try_count && (free_size16 == 0)) {
- vipx_warn("mbox is not ready(freesize %d)...(%d)\n",
- free_size16, try_count);
- udelay(10);
- free_size16 = __vipx_mbox_g_freesize(mbox);
- }
-
- if (try_count)
- return 0;
- else
- return -EBUSY;
-}
-
-int vipx_mbox_wait_reply(struct vipx_mailbox_ctrl *mctrl,
- u32 cmd, u32 type)
-{
- int ret = 0;
- struct vipx_mailbox_f2h *mbox;
- int try_count;
-
- if (type == VIPX_MTYPE_F2H_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_F2H_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("cmd(%d) has invalid type(%d)\n", cmd, type);
- ret = -EINVAL;
- goto p_err;
- }
-
- try_count = 1000;
- while (--try_count && (mbox->wmsg_idx == mbox->rmsg_idx)) {
- vipx_warn("waiting vipx reply(%d, %d)...(%d)\n", mbox->wmsg_idx, mbox->rmsg_idx, try_count);
- msleep(1);
- }
-
- if (try_count <= 0) {
- vipx_err("waiting vipx reply is timeout\n");
- ret = -EINVAL;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-int vipx_mbox_write(struct vipx_mailbox_ctrl *mctrl,
- void *payload, size_t size, u32 type, u32 gid, u32 cmd, u32 cid)
-{
- int ret = 0;
- struct vipx_mailbox_h2f *mbox;
- u32 *wptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
- BUG_ON(!IS_ALIGNED(size, 2));
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->h2f;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->h2f;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
- wptr = (void *)&mbox->msg[mbox->wmsg_idx % MAX_MESSAGE_CNT];
-
- mem2iocpy(wptr, payload, size);
-
- /* increment actual write pointer */
- mbox->wmsg_idx++;
-
-#ifdef DBG_INTERFACE_ISR
- vipx_info("[I%d][MBOX][H2F] %d command(type: %d, cid: %d, wmsg_idx: %d, rmsg_idx: %d)\n", gid,
- cmd, type, cid, mbox->wmsg_idx, mbox->rmsg_idx);
-#endif
-
-p_err:
- return ret;
-}
-
-int vipx_mbox_read(struct vipx_mailbox_ctrl *mctrl,
- void *payload, u32 type, void *debug_data)
-{
- int ret = 0;
- struct vipx_mailbox_f2h *mbox;
- u32 *rptr;
-
- BUG_ON(!mctrl);
- BUG_ON(!payload);
-
- if (type == VIPX_MTYPE_H2F_NORMAL) {
- mbox = &mctrl->stack->f2h;
- } else if (type == VIPX_MTYPE_H2F_URGENT) {
- mbox = &mctrl->urgent_stack->f2h;
- } else {
- vipx_err("invalid type(%d)\n", type);
- ret = -EINVAL;
- goto p_err;
- }
- rptr = (void *)&mbox->msg[mbox->rmsg_idx % MAX_MESSAGE_CNT];
-
- io2memcpy(payload, rptr, sizeof(vipx_msg_t));
-
- /* increment actual read pointer */
- mbox->rmsg_idx++;
-
-p_err:
- return ret;
-}
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-
-#include "vipx-config.h"
-#include "vipx-slab.h"
-
-int vipx_slab_init(struct vipx_slab_allocator *allocator)
-{
- int ret = 0, i;
- char name[100];
- size_t size;
-
- allocator->cache_size[0] = sizeof(vipx_msg_t);
-
- for (i = 0; i < VIPX_SLAB_MAX_LEVEL; ++i) {
- size = allocator->cache_size[i];
- snprintf(name, sizeof(name), "vipx=size-%zd", size);
-
- allocator->cache[i] = kmem_cache_create(name, size,
- ARCH_KMALLOC_MINALIGN, SLAB_POISON | SLAB_PANIC, NULL);
- if (!allocator->cache[i]) {
- probe_err("kmem_cache_create(%zd) is fail\n", size);
- ret = -ENOMEM;
- goto p_err;
- }
- }
-
-p_err:
- return ret;
-}
-
-int vipx_slab_alloc(struct vipx_slab_allocator *allocator, void **target, size_t size)
-{
- int ret = 0, i;
-
- for (i = 0; i < VIPX_SLAB_MAX_LEVEL; ++i) {
- if (size <= allocator->cache_size[i])
- break;
- }
-
- if (i >= VIPX_SLAB_MAX_LEVEL) {
- vipx_err("alloc size is invalid(%zd)\n", size);
- ret= -EINVAL;
- goto p_err;
- }
-
- *target = kmem_cache_alloc(allocator->cache[i], GFP_KERNEL);
- if (!(*target)) {
- vipx_err("kmem_cache_alloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-int vipx_slab_free(struct vipx_slab_allocator *allocator, void *target, size_t size)
-{
- int ret = 0, i;
-
- for (i = 0; i < VIPX_SLAB_MAX_LEVEL; ++i) {
- if (size <= allocator->cache_size[i])
- break;
- }
-
- if (i >= VIPX_SLAB_MAX_LEVEL) {
- vipx_err("alloc size is invalid(%zd)\n", size);
- BUG();
- }
-
- kmem_cache_free(allocator->cache[i], target);
-
- return ret;
-}
-config EXYNOS_VIPX_PLATFORM
+menuconfig EXYNOS_VIPX_PLATFORM
bool "Select Platform"
help
This is a selection of platform
config EXYNOS_VIPX_EXYNOS9610
bool "Use Exynos9610 platform"
depends on EXYNOS_VIPX_PLATFORM
+ depends on SOC_EXYNOS9610
help
This is a exynos9610 platform
-obj-y += vipx-exynos.o
-obj-$(CONFIG_EXYNOS_VIPX_EXYNOS9610) += exynos9610/
+#
+# Makefile for the VIPx driver depends on platform
+#
-EXTRA_CFLAGS += -Idrivers/vision/vipx/include
+obj-$(CONFIG_EXYNOS_VIPX_EXYNOS9610) += exynos9610/
-obj-y += vipx-exynos9610.o
+#
+# Makefile for the VIPx driver depends on exynos9610 platform
+#
-EXTRA_CFLAGS += -Idrivers/vision/vipx/include
+obj-y += vipx-clk-exynos9610.o
+obj-y += vipx-ctrl-exynos9610.o
+
+ccflags-y += -Idrivers/vision/vipx
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPX driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-system.h"
+#include "platform/vipx-clk.h"
+
+enum vipx_clk_id {
+ VIPX_CLK_UMUX_CLKCMU_VIPX1_BUS,
+ VIPX_CLK_GATE_VIPX1_QCH,
+ VIPX_CLK_UMUX_CLKCMU_VIPX2_BUS,
+ VIPX_CLK_GATE_VIPX2_QCH,
+ VIPX_CLK_GATE_VIPX2_QCH_LOCAL,
+ VIPX_CLK_MAX
+};
+
+static struct vipx_clk vipx_exynos9610_clk_array[] = {
+ { NULL, "UMUX_CLKCMU_VIPX1_BUS" },
+ { NULL, "GATE_VIPX1_QCH" },
+ { NULL, "UMUX_CLKCMU_VIPX2_BUS" },
+ { NULL, "GATE_VIPX2_QCH" },
+ { NULL, "GATE_VIPX2_QCH_LOCAL" },
+};
+
+static int vipx_exynos9610_clk_init(struct vipx_system *sys)
+{
+ int ret;
+ int index;
+ const char *name;
+ struct clk *clk;
+
+ vipx_enter();
+ if (ARRAY_SIZE(vipx_exynos9610_clk_array) != VIPX_CLK_MAX) {
+ ret = -EINVAL;
+ vipx_err("clock array size is invalid (%zu/%d)\n",
+ ARRAY_SIZE(vipx_exynos9610_clk_array),
+ VIPX_CLK_MAX);
+ goto p_err;
+ }
+
+ for (index = 0; index < VIPX_CLK_MAX; ++index) {
+ name = vipx_exynos9610_clk_array[index].name;
+ if (!name) {
+ ret = -EINVAL;
+ vipx_err("clock name is NULL (%d)\n", index);
+ goto p_err;
+ }
+
+ clk = devm_clk_get(sys->dev, name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ vipx_err("Failed to get clock(%s/%d) (%d)\n",
+ name, index, ret);
+ goto p_err;
+ }
+
+ vipx_exynos9610_clk_array[index].clk = clk;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static void vipx_exynos9610_clk_deinit(struct vipx_system *sys)
+{
+ vipx_enter();
+ vipx_leave();
+}
+
+static int vipx_exynos9610_clk_cfg(struct vipx_system *sys)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_exynos9610_clk_on(struct vipx_system *sys)
+{
+ int ret;
+ int index;
+ const char *name;
+ struct clk *clk;
+
+ vipx_enter();
+ for (index = 0; index < VIPX_CLK_MAX; ++index) {
+ name = vipx_exynos9610_clk_array[index].name;
+ clk = vipx_exynos9610_clk_array[index].clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ vipx_err("Failed to enable clock(%s/%d) (%d)\n",
+ name, index, ret);
+ goto p_err;
+ }
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static int vipx_exynos9610_clk_off(struct vipx_system *sys)
+{
+ int index;
+ const char *name;
+ struct clk *clk;
+
+ vipx_enter();
+ for (index = VIPX_CLK_MAX - 1; index >= 0; --index) {
+ name = vipx_exynos9610_clk_array[index].name;
+ clk = vipx_exynos9610_clk_array[index].clk;
+
+ clk_disable_unprepare(clk);
+ }
+
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_exynos9610_clk_dump(struct vipx_system *sys)
+{
+ int index;
+ const char *name;
+ struct clk *clk;
+ unsigned long freq;
+
+ vipx_enter();
+ for (index = 0; index < VIPX_CLK_MAX; ++index) {
+ name = vipx_exynos9610_clk_array[index].name;
+ clk = vipx_exynos9610_clk_array[index].clk;
+
+ freq = clk_get_rate(clk);
+ vipx_info("%30s(%d) : %9lu Hz\n", name, index, freq);
+ }
+
+ vipx_leave();
+ return 0;
+}
+
+const struct vipx_clk_ops vipx_clk_ops = {
+ .clk_init = vipx_exynos9610_clk_init,
+ .clk_deinit = vipx_exynos9610_clk_deinit,
+ .clk_cfg = vipx_exynos9610_clk_cfg,
+ .clk_on = vipx_exynos9610_clk_on,
+ .clk_off = vipx_exynos9610_clk_off,
+ .clk_dump = vipx_exynos9610_clk_dump
+};
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "vipx-log.h"
+#include "vipx-system.h"
+#include "platform/vipx-ctrl.h"
+
+enum vipx_reg_ss1_id {
+ REG_SS1_VERSION_ID,
+ REG_SS1_QCHANNEL,
+ REG_SS1_IRQ_FROM_DEVICE,
+ REG_SS1_IRQ0_TO_DEVICE,
+ REG_SS1_IRQ1_TO_DEVICE,
+ REG_SS1_GLOBAL_CTRL,
+ REG_SS1_CORTEX_CONTROL,
+ REG_SS1_CPU_CTRL,
+ REG_SS1_PROGRAM_COUNTER,
+ REG_SS1_DEBUG0,
+ REG_SS1_DEBUG1,
+ REG_SS1_DEBUG2,
+ REG_SS1_DEBUG3,
+ REG_SS1_MAX
+};
+
+enum vipx_reg_ss2_id {
+ REG_SS2_QCHANNEL,
+ REG_SS2_GLOBAL_CTRL,
+ REG_SS2_MAX
+};
+
+static const struct vipx_reg regs_ss1[] = {
+ {0x0000, "SS1_VERSION_ID"},
+ {0x0004, "SS1_QCHANNEL"},
+ {0x0008, "SS1_IRQ_FROM_DEVICE"},
+ {0x000C, "SS1_IRQ0_TO_DEVICE"},
+ {0x0010, "SS1_IRQ1_TO_DEVICE"},
+ {0x0014, "SS1_GLOBAL_CTRL"},
+ {0x0018, "SS1_CORTEX_CONTROL"},
+ {0x001C, "SS1_CPU_CTRL"},
+ {0x0044, "SS1_PROGRAM_COUNTER"},
+ {0x0048, "SS1_DEBUG0"},
+ {0x004C, "SS1_DEBUG1"},
+ {0x0050, "SS1_DEBUG2"},
+ {0x0054, "SS1_DEBUG3"},
+};
+
+static const struct vipx_reg regs_ss2[] = {
+ {0x0000, "SS2_QCHANNEL"},
+ {0x0004, "SS2_GLOBAL_CTRL"},
+};
+
+static int vipx_exynos9610_ctrl_reset(struct vipx_system *sys)
+{
+ void __iomem *ss1, *ss2;
+ unsigned int val;
+
+ vipx_enter();
+ ss1 = sys->reg_ss[REG_SS1];
+ ss2 = sys->reg_ss[REG_SS2];
+
+ /* TODO: check delay */
+ val = readl(ss1 + regs_ss1[REG_SS1_GLOBAL_CTRL].offset);
+ writel(val | 0xF1, ss1 + regs_ss1[REG_SS1_GLOBAL_CTRL].offset);
+ udelay(10);
+
+ val = readl(ss2 + regs_ss2[REG_SS2_GLOBAL_CTRL].offset);
+ writel(val | 0xF1, ss2 + regs_ss2[REG_SS2_GLOBAL_CTRL].offset);
+ udelay(10);
+
+ val = readl(ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+ writel(val | 0x1, ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+ udelay(10);
+
+ val = readl(ss1 + regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+ writel(val | 0x1, ss1 + regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+ udelay(10);
+
+ writel(0x0, ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+ udelay(10);
+
+ val = readl(ss1 + regs_ss1[REG_SS1_QCHANNEL].offset);
+ writel(val | 0x1, ss1 + regs_ss1[REG_SS1_QCHANNEL].offset);
+ udelay(10);
+
+ val = readl(ss2 + regs_ss2[REG_SS2_QCHANNEL].offset);
+ writel(val | 0x1, ss2 + regs_ss2[REG_SS2_QCHANNEL].offset);
+ udelay(10);
+
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_exynos9610_ctrl_start(struct vipx_system *sys)
+{
+ vipx_enter();
+ writel(0x0, sys->reg_ss[REG_SS1] +
+ regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_exynos9610_ctrl_get_irq(struct vipx_system *sys, int direction)
+{
+ unsigned int offset;
+ int val;
+
+ vipx_enter();
+ if (direction == IRQ0_TO_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ0_TO_DEVICE].offset;
+ val = readl(sys->reg_ss[REG_SS1] + offset);
+ } else if (direction == IRQ1_TO_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ1_TO_DEVICE].offset;
+ val = readl(sys->reg_ss[REG_SS1] + offset);
+ } else if (direction == IRQ_FROM_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ_FROM_DEVICE].offset;
+ val = readl(sys->reg_ss[REG_SS1] + offset);
+ } else {
+ val = -EINVAL;
+ vipx_err("Failed to get irq due to invalid direction (%d)\n",
+ direction);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return val;
+p_err:
+ return val;
+}
+
+static int vipx_exynos9610_ctrl_set_irq(struct vipx_system *sys, int direction,
+ int val)
+{
+ int ret;
+ unsigned int offset;
+
+ vipx_enter();
+ if (direction == IRQ0_TO_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ0_TO_DEVICE].offset;
+ writel(val, sys->reg_ss[REG_SS1] + offset);
+ } else if (direction == IRQ1_TO_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ1_TO_DEVICE].offset;
+ writel(val, sys->reg_ss[REG_SS1] + offset);
+ } else if (direction == IRQ_FROM_DEVICE) {
+ ret = -EINVAL;
+ vipx_err("Host can't set irq from device (%d)\n", direction);
+ goto p_err;
+ } else {
+ ret = -EINVAL;
+ vipx_err("Failed to set irq due to invalid direction (%d)\n",
+ direction);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static int vipx_exynos9610_ctrl_clear_irq(struct vipx_system *sys,
+ int direction, int val)
+{
+ int ret;
+ unsigned int offset;
+
+ vipx_enter();
+ if (direction == IRQ0_TO_DEVICE || direction == IRQ1_TO_DEVICE) {
+ ret = -EINVAL;
+ vipx_err("Irq to device must be cleared at device (%d)\n",
+ direction);
+ goto p_err;
+ } else if (direction == IRQ_FROM_DEVICE) {
+ offset = regs_ss1[REG_SS1_IRQ_FROM_DEVICE].offset;
+ writel(val, sys->reg_ss[REG_SS1] + offset);
+ } else {
+		ret = -EINVAL;
+ vipx_err("direction of irq is invalid (%d)\n", direction);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static int vipx_exynos9610_ctrl_debug_dump(struct vipx_system *sys)
+{
+ void __iomem *ss1, *ss2;
+ int idx;
+ unsigned int val;
+ const char *name;
+
+ vipx_enter();
+ ss1 = sys->reg_ss[REG_SS1];
+ ss2 = sys->reg_ss[REG_SS2];
+
+ vipx_info("SS1 SFR Dump (count:%d)\n", REG_SS1_MAX);
+ for (idx = 0; idx < REG_SS1_MAX; ++idx) {
+ val = readl(ss1 + regs_ss1[idx].offset);
+ name = regs_ss1[idx].name;
+ vipx_info("[%2d][%20s] 0x%08x\n", idx, name, val);
+ }
+
+ vipx_info("SS2 SFR Dump (count:%d)\n", REG_SS2_MAX);
+ for (idx = 0; idx < REG_SS2_MAX; ++idx) {
+ val = readl(ss2 + regs_ss2[idx].offset);
+ name = regs_ss2[idx].name;
+ vipx_info("[%2d][%20s] 0x%08x\n", idx, name, val);
+ }
+
+ vipx_leave();
+ return 0;
+}
+
+const struct vipx_ctrl_ops vipx_ctrl_ops = {
+ .reset = vipx_exynos9610_ctrl_reset,
+ .start = vipx_exynos9610_ctrl_start,
+ .get_irq = vipx_exynos9610_ctrl_get_irq,
+ .set_irq = vipx_exynos9610_ctrl_set_irq,
+ .clear_irq = vipx_exynos9610_ctrl_clear_irq,
+ .debug_dump = vipx_exynos9610_ctrl_debug_dump,
+};
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPX driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include "vipx-config.h"
-#include "vipx-exynos.h"
-#include "vipx-exynos9610.h"
-#include "vipx-debug.h"
-
-#define CLK_INDEX(name) VIPX_##name
-#define REGISTER_CLK(name) [CLK_INDEX(name)] = {#name, NULL}
-
-extern int vipx_clk_set_rate(struct device *dev, u32 index, ulong frequency);
-extern ulong vipx_clk_get_rate(struct device *dev, u32 index);
-extern int vipx_clk_enable(struct device *dev, u32 index);
-extern int vipx_clk_disable(struct device *dev, u32 index);
-
-enum vipx_clk_index {
- CLK_INDEX(UMUX_CLKCMU_VIPX1_BUS),
- CLK_INDEX(GATE_VIPX1_QCH),
- CLK_INDEX(UMUX_CLKCMU_VIPX2_BUS),
- CLK_INDEX(GATE_VIPX2_QCH),
- CLK_INDEX(GATE_VIPX2_QCH_LOCAL)
-};
-
-struct vipx_clk vipx_clk_array[] = {
- REGISTER_CLK(UMUX_CLKCMU_VIPX1_BUS),
- REGISTER_CLK(GATE_VIPX1_QCH),
- REGISTER_CLK(UMUX_CLKCMU_VIPX2_BUS),
- REGISTER_CLK(GATE_VIPX2_QCH),
- REGISTER_CLK(GATE_VIPX2_QCH_LOCAL)
-};
-
-const u32 vipx_clk_array_size = ARRAY_SIZE(vipx_clk_array);
-
-int vipx_exynos_clk_cfg(struct vipx_exynos *exynos)
-{
- return 0;
-}
-
-int vipx_exynos_clk_on(struct vipx_exynos *exynos)
-{
- vipx_clk_enable(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX1_BUS));
- vipx_clk_get_rate(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX1_BUS));
-
- vipx_clk_enable(exynos->dev, CLK_INDEX(GATE_VIPX1_QCH));
- vipx_clk_get_rate(exynos->dev, CLK_INDEX(GATE_VIPX1_QCH));
-
- vipx_clk_enable(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX2_BUS));
- vipx_clk_get_rate(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX2_BUS));
-
- vipx_clk_enable(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH));
- vipx_clk_get_rate(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH));
-
- vipx_clk_enable(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH_LOCAL));
- vipx_clk_get_rate(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH_LOCAL));
-
- vipx_info("[%s]\n", __func__);
- return 0;
-}
-
-int vipx_exynos_clk_off(struct vipx_exynos *exynos)
-{
- vipx_clk_disable(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX1_BUS));
- vipx_clk_disable(exynos->dev, CLK_INDEX(GATE_VIPX1_QCH));
- vipx_clk_disable(exynos->dev, CLK_INDEX(UMUX_CLKCMU_VIPX2_BUS));
- vipx_clk_disable(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH));
- vipx_clk_disable(exynos->dev, CLK_INDEX(GATE_VIPX2_QCH_LOCAL));
-
- vipx_info("[%s]\n", __func__);
- return 0;
-}
-
-int vipx_exynos_clk_dump(struct vipx_exynos *exynos)
-{
- int ret = 0;
-
- return ret;
-}
-
-int dummy_vipx_exynos_clk_cfg(struct vipx_exynos *exynos) { return 0; }
-int dummy_vipx_exynos_clk_on(struct vipx_exynos *exynos) { return 0; }
-int dummy_vipx_exynos_clk_off(struct vipx_exynos *exynos) { return 0; }
-int dummy_vipx_exynos_clk_dump(struct vipx_exynos *exynos) { return 0; }
-
-const struct vipx_clk_ops vipx_clk_ops = {
-#ifdef CONFIG_EXYNOS_VIPX_HARDWARE
- .clk_cfg = vipx_exynos_clk_cfg,
- .clk_on = vipx_exynos_clk_on,
- .clk_off = vipx_exynos_clk_off,
- .clk_dump = vipx_exynos_clk_dump
-#else
- .clk_cfg = dummy_vipx_exynos_clk_cfg,
- .clk_on = dummy_vipx_exynos_clk_on,
- .clk_off = dummy_vipx_exynos_clk_off,
- .clk_dump = dummy_vipx_exynos_clk_dump
-#endif
-};
-
-int vipx_exynos_ctl_reset(struct vipx_exynos *exynos, bool hold)
-{
- u32 val;
-
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_GLOBAL_CTRL], &val);
- val = val | 0xF1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_GLOBAL_CTRL], val);
- mdelay(1);
-
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS2], &vipx_regs_cpu_ss2[VIPX_CPU_SS2_GLOBAL_CTRL], &val);
- val = val | 0xF1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS2], &vipx_regs_cpu_ss2[VIPX_CPU_SS2_GLOBAL_CTRL], val);
- mdelay(1);
-
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CPU_CTRL], &val);
- val = val | 0x1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CPU_CTRL], val);
- mdelay(1);
-
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CORTEX_CONTROL], &val);
- val = val | 0x1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CORTEX_CONTROL], val);
- mdelay(1);
-
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CPU_CTRL], 0x0);
- mdelay(1);
-
- /* QACTIVE */
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_QCHANNEL], &val);
- val = val | 0x1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_QCHANNEL], val);
- mdelay(1);
-
- vipx_readl(exynos->regbase[VIPX_REG_CPU_SS2], &vipx_regs_cpu_ss2[VIPX_CPU_SS2_QCHANNEL], &val);
- val = val | 0x1;
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS2], &vipx_regs_cpu_ss2[VIPX_CPU_SS2_QCHANNEL], val);
- mdelay(1);
-
- vipx_info("hold(%d)\n", hold);
- return 0;
-}
-
-int vipx_exynos_ctl_dump(struct vipx_exynos *exynos, u32 instance)
-{
-
- vipx_info("instance(%d)\n", instance);
- return 0;
-}
-
-void *vipx_exynos_ctl_remap(struct vipx_exynos *exynos, u32 instance)
-{
- BUG_ON(!exynos);
- BUG_ON(instance >= VIPX_REG_MAX_CNT);
-
- vipx_info("instance(%d)\n", instance);
- return exynos->regbase[instance];
-}
-
-int vipx_exynos_ctl_unmap(struct vipx_exynos *exynos, u32 instance, void *base)
-{
- BUG_ON(!exynos);
- BUG_ON(!base);
- BUG_ON(instance >= VIPX_REG_MAX_CNT);
-
- vipx_info("instance(%d)\n", instance);
- return 0;
-}
-
-int vipx_exynos_ctl_trigger(struct vipx_exynos *exynos, u32 chain_id)
-{
-
- vipx_info("chain_id(%d)\n", chain_id);
- return 0;
-}
-
-int vipx_exynos_ctl_start(struct vipx_exynos *exynos, u32 chain_id)
-{
- vipx_writel(exynos->regbase[VIPX_REG_CPU_SS1], &vipx_regs_cpu_ss1[VIPX_CPU_SS1_CORTEX_CONTROL], 0x0);
-
- vipx_info("chain_id(%d) %x %x\n", chain_id,exynos->regbase[VIPX_REG_CPU_SS1],exynos->regbase[VIPX_REG_CPU_SS2]);
- return 0;
-}
-
-int dummy_vipx_exynos_ctl_reset(struct vipx_exynos *exynos, bool hold) { return 0; }
-int dummy_vipx_exynos_ctl_dump(struct vipx_exynos *exynos, u32 instance) { return 0; }
-void *dummy_vipx_exynos_ctl_remap(struct vipx_exynos *exynos, u32 instance) { return NULL; }
-int dummy_vipx_exynos_ctl_unmap(struct vipx_exynos *exynos, u32 instance, void *base) { return 0; }
-int dummy_vipx_exynos_ctl_trigger(struct vipx_exynos *exynos, u32 chain_id) { return 0; }
-int dummy_vipx_exynos_ctl_start(struct vipx_exynos *exynos, u32 chain_id) { return 0; }
-
-const struct vipx_ctl_ops vipx_ctl_ops = {
-#ifdef CONFIG_EXYNOS_VIPX_HARDWARE
- .ctl_reset = vipx_exynos_ctl_reset,
- .ctl_dump = vipx_exynos_ctl_dump,
- .ctl_remap = vipx_exynos_ctl_remap,
- .ctl_unmap = vipx_exynos_ctl_unmap,
- .ctl_trigger = vipx_exynos_ctl_trigger,
- .ctl_start = vipx_exynos_ctl_start,
-#else
- .ctl_reset = dummy_vipx_exynos_ctl_reset,
- .ctl_dump = dummy_vipx_exynos_ctl_dump,
- .ctl_remap = dummy_vipx_exynos_ctl_remap,
- .ctl_unmap = dummy_vipx_exynos_ctl_unmap,
- .ctl_trigger = dummy_vipx_exynos_ctl_trigger,
- .ctl_start = dummy_vipx_exynos_ctl_start,
-#endif
-};
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPX driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_EXYNOS9610_H_
-#define VIPX_EXYNOS9610_H_
-
-enum vipx_reg_cpu_ss1_name {
- VIPX_CPU_SS1_VERSION_ID,
- VIPX_CPU_SS1_QCHANNEL,
- VIPX_CPU_SS1_IRQ_VIP_TO_HOST,
- VIPX_CPU_SS1_IRQ0_HOST_TO_VIP,
- VIPX_CPU_SS1_IRQ1_HOST_TO_VIP,
- VIPX_CPU_SS1_GLOBAL_CTRL,
- VIPX_CPU_SS1_CORTEX_CONTROL,
- VIPX_CPU_SS1_CPU_CTRL,
-};
-
-enum vipx_reg_cpu_ss2_name {
- VIPX_CPU_SS2_QCHANNEL,
- VIPX_CPU_SS2_GLOBAL_CTRL,
-};
-
-struct vipx_reg vipx_regs_cpu_ss1[] = {
- {0x00000, "VERSION_ID"},
- {0x00004, "SS1_QCHANNEL"},
- {0x00008, "SS1_IRQ_VIP_TO_HOST"},
- {0x0000C, "SS1_IRQ0_HOST_TO_VIP"},
- {0x00010, "SS1_IRQ1_HOST_TO_VIP"},
- {0x00014, "SS1_GLOBAL_CTRL"},
- {0x00018, "SS1_CORTEX_CONTROL"},
- {0x0001C, "SS1_CPU_CTRL"},
-};
-
-struct vipx_reg vipx_regs_cpu_ss2[] = {
- {0x00000, "SS2_QCHANNEL"},
- {0x00004, "SS2_GLOBAL_CTRL"},
-};
-
-#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_CLK_H__
+#define __VIPX_CLK_H__
+
+#include <linux/clk.h>
+
+struct vipx_system;
+
+struct vipx_clk {
+ struct clk *clk;
+ const char *name;
+};
+
+struct vipx_clk_ops {
+ int (*clk_init)(struct vipx_system *sys);
+ void (*clk_deinit)(struct vipx_system *sys);
+ int (*clk_cfg)(struct vipx_system *sys);
+ int (*clk_on)(struct vipx_system *sys);
+ int (*clk_off)(struct vipx_system *sys);
+ int (*clk_dump)(struct vipx_system *sys);
+};
+
+extern const struct vipx_clk_ops vipx_clk_ops;
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_CTRL_H__
+#define __VIPX_CTRL_H__
+
+struct vipx_system;
+
+enum {
+ REG_SS1,
+ REG_SS2,
+ REG_MAX
+};
+
+struct vipx_reg {
+ const unsigned int offset;
+ const char *name;
+};
+
+struct vipx_ctrl_ops {
+ int (*reset)(struct vipx_system *sys);
+ int (*start)(struct vipx_system *sys);
+ int (*get_irq)(struct vipx_system *sys, int direction);
+ int (*set_irq)(struct vipx_system *sys, int direction, int val);
+ int (*clear_irq)(struct vipx_system *sys, int direction, int val);
+ int (*debug_dump)(struct vipx_system *sys);
+};
+
+extern const struct vipx_ctrl_ops vipx_ctrl_ops;
+
+#endif
+++ /dev/null
-#include <linux/io.h>
-#include <linux/pinctrl/consumer.h>
-
-#include "vipx-config.h"
-#include "vipx-exynos.h"
-
-extern struct vipx_clk vipx_clk_array[];
-extern const u32 vipx_clk_array_size;
-extern const struct vipx_clk_ops vipx_clk_ops;
-extern const struct vipx_ctl_ops vipx_ctl_ops;
-
-int vipx_clk_set_rate(struct device *dev, u32 index, ulong frequency)
-{
- int ret = 0;
- struct clk *clk;
-
- if (index >= vipx_clk_array_size) {
- vipx_err("index is invalid(%d >= %d)\n", index, vipx_clk_array_size);
- ret = -EINVAL;
- goto p_err;
- }
-
- clk= vipx_clk_array[index].clk;
- if (IS_ERR_OR_NULL(clk)) {
- vipx_err("clk is NULL(%d)\n", index);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = clk_set_rate(clk, frequency);
- if (ret) {
- vipx_err("clk_set_rate is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-ulong vipx_clk_get_rate(struct device *dev, u32 index)
-{
- ulong frequency;
- struct clk *clk;
-
- if (index >= vipx_clk_array_size) {
- vipx_err("index is invalid(%d >= %d)\n", index, vipx_clk_array_size);
- frequency = -EINVAL;
- goto p_err;
- }
-
- clk = vipx_clk_array[index].clk;
- if (IS_ERR_OR_NULL(clk)) {
- vipx_err("clk is NULL(%d)\n", index);
- frequency = -EINVAL;
- goto p_err;
- }
-
- frequency = clk_get_rate(clk);
-
-p_err:
- vipx_info("%s : %ldMhz\n", vipx_clk_array[index].name, frequency/1000000);
- return frequency;
-}
-
-int vipx_clk_enable(struct device *dev, u32 index)
-{
- int ret = 0;
- struct clk *clk;
-
- if (index >= vipx_clk_array_size) {
- vipx_err("index is invalid(%d >= %d)\n", index, vipx_clk_array_size);
- ret = -EINVAL;
- goto p_err;
- }
-
- clk = vipx_clk_array[index].clk;
- if (IS_ERR_OR_NULL(clk)) {
- vipx_err("clk is NULL(%d)\n", index);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- vipx_err("clk_prepare_enable is fail(%s)\n", vipx_clk_array[index].name);
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-int vipx_clk_disable(struct device *dev, u32 index)
-{
- int ret = 0;
- struct clk *clk;
-
- if (index >= vipx_clk_array_size) {
- vipx_err("index is invalid(%d >= %d)\n", index, vipx_clk_array_size);
- ret = -EINVAL;
- goto p_err;
- }
-
- clk = vipx_clk_array[index].clk;
- if (IS_ERR_OR_NULL(clk)) {
- vipx_err("clk is NULL(%d)\n", index);
- ret = -EINVAL;
- goto p_err;
- }
-
- clk_disable_unprepare(clk);
-
-p_err:
- return ret;
-}
-
-static int vipx_exynos_clk_init(struct device *dev)
-{
- int ret = 0;
- const char *name;
- struct clk *clk;
- u32 index;
-
- for (index = 0; index < vipx_clk_array_size; ++index) {
- name = vipx_clk_array[index].name;
- if (!name) {
- probe_err("name is NULL\n");
- ret = -EINVAL;
- break;
- }
-
- clk = clk_get(dev, name);
- if (IS_ERR_OR_NULL(clk)) {
- probe_err("%s clk is not found\n", name);
- ret = -EINVAL;
- break;
- }
-
- vipx_clk_array[index].clk = clk;
- }
-
- return ret;
-}
-
-int vipx_exynos_probe(struct vipx_exynos *exynos, struct device *dev,
- void **regs, void *ram0, void *ram1)
-{
- int ret = 0;
-
- BUG_ON(!exynos);
- BUG_ON(!dev);
-
- exynos->regbase = regs;
- exynos->ram0base = ram0;
- exynos->ram1base = ram1;
- exynos->clk_ops = &vipx_clk_ops;
- exynos->ctl_ops = &vipx_ctl_ops;
- exynos->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR_OR_NULL(exynos->pinctrl)) {
- probe_err("devm_pinctrl_get is fail");
- ret = PTR_ERR(exynos->pinctrl);
- goto p_err;
- }
-
- ret = vipx_exynos_clk_init(dev);
- if (ret) {
- probe_err("vipx_exynos_clk_init is fail(%d)", ret);
- goto p_err;
- }
-
-p_err:
- probe_info("%s():%d\n", __func__, ret);
- return ret;
-}
-
-void vipx_readl(void __iomem *base_addr, struct vipx_reg *reg, u32 *val)
-{
- *val = readl(base_addr + reg->offset);
-
-#ifdef DBG_HW_SFR
- vipx_info("[REG][%s][0x%04X], val(R):[0x%08X]\n", reg->name, reg->offset, *val);
-#endif
-}
-
-void vipx_writel(void __iomem *base_addr, struct vipx_reg *reg, u32 val)
-{
-#ifdef DBG_HW_SFR
- vipx_info("[REG][%s][0x%04X], val(W):[0x%08X]\n", reg->name, reg->offset, val);
-#endif
-
- writel(val, base_addr + reg->offset);
-}
-
-void vipx_readf(void __iomem *base_addr, struct vipx_reg *reg, struct vipx_field *field, u32 *val)
-{
- *val = (readl(base_addr + reg->offset) >> (field->bit_start)) & ((1 << (field->bit_width)) - 1);
-
-#ifdef DBG_HW_SFR
- vipx_info("[REG][%s][%s][0x%04X], val(R):[0x%08X]\n", reg->name, field->name, reg->offset, *val);
-#endif
-}
-
-void vipx_writef(void __iomem *base_addr, struct vipx_reg *reg, struct vipx_field *field, u32 val)
-{
- u32 mask, temp;
-
- mask = ((1 << field->bit_width) - 1);
- temp = readl(base_addr + reg->offset) & ~(mask << field->bit_start);
- temp |= (val & mask) << (field->bit_start);
-
-#ifdef DBG_HW_SFR
- vipx_info("[REG][%s][%s][0x%04X], val(W):[0x%08X]\n", reg->name, field->name, reg->offset, val);
-#endif
-
- writel(temp, base_addr + reg->offset);
-}
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/syscalls.h>
-#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
-#include "vipx-config.h"
+#include "vipx-log.h"
+#include "vipx-system.h"
#include "vipx-binary.h"
-#include "vipx-memory.h"
-static noinline_for_stack long __get_file_size(struct file *file)
+static noinline_for_stack loff_t __vipx_binary_get_file_size(struct file *file)
{
+ int ret;
struct kstat st;
- u32 request_mask = (STATX_MODE | STATX_SIZE);
-
- if (vfs_getattr(&file->f_path, &st, request_mask, KSTAT_QUERY_FLAGS))
- return -1;
- if (!S_ISREG(st.mode))
- return -1;
- if (st.size != (long)st.size)
- return -1;
+ unsigned int request_mask = (STATX_MODE | STATX_SIZE);
- return st.size;
-}
-
-int vipx_binary_init(struct vipx_binary *binary, struct device *dev)
-{
- int ret = 0;
-
- BUG_ON(!binary);
- BUG_ON(!dev);
+ vipx_enter();
+ ret = vfs_getattr(&file->f_path, &st, request_mask, KSTAT_QUERY_FLAGS);
+ if (ret) {
+ vipx_err("vfs_getattr failed (%d)\n", ret);
+ goto p_err;
+ }
- binary->dev = dev;
+ if (!S_ISREG(st.mode)) {
+ ret = -EINVAL;
+ vipx_err("file mode is not S_ISREG\n");
+ goto p_err;
+ }
- return ret;
+ vipx_leave();
+ return st.size;
+p_err:
+ return (loff_t)ret;
}
-int vipx_binary_read(struct vipx_binary *binary,
- char *path,
- char *name,
- void *target,
- size_t target_size)
+static int __vipx_binary_file_read(struct vipx_binary *bin, const char *path,
+ const char *name, void *target, size_t size)
{
- int ret = 0;
- loff_t pos = 0;
- const struct firmware *fw_blob;
- u8 *buf = NULL;
- struct file *fp;
+ int ret;
mm_segment_t old_fs;
- long fsize, nread;
char fname[VIPX_FW_NAME_LEN];
+ struct file *fp;
+ loff_t fsize, pos = 0;
+ ssize_t nread;
- BUG_ON(!binary);
-
- snprintf(fname, sizeof(fname), "%s%s", path, name);
+ vipx_enter();
+ snprintf(fname, sizeof(fname), "%s/%s", path, name);
old_fs = get_fs();
set_fs(KERNEL_DS);
fp = filp_open(fname, O_RDONLY, 0);
- if (IS_ERR_OR_NULL(fp)) {
+ if (IS_ERR(fp)) {
set_fs(old_fs);
- goto request_fw;
+ ret = PTR_ERR(fp);
+ vipx_err("filp_open(%s) is failed (%d)\n", fname, ret);
+ goto p_err_open;
}
- fsize = __get_file_size(fp);
+ fsize = __vipx_binary_get_file_size(fp);
if (fsize <= 0) {
- vipx_err("__get_file_size is fail(%ld)\n", fsize);
- ret = -EBADF;
- goto p_err;
- }
-
- buf = vmalloc(fsize);
- if (!buf) {
- vipx_err("vmalloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
+ ret = (int)fsize;
+ goto p_err_size;
}
- nread = kernel_read(fp, buf, fsize, &pos);
- if (nread != fsize) {
- vipx_err("kernel_read is fail(%ld != %ld)\n", nread, fsize);
+ if (fsize > size) {
ret = -EIO;
- goto p_err;
+ vipx_err("file(%s) size is over (%lld > %zu)\n",
+ fname, fsize, size);
+ goto p_err_size;
}
- if (fsize > target_size) {
- vipx_err("image size is over(%ld > %ld)\n", fsize, target_size);
- ret = -EIO;
- goto p_err;
- }
-
- /* no cache operation, because target is sram of vipx */
-#ifdef CONFIG_EXYNOS_VIPX_HARDWARE
- memcpy(target, (void *)buf, fsize);
-#endif
- vipx_info("FW(%s, %ld) were applied successfully.\n", fname, fsize);
-
-p_err:
- if (buf)
- vfree(buf);
-
+ nread = kernel_read(fp, target, fsize, &pos);
filp_close(fp, current->files);
set_fs(old_fs);
+ if (nread < 0) {
+ ret = (int)nread;
+ vipx_err("kernel_read(%s) is fail (%d)\n", fname, ret);
+ goto p_err_read;
+ }
+
+ vipx_leave();
+ return 0;
+p_err_read:
+p_err_size:
+p_err_open:
return ret;
+}
-request_fw:
- ret = request_firmware(&fw_blob, name, binary->dev);
- if (ret) {
- vipx_err("request_firmware(%s) is fail(%d)", name, ret);
- ret = -EINVAL;
- goto request_err;
- }
+static int __vipx_binary_firmware_load(struct vipx_binary *bin,
+ const char *name, void *target, size_t size)
+{
+ int ret;
+ const struct firmware *fw_blob;
- if (!fw_blob) {
- vipx_err("fw_blob is NULL\n");
+ vipx_enter();
+ if (!target) {
ret = -EINVAL;
- goto request_err;
+ vipx_err("binary(%s) memory is NULL\n", name);
+ goto p_err_target;
}
- if (!fw_blob->data) {
- vipx_err("fw_blob->data is NULL\n");
- ret = -EINVAL;
- goto request_err;
+ ret = request_firmware(&fw_blob, name, bin->dev);
+ if (ret) {
+ vipx_err("request_firmware(%s) is fail (%d)\n", name, ret);
+ goto p_err_req;
}
- if (fw_blob->size > target_size) {
- vipx_err("image size is over(%ld > %ld)\n", fw_blob->size, target_size);
+ if (fw_blob->size > size) {
ret = -EIO;
- goto request_err;
+ vipx_err("binary(%s) size is over (%ld > %ld)\n",
+ name, fw_blob->size, size);
+ goto p_err_size;
}
-#ifdef CONFIG_EXYNOS_VIPX_HARDWARE
memcpy(target, fw_blob->data, fw_blob->size);
-#endif
- vipx_info("Binay(%s, %ld) were applied successfully.\n", name, fw_blob->size);
+ release_firmware(fw_blob);
-request_err:
+ vipx_leave();
+ return 0;
+p_err_size:
release_firmware(fw_blob);
+p_err_req:
+p_err_target:
return ret;
}
-int vipx_binary_write(struct vipx_binary *binary,
- char *path,
- char *name,
- void *target,
- size_t target_size)
+int vipx_binary_read(struct vipx_binary *bin, const char *path,
+ const char *name, void *target, size_t size)
{
- int ret = 0;
- struct file *fp;
- mm_segment_t old_fs;
- long nwrite;
- loff_t pos = 0;
- char fname[VIPX_FW_NAME_LEN];
+ int ret;
+
+ vipx_enter();
+
+ if (path)
+ ret = __vipx_binary_file_read(bin, path, name, target, size);
+ else
+ ret = __vipx_binary_firmware_load(bin, name, target, size);
+
+ vipx_leave();
+ return ret;
+}
- BUG_ON(!binary);
+int vipx_binary_write(struct vipx_binary *bin, const char *path,
+ const char *name, void *target, size_t size)
+{
+ int ret;
+ char fname[VIPX_FW_NAME_LEN];
+ mm_segment_t old_fs;
+ struct file *fp;
+ ssize_t nwrite;
+ loff_t pos = 0;
- snprintf(fname, sizeof(fname), "%s%s", path, name);
+ vipx_enter();
+ snprintf(fname, sizeof(fname), "%s/%s", path, name);
old_fs = get_fs();
set_fs(KERNEL_DS);
fp = filp_open(fname, O_RDWR | O_CREAT, 0);
- if (IS_ERR_OR_NULL(fp)) {
+ if (IS_ERR(fp)) {
set_fs(old_fs);
- vipx_err("filp_open is fail(%p)\n", fp);
- ret = -EBADF;
+ ret = PTR_ERR(fp);
+ vipx_err("filp_open(%s) is fail (%d)\n", fname, ret);
goto p_err;
}
- vipx_info("debug kva(0x%p), size(%ld)\n", target, target_size);
+ nwrite = kernel_write(fp, target, size, &pos);
+ filp_close(fp, current->files);
+ set_fs(old_fs);
-
- nwrite = kernel_write(fp, target, target_size, &pos);
- if (nwrite != target_size) {
- filp_close(fp, current->files);
- set_fs(old_fs);
- vipx_err("kernel_write is fail(%ld != %ld)\n", nwrite, target_size);
- ret = -EIO;
+ if (nwrite < 0) {
+ ret = (int)nwrite;
+ vipx_err("kernel_write(%s) is fail (%d)\n", fname, ret);
goto p_err;
}
- vipx_info("Binay(%s, %ld) were applied successfully.\n", fname, target_size);
-
- filp_close(fp, current->files);
- set_fs(old_fs);
-
+ vipx_leave();
+ return 0;
p_err:
return ret;
}
+
+int vipx_binary_init(struct vipx_system *sys)
+{
+ struct vipx_binary *bin;
+
+ vipx_enter();
+ bin = &sys->binary;
+ bin->dev = sys->dev;
+ vipx_leave();
+ return 0;
+}
+
+void vipx_binary_deinit(struct vipx_binary *bin)
+{
+ vipx_enter();
+ vipx_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_BINARY_H__
+#define __VIPX_BINARY_H__
+
+#include <linux/device.h>
+
+#define VIPX_DEBUG_BIN_PATH "/data"
+
+#define VIPX_FW_DRAM_NAME "CC_DRAM_CODE_FLASH.bin"
+#define VIPX_FW_ITCM_NAME "CC_ITCM_CODE_FLASH.bin"
+#define VIPX_FW_DTCM_NAME "CC_DTCM_CODE_FLASH.bin"
+
+#define VIPX_FW_NAME_LEN (100)
+#define VIPX_VERSION_SIZE (42)
+
+struct vipx_system;
+
+struct vipx_binary {
+ struct device *dev;
+};
+
+int vipx_binary_read(struct vipx_binary *bin, const char *path,
+ const char *name, void *target, size_t size);
+int vipx_binary_write(struct vipx_binary *bin, const char *path,
+ const char *name, void *target, size_t size);
+
+int vipx_binary_init(struct vipx_system *sys);
+void vipx_binary_deinit(struct vipx_binary *bin);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_COMMON_TYPE_H__
+#define __VIPX_COMMON_TYPE_H__
+
+#define MAX_INPUT_NUM (8)
+#define MAX_OUTPUT_NUM (8)
+#define MAX_PLANE_NUM (3)
+
+#define GET_COMMON_GRAPH_MODEL_ID(X) ({ \
+ union vipx_common_global_id temp_id; \
+ temp_id.num = (X); \
+ temp_id.head.model_id; \
+})
+
+enum vipx_common_addr_type {
+ VIPX_COMMON_V_ADDR,
+ VIPX_COMMON_DV_ADDR,
+ VIPX_COMMON_FD,
+};
+
+enum vipx_common_mem_attr {
+ VIPX_COMMON_CACHEALBE,
+ VIPX_COMMON_NON_CACHEALBE,
+};
+
+enum vipx_common_mem_type {
+ VIPX_COMMON_MEM_ION,
+ VIPX_COMMON_MEM_MALLOC,
+ VIPX_COMMON_MEM_ASHMEM
+};
+
+struct vipx_common_mem {
+ int fd;
+ unsigned int iova;
+ unsigned int size;
+ unsigned int offset;
+ unsigned char addr_type;
+ unsigned char mem_attr;
+ unsigned char mem_type;
+ unsigned char reserved[5];
+};
+
+union vipx_common_global_id {
+ struct {
+ unsigned int head :2;
+ unsigned int target :2;
+ unsigned int model_id :12;
+ unsigned int msg_id :16;
+ } head;
+ unsigned int num;
+};
+
+struct vipx_common_graph_info {
+ struct vipx_common_mem graph;
+ struct vipx_common_mem temp_buf;
+ struct vipx_common_mem weight;
+ struct vipx_common_mem bias;
+ unsigned int gid;
+ unsigned int user_para_size;
+	//unsigned char user_para[0] // variable size
+};
+
+struct vipx_common_execute_info {
+ unsigned int gid;
+ unsigned int macro_sg_offset;
+ unsigned int num_input;
+ unsigned int num_output;
+ struct vipx_common_mem input[MAX_INPUT_NUM][MAX_PLANE_NUM];
+ struct vipx_common_mem output[MAX_OUTPUT_NUM][MAX_PLANE_NUM];
+ unsigned int user_para_size;
+ unsigned int reserved;
+	//unsigned char user_para[0] // variable size
+};
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "vipx-log.h"
+#include "vipx-core.h"
+#include "vipx-context.h"
+#include "vipx-ioctl.h"
+
+struct vs4l_graph32 {
+ __u32 id;
+ __u32 priority;
+ __u32 time;
+ __u32 flags;
+ __u32 size;
+ compat_caddr_t addr;
+};
+
+struct vs4l_format32 {
+ __u32 target;
+ __u32 format;
+ __u32 plane;
+ __u32 width;
+ __u32 height;
+};
+
+struct vs4l_format_list32 {
+ __u32 direction;
+ __u32 count;
+ compat_caddr_t formats;
+};
+
+struct vs4l_param32 {
+ __u32 target;
+ compat_caddr_t addr;
+ __u32 offset;
+ __u32 size;
+};
+
+struct vs4l_param_list32 {
+ __u32 count;
+ compat_caddr_t params;
+};
+
+struct vs4l_ctrl32 {
+ __u32 ctrl;
+ __u32 value;
+};
+
+struct vs4l_roi32 {
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+};
+
+struct vs4l_buffer32 {
+ struct vs4l_roi roi;
+ union {
+ compat_caddr_t userptr;
+ __s32 fd;
+ } m;
+ compat_caddr_t reserved;
+};
+
+struct vs4l_container32 {
+ __u32 type;
+ __u32 target;
+ __u32 memory;
+ __u32 reserved[4];
+ __u32 count;
+ compat_caddr_t buffers;
+};
+
+struct vs4l_container_list32 {
+ __u32 direction;
+ __u32 id;
+ __u32 index;
+ __u32 flags;
+ struct compat_timeval timestamp[6];
+ __u32 count;
+ __u32 user_params[MAX_NUM_OF_USER_PARAMS];
+ compat_caddr_t containers;
+};
+
+#define VS4L_VERTEXIOC_S_GRAPH32 \
+ _IOW('V', 0, struct vs4l_graph32)
+#define VS4L_VERTEXIOC_S_FORMAT32 \
+ _IOW('V', 1, struct vs4l_format_list32)
+#define VS4L_VERTEXIOC_S_PARAM32 \
+ _IOW('V', 2, struct vs4l_param_list32)
+#define VS4L_VERTEXIOC_S_CTRL32 \
+ _IOW('V', 3, struct vs4l_ctrl32)
+#define VS4L_VERTEXIOC_QBUF32 \
+ _IOW('V', 6, struct vs4l_container_list32)
+#define VS4L_VERTEXIOC_DQBUF32 \
+ _IOW('V', 7, struct vs4l_container_list32)
+
+struct vipx_ioc_load_kernel_binary32 {
+ unsigned int size;
+ unsigned int global_id;
+ int kernel_fd;
+ unsigned int kernel_size;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_load_graph_info32 {
+ unsigned int size;
+ struct vipx_common_graph_info graph_info;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_unload_graph_info32 {
+ unsigned int size;
+ unsigned int graph_id;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_execute_submodel32 {
+ unsigned int size;
+ struct vipx_common_execute_info execute_info;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+#define VIPX_IOC_LOAD_KERNEL_BINARY32 \
+ _IOWR('V', 0, struct vipx_ioc_load_kernel_binary32)
+#define VIPX_IOC_LOAD_GRAPH_INFO32 \
+ _IOWR('V', 1, struct vipx_ioc_load_graph_info32)
+#define VIPX_IOC_UNLOAD_GRAPH_INFO32 \
+ _IOWR('V', 2, struct vipx_ioc_unload_graph_info32)
+#define VIPX_IOC_EXECUTE_SUBMODEL32 \
+ _IOWR('V', 3, struct vipx_ioc_execute_submodel32)
+
+static int __vipx_ioctl_get_graph32(struct vs4l_graph *karg,
+ struct vs4l_graph32 __user *uarg)
+{
+ int ret;
+ compat_caddr_t taddr;
+
+ vipx_enter();
+ if (get_user(karg->id, &uarg->id) ||
+ get_user(karg->priority, &uarg->priority) ||
+ get_user(karg->time, &uarg->time) ||
+ get_user(karg->flags, &uarg->flags) ||
+ get_user(karg->size, &uarg->size) ||
+ get_user(taddr, &uarg->addr)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_GRAPH32]\n");
+ goto p_err;
+ }
+
+ if (karg->size || taddr) {
+ ret = -EINVAL;
+ vipx_err("invalid parameters (%u/%u) [S_GRAPH32]\n",
+ karg->size, taddr);
+ goto p_err;
+ } else {
+ karg->addr = 0;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_graph32(struct vs4l_graph *karg,
+ struct vs4l_graph32 __user *uarg)
+{
+ vipx_enter();
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_format32(struct vs4l_format_list *karg,
+ struct vs4l_format_list32 __user *uarg)
+{
+ int ret;
+ unsigned int idx;
+ size_t size;
+ compat_caddr_t taddr;
+ struct vs4l_format32 __user *uformat;
+ struct vs4l_format *kformat;
+
+ vipx_enter();
+ if (get_user(karg->direction, &uarg->direction) ||
+ get_user(karg->count, &uarg->count) ||
+ get_user(taddr, &uarg->formats)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_FORMAT32]\n");
+ goto p_err;
+ }
+
+ uformat = compat_ptr(taddr);
+
+ size = karg->count * sizeof(*kformat);
+ kformat = kzalloc(size, GFP_KERNEL);
+ if (!kformat) {
+ ret = -ENOMEM;
+ vipx_err("Failed to alloc kformat (%zu)\n", size);
+ goto p_err;
+ }
+
+ for (idx = 0; idx < karg->count; ++idx) {
+ if (get_user(kformat[idx].target, &uformat[idx].target) ||
+ get_user(kformat[idx].format,
+ &uformat[idx].format) ||
+ get_user(kformat[idx].plane,
+ &uformat[idx].plane) ||
+ get_user(kformat[idx].width,
+ &uformat[idx].width) ||
+ get_user(kformat[idx].height,
+ &uformat[idx].height)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_FORMAT32]\n");
+ goto p_err_free;
+ }
+ }
+ karg->formats = kformat;
+
+ vipx_leave();
+ return 0;
+p_err_free:
+ kfree(kformat);
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_format32(struct vs4l_format_list *karg,
+ struct vs4l_format_list32 __user *uarg)
+{
+ vipx_enter();
+ kfree(karg->formats);
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_param32(struct vs4l_param_list *karg,
+ struct vs4l_param_list32 __user *uarg)
+{
+ int ret;
+ unsigned int idx;
+ size_t size;
+ compat_caddr_t taddr;
+ struct vs4l_param32 __user *uparam;
+ struct vs4l_param *kparam;
+
+ vipx_enter();
+ if (get_user(karg->count, &uarg->count) ||
+ get_user(taddr, &uarg->params)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_PARAM32]\n");
+ goto p_err;
+ }
+
+ uparam = compat_ptr(taddr);
+
+ size = karg->count * sizeof(*kparam);
+ kparam = kzalloc(size, GFP_KERNEL);
+ if (!kparam) {
+ ret = -ENOMEM;
+ vipx_err("Failed to alloc kparam (%zu)\n", size);
+ goto p_err;
+ }
+
+ for (idx = 0; idx < karg->count; ++idx) {
+ if (get_user(kparam[idx].target, &uparam[idx].target) ||
+ get_user(taddr, &uparam[idx].addr) ||
+ get_user(kparam[idx].offset,
+ &uparam[idx].offset) ||
+ get_user(kparam[idx].size,
+ &uparam[idx].size)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_PARAM32]\n");
+ goto p_err_free;
+ }
+
+ if (kparam[idx].size || taddr) {
+ ret = -EINVAL;
+ vipx_err("invalid parameters ([%d]%u/%u) [S_PARAM32]\n",
+ idx, kparam[idx].size, taddr);
+ goto p_err_free;
+ } else {
+ kparam[idx].addr = 0;
+ }
+ }
+
+ karg->params = kparam;
+ vipx_leave();
+ return 0;
+p_err_free:
+ kfree(kparam);
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_param32(struct vs4l_param_list *karg,
+ struct vs4l_param_list32 __user *uarg)
+{
+ vipx_enter();
+ kfree(karg->params);
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_ctrl32(struct vs4l_ctrl *karg,
+ struct vs4l_ctrl32 __user *uarg)
+{
+ int ret;
+
+ vipx_enter();
+ if (get_user(karg->ctrl, &uarg->ctrl) ||
+ get_user(karg->value, &uarg->value)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [S_CTRL32]\n");
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_ctrl32(struct vs4l_ctrl *karg,
+ struct vs4l_ctrl32 __user *uarg)
+{
+ vipx_enter();
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_container32(struct vs4l_container_list *karg,
+ struct vs4l_container_list32 __user *uarg)
+{
+ int ret;
+ size_t size;
+ compat_caddr_t taddr;
+ unsigned int c_count, b_count;
+ struct vs4l_container32 *ucon;
+ struct vs4l_container *kcon;
+ struct vs4l_buffer32 *ubuf;
+ struct vs4l_buffer *kbuf;
+
+ vipx_enter();
+ if (get_user(karg->direction, &uarg->direction) ||
+ get_user(karg->id, &uarg->id) ||
+ get_user(karg->index, &uarg->index) ||
+ get_user(karg->flags, &uarg->flags) ||
+ get_user(karg->count, &uarg->count) ||
+ copy_from_user(karg->user_params, uarg->user_params,
+ sizeof(karg->user_params)) ||
+ get_user(taddr, &uarg->containers)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [CONTAINER32]\n");
+ goto p_err;
+ }
+
+ ucon = compat_ptr(taddr);
+
+ size = karg->count * sizeof(*kcon);
+ kcon = kzalloc(size, GFP_KERNEL);
+ if (!kcon) {
+ ret = -ENOMEM;
+ vipx_err("Failed to alloc kcon (%zu)\n", size);
+ goto p_err;
+ }
+
+ karg->containers = kcon;
+
+ for (c_count = 0; c_count < karg->count; ++c_count) {
+ if (get_user(kcon[c_count].type, &ucon[c_count].type) ||
+ get_user(kcon[c_count].target,
+ &ucon[c_count].target) ||
+ get_user(kcon[c_count].memory,
+ &ucon[c_count].memory) ||
+ copy_from_user(kcon[c_count].reserved,
+ ucon[c_count].reserved,
+ sizeof(kcon[c_count].reserved)) ||
+ get_user(kcon[c_count].count,
+ &ucon[c_count].count) ||
+ get_user(taddr, &ucon[c_count].buffers)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [CONTAINER32]\n");
+ goto p_err_free;
+ }
+
+ ubuf = compat_ptr(taddr);
+
+ size = kcon[c_count].count * sizeof(*kbuf);
+ kbuf = kzalloc(size, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ vipx_err("Failed to alloc kbuf (%zu)\n", size);
+ goto p_err_free;
+ }
+
+ kcon[c_count].buffers = kbuf;
+
+ for (b_count = 0; b_count < kcon[c_count].count;
+ ++b_count) {
+ if (get_user(kbuf[b_count].roi.x,
+ &ubuf[b_count].roi.x) ||
+ get_user(kbuf[b_count].roi.y,
+ &ubuf[b_count].roi.y) ||
+ get_user(kbuf[b_count].roi.w,
+ &ubuf[b_count].roi.w) ||
+ get_user(kbuf[b_count].roi.h,
+ &ubuf[b_count].roi.h)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [CONTAINER32]\n");
+ goto p_err_free;
+ }
+
+ if (kcon[c_count].memory == VS4L_MEMORY_DMABUF) {
+ if (get_user(kbuf[b_count].m.fd,
+ &ubuf[b_count].m.fd)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [CONTAINER32]\n");
+ goto p_err_free;
+ }
+ } else {
+ if (get_user(kbuf[b_count].m.userptr,
+ &ubuf[b_count].m.userptr)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [CONTAINER32]\n");
+ goto p_err_free;
+ }
+ }
+ }
+ }
+
+ vipx_leave();
+ return 0;
+p_err_free:
+ for (c_count = 0; c_count < karg->count; ++c_count)
+ kfree(kcon[c_count].buffers);
+
+ kfree(kcon);
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_container32(struct vs4l_container_list *karg,
+ struct vs4l_container_list32 __user *uarg)
+{
+ unsigned int idx;
+
+ vipx_enter();
+ if (put_user(karg->id, &uarg->id) ||
+ put_user(karg->index, &uarg->index) ||
+ put_user(karg->flags, &uarg->flags) ||
+ put_user(karg->timestamp[0].tv_sec,
+ &uarg->timestamp[0].tv_sec) ||
+ put_user(karg->timestamp[0].tv_usec,
+ &uarg->timestamp[0].tv_usec) ||
+ put_user(karg->timestamp[1].tv_sec,
+ &uarg->timestamp[1].tv_sec) ||
+ put_user(karg->timestamp[1].tv_usec,
+ &uarg->timestamp[1].tv_usec) ||
+ put_user(karg->timestamp[2].tv_sec,
+ &uarg->timestamp[2].tv_sec) ||
+ put_user(karg->timestamp[2].tv_usec,
+ &uarg->timestamp[2].tv_usec) ||
+ put_user(karg->timestamp[3].tv_sec,
+ &uarg->timestamp[3].tv_sec) ||
+ put_user(karg->timestamp[3].tv_usec,
+ &uarg->timestamp[3].tv_usec) ||
+ put_user(karg->timestamp[4].tv_sec,
+ &uarg->timestamp[4].tv_sec) ||
+ put_user(karg->timestamp[4].tv_usec,
+ &uarg->timestamp[4].tv_usec) ||
+ put_user(karg->timestamp[5].tv_sec,
+ &uarg->timestamp[5].tv_sec) ||
+ put_user(karg->timestamp[5].tv_usec,
+ &uarg->timestamp[5].tv_usec) ||
+ copy_to_user(uarg->user_params, karg->user_params,
+ sizeof(karg->user_params))) {
+ vipx_err("Copy failed to user [CONTAINER32]\n");
+ }
+
+ for (idx = 0; idx < karg->count; ++idx)
+ kfree(karg->containers[idx].buffers);
+
+ kfree(karg->containers);
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_load_kernel_binary32(
+ struct vipx_ioc_load_kernel_binary *karg,
+ struct vipx_ioc_load_kernel_binary32 __user *uarg)
+{
+ int ret;
+
+ vipx_enter();
+ if (get_user(karg->size, &uarg->size) ||
+ get_user(karg->global_id, &uarg->global_id) ||
+ get_user(karg->kernel_fd, &uarg->kernel_fd) ||
+ get_user(karg->kernel_size, &uarg->kernel_size)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [Load kernel binary(32)]\n");
+ goto p_err;
+ }
+
+ memset(karg->timestamp, 0, sizeof(karg->timestamp));
+ memset(karg->reserved, 0, sizeof(karg->reserved));
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static void __vipx_ioctl_put_load_kernel_binary32(
+ struct vipx_ioc_load_kernel_binary *karg,
+ struct vipx_ioc_load_kernel_binary32 __user *uarg)
+{
+ vipx_enter();
+ if (put_user(karg->ret, &uarg->ret) ||
+ put_user(karg->timestamp[0].tv_sec,
+ &uarg->timestamp[0].tv_sec) ||
+ put_user(karg->timestamp[0].tv_nsec,
+ &uarg->timestamp[0].tv_nsec) ||
+ put_user(karg->timestamp[1].tv_sec,
+ &uarg->timestamp[1].tv_sec) ||
+ put_user(karg->timestamp[1].tv_nsec,
+ &uarg->timestamp[1].tv_nsec) ||
+ put_user(karg->timestamp[2].tv_sec,
+ &uarg->timestamp[2].tv_sec) ||
+ put_user(karg->timestamp[2].tv_nsec,
+ &uarg->timestamp[2].tv_nsec) ||
+ put_user(karg->timestamp[3].tv_sec,
+ &uarg->timestamp[3].tv_sec) ||
+ put_user(karg->timestamp[3].tv_nsec,
+ &uarg->timestamp[3].tv_nsec)) {
+ vipx_err("Copy failed to user [Load kernel binary(32)]\n");
+ }
+ vipx_leave();
+}
+
+static int __vipx_ioctl_get_load_graph_info32(
+ struct vipx_ioc_load_graph_info *karg,
+ struct vipx_ioc_load_graph_info32 __user *uarg)
+{
+ int ret;
+
+ vipx_enter();
+ if (get_user(karg->size, &uarg->size)) {
+ ret = -EFAULT;
+ vipx_err("Copy failed [Load graph info(32)]\n");
+ goto p_err;
+ }
+
+ ret = copy_from_user(&karg->graph_info, &uarg->graph_info,
+ sizeof(uarg->graph_info));
+ if (ret) {
+ vipx_err("Copy failed from user [Load graph info(32)]\n");
+ goto p_err;
+ }
+
+ memset(karg->timestamp, 0, sizeof(karg->timestamp));
+ memset(karg->reserved, 0, sizeof(karg->reserved));
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/*
+ * Copy the result code and profiling timestamps of a load-graph-info
+ * request back to the 32-bit user argument. Failure is only logged.
+ */
+static void __vipx_ioctl_put_load_graph_info32(
+		struct vipx_ioc_load_graph_info *karg,
+		struct vipx_ioc_load_graph_info32 __user *uarg)
+{
+	vipx_enter();
+	if (put_user(karg->ret, &uarg->ret) ||
+			put_user(karg->timestamp[0].tv_sec,
+				&uarg->timestamp[0].tv_sec) ||
+			put_user(karg->timestamp[0].tv_nsec,
+				&uarg->timestamp[0].tv_nsec) ||
+			put_user(karg->timestamp[1].tv_sec,
+				&uarg->timestamp[1].tv_sec) ||
+			put_user(karg->timestamp[1].tv_nsec,
+				&uarg->timestamp[1].tv_nsec) ||
+			put_user(karg->timestamp[2].tv_sec,
+				&uarg->timestamp[2].tv_sec) ||
+			put_user(karg->timestamp[2].tv_nsec,
+				&uarg->timestamp[2].tv_nsec) ||
+			put_user(karg->timestamp[3].tv_sec,
+				&uarg->timestamp[3].tv_sec) ||
+			put_user(karg->timestamp[3].tv_nsec,
+				&uarg->timestamp[3].tv_nsec)) {
+		/* was a copy-paste of the kernel-binary message */
+		vipx_err("Copy failed to user [Load graph info(32)]\n");
+	}
+	vipx_leave();
+}
+
+/*
+ * Fetch a 32-bit unload-graph-info request from user space into the
+ * native kernel representation.
+ * Returns 0 on success or a negative error code.
+ */
+static int __vipx_ioctl_get_unload_graph_info32(
+		struct vipx_ioc_unload_graph_info *karg,
+		struct vipx_ioc_unload_graph_info32 __user *uarg)
+{
+	int ret = 0;
+
+	vipx_enter();
+	if (get_user(karg->size, &uarg->size) ||
+			get_user(karg->graph_id, &uarg->graph_id)) {
+		vipx_err("Copy failed [Unload graph info(32)]\n");
+		ret = -EFAULT;
+		goto p_err;
+	}
+
+	/* timestamps/reserved are output-only; start from a clean state */
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vipx_leave();
+p_err:
+	return ret;
+}
+
+/*
+ * Copy the result code and profiling timestamps of an unload-graph-info
+ * request back to the 32-bit user argument. Failure is only logged.
+ */
+static void __vipx_ioctl_put_unload_graph_info32(
+		struct vipx_ioc_unload_graph_info *karg,
+		struct vipx_ioc_unload_graph_info32 __user *uarg)
+{
+	vipx_enter();
+	if (put_user(karg->ret, &uarg->ret) ||
+			put_user(karg->timestamp[0].tv_sec,
+				&uarg->timestamp[0].tv_sec) ||
+			put_user(karg->timestamp[0].tv_nsec,
+				&uarg->timestamp[0].tv_nsec) ||
+			put_user(karg->timestamp[1].tv_sec,
+				&uarg->timestamp[1].tv_sec) ||
+			put_user(karg->timestamp[1].tv_nsec,
+				&uarg->timestamp[1].tv_nsec) ||
+			put_user(karg->timestamp[2].tv_sec,
+				&uarg->timestamp[2].tv_sec) ||
+			put_user(karg->timestamp[2].tv_nsec,
+				&uarg->timestamp[2].tv_nsec) ||
+			put_user(karg->timestamp[3].tv_sec,
+				&uarg->timestamp[3].tv_sec) ||
+			put_user(karg->timestamp[3].tv_nsec,
+				&uarg->timestamp[3].tv_nsec)) {
+		/* was a copy-paste of the kernel-binary message */
+		vipx_err("Copy failed to user [Unload graph info(32)]\n");
+	}
+	vipx_leave();
+}
+
+/*
+ * Fetch a 32-bit execute-submodel request from user space into the
+ * native kernel representation.
+ * Returns 0 on success or a negative error code.
+ */
+static int __vipx_ioctl_get_execute_submodel32(
+		struct vipx_ioc_execute_submodel *karg,
+		struct vipx_ioc_execute_submodel32 __user *uarg)
+{
+	int ret;
+
+	vipx_enter();
+	if (get_user(karg->size, &uarg->size)) {
+		ret = -EFAULT;
+		vipx_err("Copy failed [Execute submodel(32)]\n");
+		goto p_err;
+	}
+
+	/*
+	 * copy_from_user() returns the number of bytes NOT copied;
+	 * convert that to -EFAULT instead of returning a positive
+	 * byte count to the ioctl caller.
+	 */
+	if (copy_from_user(&karg->execute_info, &uarg->execute_info,
+				sizeof(uarg->execute_info))) {
+		ret = -EFAULT;
+		vipx_err("Copy failed from user [Execute submodel(32)]\n");
+		goto p_err;
+	}
+
+	/* timestamps/reserved are output-only; start from a clean state */
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Copy the result code and profiling timestamps of an execute-submodel
+ * request back to the 32-bit user argument. Failure is only logged.
+ */
+static void __vipx_ioctl_put_execute_submodel32(
+		struct vipx_ioc_execute_submodel *karg,
+		struct vipx_ioc_execute_submodel32 __user *uarg)
+{
+	vipx_enter();
+	if (put_user(karg->ret, &uarg->ret) ||
+			put_user(karg->timestamp[0].tv_sec,
+				&uarg->timestamp[0].tv_sec) ||
+			put_user(karg->timestamp[0].tv_nsec,
+				&uarg->timestamp[0].tv_nsec) ||
+			put_user(karg->timestamp[1].tv_sec,
+				&uarg->timestamp[1].tv_sec) ||
+			put_user(karg->timestamp[1].tv_nsec,
+				&uarg->timestamp[1].tv_nsec) ||
+			put_user(karg->timestamp[2].tv_sec,
+				&uarg->timestamp[2].tv_sec) ||
+			put_user(karg->timestamp[2].tv_nsec,
+				&uarg->timestamp[2].tv_nsec) ||
+			put_user(karg->timestamp[3].tv_sec,
+				&uarg->timestamp[3].tv_sec) ||
+			put_user(karg->timestamp[3].tv_nsec,
+				&uarg->timestamp[3].tv_nsec)) {
+		/* was a copy-paste of the kernel-binary message */
+		vipx_err("Copy failed to user [Execute submodel(32)]\n");
+	}
+	vipx_leave();
+}
+
+/*
+ * 32-bit compat ioctl entry point for the VIPx character device.
+ * Each 32-bit command is converted to the native argument layout,
+ * dispatched through the core ioctl ops table and the result is
+ * copied back to the 32-bit user structure.
+ * Returns 0 on success or a negative error code.
+ */
+long vipx_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct vipx_context *vctx;
+	const struct vipx_ioctl_ops *ops;
+	/* native-layout scratch argument shared by all commands */
+	union vipx_ioc_arg karg;
+	void __user *uarg;
+
+	vipx_enter();
+	vctx = file->private_data;
+	ops = vctx->core->ioc_ops;
+	uarg = compat_ptr(arg);
+
+	switch (cmd) {
+	case VS4L_VERTEXIOC_S_GRAPH32:
+		ret = __vipx_ioctl_get_graph32(&karg.graph, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_graph(vctx, &karg.graph);
+		/* put-back runs even on failure so args->ret reaches user */
+		__vipx_ioctl_put_graph32(&karg.graph, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_FORMAT32:
+		ret = __vipx_ioctl_get_format32(&karg.flist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_format(vctx, &karg.flist);
+		__vipx_ioctl_put_format32(&karg.flist, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_PARAM32:
+		ret = __vipx_ioctl_get_param32(&karg.plist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_param(vctx, &karg.plist);
+		__vipx_ioctl_put_param32(&karg.plist, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_CTRL32:
+		ret = __vipx_ioctl_get_ctrl32(&karg.ctrl, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_ctrl(vctx, &karg.ctrl);
+		__vipx_ioctl_put_ctrl32(&karg.ctrl, uarg);
+		break;
+	case VS4L_VERTEXIOC_STREAM_ON:
+		/* no argument conversion needed for stream on/off */
+		ret = ops->streamon(vctx);
+		break;
+	case VS4L_VERTEXIOC_STREAM_OFF:
+		ret = ops->streamoff(vctx);
+		break;
+	case VS4L_VERTEXIOC_QBUF32:
+		ret = __vipx_ioctl_get_container32(&karg.clist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->qbuf(vctx, &karg.clist);
+		__vipx_ioctl_put_container32(&karg.clist, uarg);
+		break;
+	case VS4L_VERTEXIOC_DQBUF32:
+		ret = __vipx_ioctl_get_container32(&karg.clist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->dqbuf(vctx, &karg.clist);
+		__vipx_ioctl_put_container32(&karg.clist, uarg);
+		break;
+	case VIPX_IOC_LOAD_KERNEL_BINARY32:
+		ret = __vipx_ioctl_get_load_kernel_binary32(&karg.kernel_bin,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
+		__vipx_ioctl_put_load_kernel_binary32(&karg.kernel_bin, uarg);
+		break;
+	case VIPX_IOC_LOAD_GRAPH_INFO32:
+		ret = __vipx_ioctl_get_load_graph_info32(&karg.load_ginfo,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->load_graph_info(vctx, &karg.load_ginfo);
+		__vipx_ioctl_put_load_graph_info32(&karg.load_ginfo, uarg);
+		break;
+	case VIPX_IOC_UNLOAD_GRAPH_INFO32:
+		ret = __vipx_ioctl_get_unload_graph_info32(&karg.unload_ginfo,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->unload_graph_info(vctx, &karg.unload_ginfo);
+		__vipx_ioctl_put_unload_graph_info32(&karg.unload_ginfo, uarg);
+		break;
+	case VIPX_IOC_EXECUTE_SUBMODEL32:
+		ret = __vipx_ioctl_get_execute_submodel32(&karg.exec, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->execute_submodel(vctx, &karg.exec);
+		__vipx_ioctl_put_execute_submodel32(&karg.exec, uarg);
+		break;
+	default:
+		ret = -EINVAL;
+		vipx_err("ioc command(%x) is not supported\n", cmd);
+		goto p_err;
+	}
+
+	vipx_leave();
+p_err:
+	return ret;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_CONFIG_H__
+#define __VIPX_CONFIG_H__
+
+/* CONFIG - GLOBAL OPTIONS */
+#define VIPX_MAX_BUFFER (16)
+#define VIPX_MAX_GRAPH (32)
+#define VIPX_MAX_TASK VIPX_MAX_BUFFER
+/* single definition; a duplicated define of this value was removed */
+#define VIPX_MAX_PLANES (3)
+
+#define VIPX_MAX_TASKDESC (VIPX_MAX_GRAPH * 2)
+
+/* maximum number of simultaneously open device contexts */
+#define VIPX_MAX_CONTEXT (16)
+
+/* CONFIG - PLATFORM CONFIG */
+#define VIPX_STOP_WAIT_COUNT (200)
+//#define VIPX_MBOX_EMULATOR
+
+/* CONFIG - DEBUG OPTIONS */
+#define DEBUG_LOG_CONCATE_ENABLE
+//#define DEBUG_TIME_MEASURE
+//#define DEBUG_LOG_IO_WRITE
+//#define DEBUG_LOG_DUMP_REGION
+//#define DEBUG_LOG_MEMORY
+//#define DEBUG_LOG_CALL_TREE
+//#define DEBUG_LOG_MAILBOX_DUMP
+#define DEBUG_LOG_REQUEST_DUMP
+
+//#define TEMP_RT_FRAMEWORK_TEST
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-util.h"
+#include "vipx-kernel-binary.h"
+#include "vipx-context.h"
+
+#define KERNEL_BINARY_DEBUG
+#define LOAD_GRAPH_INFO_DEBUG
+#define UNLOAD_GRAPH_INFO_DEBUG
+#define EXECUTE_DEBUG
+
+/*
+ * Allocate a vipx_buffer, map the user-supplied dma-buf fd for the
+ * device and publish the device address through common_mem->iova.
+ * Returns the buffer on success or an ERR_PTR on failure.
+ */
+static struct vipx_buffer *__vipx_context_create_buffer(
+		struct vipx_context *vctx,
+		struct vipx_common_mem *common_mem)
+{
+	struct vipx_memory *mem;
+	struct vipx_buffer *buffer;
+	int ret;
+
+	vipx_enter();
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		vipx_err("Failed to alloc buffer\n");
+		ret = -ENOMEM;
+		goto p_err;
+	}
+
+	/* reject zero-sized mappings up front */
+	if (unlikely(!common_mem->size)) {
+		vipx_err("memory size is invalid (%u)\n", common_mem->size);
+		ret = -EINVAL;
+		goto p_err_free;
+	}
+
+	mem = &vctx->core->system->memory;
+	buffer->m.fd = common_mem->fd;
+	buffer->size = common_mem->size;
+
+	ret = mem->mops->map_dmabuf(mem, buffer);
+	if (unlikely(ret))
+		goto p_err_free;
+
+	/* NOTE(review): dvaddr is truncated to 32 bits here — presumably
+	 * the device only addresses a 32-bit IOVA window; confirm.
+	 */
+	common_mem->iova = (unsigned int)buffer->dvaddr;
+
+	vipx_leave();
+	return buffer;
+p_err_free:
+	kfree(buffer);
+p_err:
+	return ERR_PTR(ret);
+}
+
+/* Unmap the dma-buf behind the buffer and release the wrapper object */
+static void __vipx_context_destroy_buffer(struct vipx_context *vctx,
+		struct vipx_buffer *buf)
+{
+	struct vipx_memory *mem = &vctx->core->system->memory;
+
+	vipx_enter();
+	mem->mops->unmap_dmabuf(mem, buf);
+	kfree(buf);
+	vipx_leave();
+}
+
+/*
+ * Register a user-supplied kernel binary (dma-buf fd) with this
+ * context. Returns 0 on success or a negative error code.
+ */
+static int vipx_context_load_kernel_binary(struct vipx_context *vctx,
+		struct vipx_ioc_load_kernel_binary *kernel_bin)
+{
+	int ret;
+
+	vipx_enter();
+#ifdef KERNEL_BINARY_DEBUG
+	vipx_dbg("Load kernel binary\n");
+	vipx_dbg("global_id : %d\n", kernel_bin->global_id);
+	vipx_dbg("kernel_fd : %d\n", kernel_bin->kernel_fd);
+	vipx_dbg("kernel_size : %d\n", kernel_bin->kernel_size);
+#endif
+
+	ret = vipx_kernel_binary_add(vctx, kernel_bin->global_id,
+			kernel_bin->kernel_fd, kernel_bin->kernel_size);
+	if (ret)
+		return ret;
+
+	vipx_leave();
+	return 0;
+}
+
+#ifdef LOAD_GRAPH_INFO_DEBUG
+/* Dump one memory descriptor of a graph info (debug builds only) */
+static void __vipx_context_dump_common_mem(const char *name,
+		struct vipx_common_mem *common_mem)
+{
+	vipx_dbg("--- %s ---\n", name);
+	vipx_dbg("addr(fd) : %d\n", common_mem->fd);
+	vipx_dbg("size : %d\n", common_mem->size);
+	vipx_dbg("offset : %d\n", common_mem->offset);
+	vipx_dbg("addr_type: %d\n", common_mem->addr_type);
+	vipx_dbg("mem_attr : %d\n", common_mem->mem_attr);
+	vipx_dbg("mem_type : %d\n", common_mem->mem_type);
+}
+#endif
+
+/*
+ * Create a graph model from the user-supplied graph info and map the
+ * graph/temp_buf/weight/bias dma-bufs for the device.
+ * On failure every buffer mapped so far is unmapped in reverse order
+ * and the model is destroyed again.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_context_load_graph_info(struct vipx_context *vctx,
+		struct vipx_ioc_load_graph_info *ginfo)
+{
+	int ret;
+	struct vipx_common_graph_info *common_ginfo;
+	struct vipx_graph_model *gmodel;
+	struct vipx_common_mem *common_mem;
+	struct vipx_buffer *buffer;
+
+	vipx_enter();
+	common_ginfo = &ginfo->graph_info;
+
+#ifdef LOAD_GRAPH_INFO_DEBUG
+	vipx_dbg("Load graph info\n");
+	vipx_dbg("Graph ID : %#x\n", common_ginfo->gid);
+	__vipx_context_dump_common_mem("Graph", &common_ginfo->graph);
+	__vipx_context_dump_common_mem("Temp_buf", &common_ginfo->temp_buf);
+	__vipx_context_dump_common_mem("Weight", &common_ginfo->weight);
+	__vipx_context_dump_common_mem("Bias", &common_ginfo->bias);
+#endif
+
+	gmodel = vctx->graph_ops->create_model(vctx->graph, common_ginfo);
+	if (IS_ERR(gmodel)) {
+		ret = PTR_ERR(gmodel);
+		goto p_err_gmodel;
+	}
+
+	/* map each dma-buf; the device address is written back via iova */
+	common_mem = &gmodel->common_ginfo.graph;
+	buffer = __vipx_context_create_buffer(vctx, common_mem);
+	if (IS_ERR(buffer)) {
+		ret = PTR_ERR(buffer);
+		vipx_err("Failed to create buffer for graph (%d)\n", ret);
+		goto p_err_graph;
+	}
+	gmodel->graph = buffer;
+
+	common_mem = &gmodel->common_ginfo.temp_buf;
+	buffer = __vipx_context_create_buffer(vctx, common_mem);
+	if (IS_ERR(buffer)) {
+		ret = PTR_ERR(buffer);
+		vipx_err("Failed to create buffer for temp_buf (%d)\n", ret);
+		goto p_err_temp_buf;
+	}
+	gmodel->temp_buf = buffer;
+
+	common_mem = &gmodel->common_ginfo.weight;
+	buffer = __vipx_context_create_buffer(vctx, common_mem);
+	if (IS_ERR(buffer)) {
+		ret = PTR_ERR(buffer);
+		vipx_err("Failed to create buffer for weight (%d)\n", ret);
+		goto p_err_weight;
+	}
+	gmodel->weight = buffer;
+
+	common_mem = &gmodel->common_ginfo.bias;
+	buffer = __vipx_context_create_buffer(vctx, common_mem);
+	if (IS_ERR(buffer)) {
+		ret = PTR_ERR(buffer);
+		vipx_err("Failed to create buffer for bias (%d)\n", ret);
+		goto p_err_bias;
+	}
+	gmodel->bias = buffer;
+
+	ret = vctx->graph_ops->register_model(vctx->graph, gmodel);
+	if (ret)
+		goto p_err_register_model;
+
+	vipx_leave();
+	return 0;
+p_err_register_model:
+	__vipx_context_destroy_buffer(vctx, gmodel->bias);
+p_err_bias:
+	__vipx_context_destroy_buffer(vctx, gmodel->weight);
+p_err_weight:
+	__vipx_context_destroy_buffer(vctx, gmodel->temp_buf);
+p_err_temp_buf:
+	__vipx_context_destroy_buffer(vctx, gmodel->graph);
+p_err_graph:
+	vctx->graph_ops->destroy_model(vctx->graph, gmodel);
+p_err_gmodel:
+	return ret;
+}
+
+/*
+ * Tear down a previously loaded graph model: unregister it, unmap all
+ * of its device buffers and destroy the model object.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_context_unload_graph_info(struct vipx_context *vctx,
+		struct vipx_ioc_unload_graph_info *ginfo)
+{
+	struct vipx_graph_model *gmodel;
+
+	vipx_enter();
+#ifdef UNLOAD_GRAPH_INFO_DEBUG
+	vipx_dbg("Unload graph info\n");
+	vipx_dbg("Graph ID : %#x\n", ginfo->graph_id);
+#endif
+
+	gmodel = vctx->graph_ops->get_model(vctx->graph, ginfo->graph_id);
+	if (IS_ERR(gmodel))
+		return PTR_ERR(gmodel);
+
+	vctx->graph_ops->unregister_model(vctx->graph, gmodel);
+
+	/* unmap in reverse order of creation */
+	__vipx_context_destroy_buffer(vctx, gmodel->bias);
+	__vipx_context_destroy_buffer(vctx, gmodel->weight);
+	__vipx_context_destroy_buffer(vctx, gmodel->temp_buf);
+	__vipx_context_destroy_buffer(vctx, gmodel->graph);
+
+	vctx->graph_ops->destroy_model(vctx->graph, gmodel);
+
+	vipx_leave();
+	return 0;
+}
+
+/*
+ * Execute a submodel of a loaded graph model.
+ * All input/output dma-bufs are mapped before execution and unmapped
+ * again afterwards, on both the success and the failure path.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_context_execute_submodel(struct vipx_context *vctx,
+		struct vipx_ioc_execute_submodel *execute)
+{
+	int ret;
+	int idx;
+	int num_input, num_output;
+
+	struct vipx_common_execute_info *execute_info;
+	struct vipx_graph_model *gmodel;
+	struct vipx_common_mem *common_mem;
+	struct vipx_buffer **buffer;
+	int buffer_count = 0, translate_count = 0;
+
+	vipx_enter();
+
+	execute_info = &execute->execute_info;
+
+	/* validate user-supplied counts before sizing the buffer array */
+	num_input = execute_info->num_input;
+	if (unlikely(num_input > MAX_INPUT_NUM)) {
+		ret = -EINVAL;
+		vipx_err("num_input[%d] is more than MAX[%d]\n", num_input,
+				MAX_INPUT_NUM);
+		goto p_err_num;
+	}
+	//TODO check plane count of buffer
+	buffer_count += num_input;
+
+	num_output = execute_info->num_output;
+	if (unlikely(num_output > MAX_OUTPUT_NUM)) {
+		ret = -EINVAL;
+		/* report the output limit (was wrongly MAX_INPUT_NUM) */
+		vipx_err("num_output[%d] is more than MAX[%d]\n", num_output,
+				MAX_OUTPUT_NUM);
+		goto p_err_num;
+	}
+	//TODO check plane count of buffer
+	buffer_count += num_output;
+
+#ifdef EXECUTE_DEBUG
+	vipx_dbg("=====Execute submodel====\n");
+	vipx_dbg("Global ID : %#x\n", execute_info->gid);
+	vipx_dbg("macro_sg_offset : %d\n", execute_info->macro_sg_offset);
+	vipx_dbg("num_input : %d\n", execute_info->num_input);
+	vipx_dbg("num_output : %d\n", execute_info->num_output);
+
+	for (idx = 0; idx < num_input; ++idx) {
+		common_mem = &execute_info->input[idx][0];
+		vipx_dbg("IN [%d/%d]\n", idx, num_input - 1);
+		vipx_dbg("fd : %d\n", common_mem->fd);
+		vipx_dbg("size : %d\n", common_mem->size);
+		vipx_dbg("addr : %u\n", common_mem->iova);
+	}
+
+	for (idx = 0; idx < num_output; ++idx) {
+		common_mem = &execute_info->output[idx][0];
+		vipx_dbg("OUT [%d/%d]\n", idx, num_output - 1);
+		vipx_dbg("fd : %d\n", common_mem->fd);
+		vipx_dbg("size : %d\n", common_mem->size);
+		vipx_dbg("addr : %u\n", common_mem->iova);
+	}
+
+	vipx_dbg("user_para_size : %d\n", execute_info->user_para_size);
+#endif
+
+	gmodel = vctx->graph_ops->get_model(vctx->graph, execute_info->gid);
+	if (unlikely(IS_ERR(gmodel))) {
+		ret = PTR_ERR(gmodel);
+		goto p_err_get_model;
+	}
+
+	vipx_kernel_binary_set_gmodel(vctx, gmodel);
+	if (!gmodel->kbin_count) {
+		ret = -ENODATA;
+		vipx_err("No kernel binary to set at graph model(%#x)\n",
+				gmodel->id);
+		goto p_err_set_model;
+	}
+
+	buffer = kmalloc(sizeof(*buffer) * buffer_count, GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		vipx_err("Failed to alloc buffer for in/out (%d)\n",
+				buffer_count);
+		goto p_err_alloc;
+	}
+
+	//TODO check plane count of buffer
+	for (idx = 0; idx < num_input; ++idx) {
+		common_mem = &execute_info->input[idx][0];
+		buffer[translate_count] = __vipx_context_create_buffer(vctx,
+				common_mem);
+		if (unlikely(IS_ERR(buffer[translate_count]))) {
+			ret = PTR_ERR(buffer[translate_count]);
+			vipx_err("Failed to translate input(%d/%d,%d/%d,%d)\n",
+					idx, num_input,
+					translate_count, buffer_count, ret);
+			goto p_err_buffer;
+		}
+		translate_count++;
+	}
+
+	//TODO check plane count of buffer
+	for (idx = 0; idx < num_output; ++idx) {
+		common_mem = &execute_info->output[idx][0];
+		buffer[translate_count] = __vipx_context_create_buffer(vctx,
+				common_mem);
+		if (unlikely(IS_ERR(buffer[translate_count]))) {
+			ret = PTR_ERR(buffer[translate_count]);
+			vipx_err("Failed to translate output(%d/%d,%d/%d,%d)\n",
+					idx, num_output,
+					translate_count, buffer_count, ret);
+			goto p_err_buffer;
+		}
+		translate_count++;
+	}
+
+	ret = vctx->graph_ops->execute_model(vctx->graph, gmodel, execute_info);
+	if (ret)
+		goto p_err_execute;
+
+	for (idx = 0; idx < translate_count; ++idx)
+		__vipx_context_destroy_buffer(vctx, buffer[idx]);
+	/* the pointer array itself was leaked on this path before */
+	kfree(buffer);
+
+	vipx_leave();
+	return 0;
+p_err_execute:
+p_err_buffer:
+	for (idx = 0; idx < translate_count; ++idx)
+		__vipx_context_destroy_buffer(vctx, buffer[idx]);
+
+	kfree(buffer);
+p_err_alloc:
+p_err_set_model:
+p_err_get_model:
+p_err_num:
+	return ret;
+}
+
+/* Context-level operation table published through vipx_context->vops */
+static const struct vipx_context_ops vipx_context_ops = {
+	.load_kernel_binary = vipx_context_load_kernel_binary,
+	.load_graph_info = vipx_context_load_graph_info,
+	.unload_graph_info = vipx_context_unload_graph_info,
+	.execute_submodel = vipx_context_execute_submodel
+};
+
+/*
+ * Allocate and initialize a new device context for an open file,
+ * claiming a slot in the core's context bitmap and linking the
+ * context into the core's list.
+ * Returns the context on success or an ERR_PTR on failure.
+ */
+struct vipx_context *vipx_context_create(struct vipx_core *core)
+{
+	struct vipx_context *vctx;
+	int ret;
+
+	vipx_enter();
+
+	vctx = kzalloc(sizeof(*vctx), GFP_KERNEL);
+	if (!vctx) {
+		vipx_err("Failed to alloc context\n");
+		ret = -ENOMEM;
+		goto p_err;
+	}
+	vctx->core = core;
+
+	/* claim a free slot; VIPX_MAX_CONTEXT means the map is full */
+	vctx->idx = vipx_util_bitmap_get_zero_bit(core->vctx_map,
+			VIPX_MAX_CONTEXT);
+	if (vctx->idx == VIPX_MAX_CONTEXT) {
+		vipx_err("Failed to get idx of context\n");
+		ret = -ENOMEM;
+		goto p_err_bitmap;
+	}
+
+	core->vctx_count++;
+	list_add_tail(&vctx->list, &core->vctx_list);
+
+	mutex_init(&vctx->lock);
+
+	vctx->vops = &vipx_context_ops;
+	vctx->binary_count = 0;
+	INIT_LIST_HEAD(&vctx->binary_list);
+	spin_lock_init(&vctx->binary_slock);
+
+	vipx_queue_init(vctx);
+
+	vctx->state = BIT(VIPX_CONTEXT_OPEN);
+	vipx_leave();
+	return vctx;
+p_err_bitmap:
+	kfree(vctx);
+p_err:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Release a device context: drop remaining kernel binaries, unlink it
+ * from the core and return its bitmap slot.
+ */
+void vipx_context_destroy(struct vipx_context *vctx)
+{
+	struct vipx_core *core = vctx->core;
+
+	vipx_enter();
+
+	/* remove any kernel binaries still attached to this context */
+	if (vctx->binary_count)
+		vipx_kernel_binary_all_remove(vctx);
+
+	mutex_destroy(&vctx->lock);
+	list_del(&vctx->list);
+	core->vctx_count--;
+	vipx_util_bitmap_clear_bit(core->vctx_map, vctx->idx);
+	kfree(vctx);
+	vipx_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_CONTEXT_H__
+#define __VIPX_CONTEXT_H__
+
+#include <linux/mutex.h>
+
+#include "vipx-queue.h"
+#include "vipx-graph.h"
+#include "vipx-ioctl.h"
+#include "vipx-core.h"
+
+/* Lifecycle states of a context; stored as BIT(state) in vipx_context */
+enum vipx_context_state {
+	VIPX_CONTEXT_OPEN,
+	VIPX_CONTEXT_GRAPH,
+	VIPX_CONTEXT_FORMAT,
+	VIPX_CONTEXT_START,
+	VIPX_CONTEXT_STOP
+};
+
+struct vipx_context;
+
+/* Model-level operations implemented by vipx-context.c (vctx->vops) */
+struct vipx_context_ops {
+	int (*load_kernel_binary)(struct vipx_context *vctx,
+			struct vipx_ioc_load_kernel_binary *kernel_bin);
+	int (*load_graph_info)(struct vipx_context *vctx,
+			struct vipx_ioc_load_graph_info *ginfo);
+	int (*unload_graph_info)(struct vipx_context *vctx,
+			struct vipx_ioc_unload_graph_info *ginfo);
+	int (*execute_submodel)(struct vipx_context *vctx,
+			struct vipx_ioc_execute_submodel *execute);
+};
+
+/* VS4L queue operations bound to the context's queue list */
+struct vipx_context_qops {
+	int (*poll)(struct vipx_queue_list *qlist,
+			struct file *file, struct poll_table_struct *poll);
+	int (*set_graph)(struct vipx_queue_list *qlist,
+			struct vs4l_graph *ginfo);
+	int (*set_format)(struct vipx_queue_list *qlist,
+			struct vs4l_format_list *flist);
+	int (*set_param)(struct vipx_queue_list *qlist,
+			struct vs4l_param_list *plist);
+	int (*set_ctrl)(struct vipx_queue_list *qlist,
+			struct vs4l_ctrl *ctrl);
+	int (*qbuf)(struct vipx_queue_list *qlist,
+			struct vs4l_container_list *clist);
+	int (*dqbuf)(struct vipx_queue_list *qlist,
+			struct vs4l_container_list *clist);
+	int (*streamon)(struct vipx_queue_list *qlist);
+	int (*streamoff)(struct vipx_queue_list *qlist);
+};
+
+/* Graph-model operations provided by the graph layer (vctx->graph_ops) */
+struct vipx_context_gops {
+	struct vipx_graph_model *(*create_model)(struct vipx_graph *graph,
+			struct vipx_common_graph_info *ginfo);
+	struct vipx_graph_model *(*get_model)(struct vipx_graph *graph,
+			unsigned int id);
+	int (*destroy_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel);
+	int (*register_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel);
+	int (*unregister_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel);
+	int (*start_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel);
+	int (*stop_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel);
+	int (*execute_model)(struct vipx_graph *graph,
+			struct vipx_graph_model *gmodel,
+			struct vipx_common_execute_info *einfo);
+};
+
+/*
+ * Per-open-file state of the VIPx device.
+ * Protected by 'lock' except for the binary list, which has its own
+ * spinlock (binary_slock).
+ */
+struct vipx_context {
+	unsigned int state;	/* BIT(vipx_context_state) */
+	unsigned int idx;	/* slot in core->vctx_map */
+	struct list_head list;	/* entry in core->vctx_list */
+	struct mutex lock;
+
+	const struct vipx_context_ops *vops;
+	int binary_count;
+	struct list_head binary_list;
+	spinlock_t binary_slock;
+	const struct vipx_context_gops *graph_ops;
+
+	const struct vipx_context_qops *queue_ops;
+	struct vipx_queue_list queue_list;
+
+	struct vipx_core *core;
+	struct vipx_graph *graph;
+};
+
+struct vipx_context *vipx_context_create(struct vipx_core *core);
+void vipx_context_destroy(struct vipx_context *vctx);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-util.h"
+#include "vipx-ioctl.h"
+#include "vipx-device.h"
+#include "vipx-graph.h"
+#include "vipx-context.h"
+#include "vipx-core.h"
+
+/*
+ * Take a reference; on the 0 -> 1 transition run the 'first' callback.
+ * If that callback fails the reference is dropped again.
+ * Returns 0 on success or the callback's error code.
+ */
+static inline int __vref_get(struct vipx_core_refcount *vref)
+{
+	int ret = 0;
+
+	vipx_enter();
+	if (atomic_inc_return(&vref->refcount) == 1) {
+		ret = vref->first(vref->core);
+		if (ret)
+			atomic_dec(&vref->refcount);
+	}
+	vipx_leave();
+	return ret;
+}
+
+/*
+ * Drop a reference; on the 1 -> 0 transition run the 'final' callback.
+ * If that callback fails the reference is restored.
+ * Returns 0 on success or the callback's error code.
+ */
+static inline int __vref_put(struct vipx_core_refcount *vref)
+{
+	int ret = 0;
+
+	vipx_enter();
+	if (atomic_dec_return(&vref->refcount) == 0) {
+		ret = vref->final(vref->core);
+		if (ret)
+			atomic_inc(&vref->refcount);
+	}
+	vipx_leave();
+	return ret;
+}
+
+/*
+ * VS4L set-graph handler: only legal while the context is in the
+ * open state; moves it to the graph state on success.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_set_graph(struct vipx_context *vctx,
+		struct vs4l_graph *ginfo)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for set_graph (%d)\n",
+				ret);
+		return ret;
+	}
+
+	if (!(vctx->state & BIT(VIPX_CONTEXT_OPEN))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not open\n", vctx->state);
+		goto p_unlock;
+	}
+
+	ret = vctx->queue_ops->set_graph(&vctx->queue_list, ginfo);
+	if (ret)
+		goto p_unlock;
+
+	vctx->state = BIT(VIPX_CONTEXT_GRAPH);
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_unlock:
+	mutex_unlock(&vctx->lock);
+	return ret;
+}
+
+/*
+ * VS4L set-format handler: legal from the graph or format state;
+ * moves the context to the format state on success.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_set_format(struct vipx_context *vctx,
+		struct vs4l_format_list *flist)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for set_format (%d)\n",
+				ret);
+		return ret;
+	}
+
+	if (!(vctx->state & (BIT(VIPX_CONTEXT_GRAPH) |
+			BIT(VIPX_CONTEXT_FORMAT)))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not graph/format\n",
+				vctx->state);
+		goto p_unlock;
+	}
+
+	ret = vctx->queue_ops->set_format(&vctx->queue_list, flist);
+	if (ret)
+		goto p_unlock;
+
+	vctx->state = BIT(VIPX_CONTEXT_FORMAT);
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_unlock:
+	mutex_unlock(&vctx->lock);
+	return ret;
+}
+
+/*
+ * VS4L set-param handler: only legal while streaming (start state).
+ * The context state is left unchanged.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_set_param(struct vipx_context *vctx,
+		struct vs4l_param_list *plist)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for set_param (%d)\n",
+				ret);
+		return ret;
+	}
+
+	if (!(vctx->state & BIT(VIPX_CONTEXT_START))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not start\n", vctx->state);
+		goto p_unlock;
+	}
+
+	ret = vctx->queue_ops->set_param(&vctx->queue_list, plist);
+	if (ret)
+		goto p_unlock;
+
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_unlock:
+	mutex_unlock(&vctx->lock);
+	return ret;
+}
+
+/*
+ * VS4L set-ctrl handler: forwarded to the queue layer under the
+ * context lock; no state requirement.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_set_ctrl(struct vipx_context *vctx,
+		struct vs4l_ctrl *ctrl)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for set_ctrl (%d)\n",
+				ret);
+		return ret;
+	}
+
+	ret = vctx->queue_ops->set_ctrl(&vctx->queue_list, ctrl);
+	mutex_unlock(&vctx->lock);
+
+	if (!ret)
+		vipx_leave();
+	return ret;
+}
+
+/*
+ * VS4L queue-buffer handler: only legal while streaming.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_qbuf(struct vipx_context *vctx,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for qbuf (%d)\n", ret);
+		return ret;
+	}
+
+	if (!(vctx->state & BIT(VIPX_CONTEXT_START))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not start\n", vctx->state);
+		goto p_unlock;
+	}
+
+	ret = vctx->queue_ops->qbuf(&vctx->queue_list, clist);
+	if (ret)
+		goto p_unlock;
+
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_unlock:
+	mutex_unlock(&vctx->lock);
+	return ret;
+}
+
+/*
+ * VS4L dequeue-buffer handler: only legal while streaming.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_dqbuf(struct vipx_context *vctx,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for dqbuf (%d)\n", ret);
+		return ret;
+	}
+
+	if (!(vctx->state & BIT(VIPX_CONTEXT_START))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not start\n", vctx->state);
+		goto p_unlock;
+	}
+
+	ret = vctx->queue_ops->dqbuf(&vctx->queue_list, clist);
+	if (ret)
+		goto p_unlock;
+
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_unlock:
+	mutex_unlock(&vctx->lock);
+	return ret;
+}
+
+/*
+ * VS4L stream-on handler: legal from the format or stop state.
+ * Takes the device-global start refcount before starting the queue;
+ * if starting the queue fails, the refcount is released again so a
+ * failed stream-on cannot pin the device in the started state.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_streamon(struct vipx_context *vctx)
+{
+	int ret;
+	struct vipx_core *core;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for stream-on (%d)\n",
+				ret);
+		goto p_err_lock;
+	}
+
+	if (!(vctx->state & (BIT(VIPX_CONTEXT_FORMAT) |
+			BIT(VIPX_CONTEXT_STOP)))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not format/stop\n", vctx->state);
+		goto p_err_state;
+	}
+
+	core = vctx->core;
+	ret = __vref_get(&core->start_cnt);
+	if (ret) {
+		vipx_err("vref_get(start) is fail(%d)\n", ret);
+		goto p_err_vref;
+	}
+
+	ret = vctx->queue_ops->streamon(&vctx->queue_list);
+	if (ret)
+		goto p_err_queue_ops;
+
+	vctx->state = BIT(VIPX_CONTEXT_START);
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_err_queue_ops:
+	/* undo the start refcount taken above (was leaked before) */
+	__vref_put(&core->start_cnt);
+p_err_vref:
+p_err_state:
+	mutex_unlock(&vctx->lock);
+p_err_lock:
+	return ret;
+}
+
+/*
+ * VS4L stream-off handler: only legal while streaming. Stops the
+ * queue first and then drops the device-global start refcount taken
+ * by stream-on.
+ * NOTE(review): if __vref_put() fails after the queue was already
+ * stopped, the context stays in the START state with the queue off —
+ * presumably a deliberate best-effort; confirm against the refcount
+ * 'final' callback semantics.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_streamoff(struct vipx_context *vctx)
+{
+	int ret;
+	struct vipx_core *core;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock context lock for stream-off (%d)\n",
+				ret);
+		goto p_err_lock;
+	}
+
+	if (!(vctx->state & BIT(VIPX_CONTEXT_START))) {
+		ret = -EINVAL;
+		vipx_err("Context state(%x) is not start\n", vctx->state);
+		goto p_err_state;
+	}
+
+	ret = vctx->queue_ops->streamoff(&vctx->queue_list);
+	if (ret)
+		goto p_err_queue_ops;
+
+	core = vctx->core;
+	ret = __vref_put(&core->start_cnt);
+	if (ret) {
+		vipx_err("vref_put(start) is fail(%d)\n", ret);
+		goto p_err_vref;
+	}
+
+	vctx->state = BIT(VIPX_CONTEXT_STOP);
+	mutex_unlock(&vctx->lock);
+	vipx_leave();
+	return 0;
+p_err_vref:
+p_err_queue_ops:
+p_err_state:
+	mutex_unlock(&vctx->lock);
+p_err_lock:
+	return ret;
+}
+
+/*
+ * ioctl wrapper for loading a kernel binary: serializes on the
+ * context lock and mirrors the result into args->ret for user space.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_load_kernel_binary(struct vipx_context *vctx,
+		struct vipx_ioc_load_kernel_binary *args)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock for loading kernel binary (%d)\n",
+				ret);
+		args->ret = ret;
+		return ret;
+	}
+
+	ret = vctx->vops->load_kernel_binary(vctx, args);
+	mutex_unlock(&vctx->lock);
+
+	args->ret = ret;
+	if (!ret)
+		vipx_leave();
+	return ret;
+}
+
+/*
+ * ioctl wrapper for loading graph info: serializes on the context
+ * lock and mirrors the result into args->ret for user space.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_load_graph_info(struct vipx_context *vctx,
+		struct vipx_ioc_load_graph_info *args)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		/* fixed typo: "loadding" -> "loading" */
+		vipx_err("Failed to lock for loading graph info (%d)\n", ret);
+		goto p_err_lock;
+	}
+
+	ret = vctx->vops->load_graph_info(vctx, args);
+	if (ret)
+		goto p_err_vops;
+
+	mutex_unlock(&vctx->lock);
+	args->ret = 0;
+	vipx_leave();
+	return 0;
+p_err_vops:
+	mutex_unlock(&vctx->lock);
+p_err_lock:
+	args->ret = ret;
+	return ret;
+}
+
+/*
+ * ioctl wrapper for unloading graph info: serializes on the context
+ * lock and mirrors the result into args->ret for user space.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_unload_graph_info(struct vipx_context *vctx,
+		struct vipx_ioc_unload_graph_info *args)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock for unloading graph info (%d)\n", ret);
+		args->ret = ret;
+		return ret;
+	}
+
+	ret = vctx->vops->unload_graph_info(vctx, args);
+	mutex_unlock(&vctx->lock);
+
+	args->ret = ret;
+	if (!ret)
+		vipx_leave();
+	return ret;
+}
+
+/*
+ * ioctl wrapper for executing a submodel: serializes on the context
+ * lock and mirrors the result into args->ret for user space.
+ * Returns 0 on success or a negative error code.
+ */
+static int vipx_core_execute_submodel(struct vipx_context *vctx,
+		struct vipx_ioc_execute_submodel *args)
+{
+	int ret;
+
+	vipx_enter();
+	if (mutex_lock_interruptible(&vctx->lock)) {
+		ret = -ERESTARTSYS;
+		vipx_err("Failed to lock for executing submodel (%d)\n", ret);
+		args->ret = ret;
+		return ret;
+	}
+
+	ret = vctx->vops->execute_submodel(vctx, args);
+	mutex_unlock(&vctx->lock);
+
+	args->ret = ret;
+	if (!ret)
+		vipx_leave();
+	return ret;
+}
+
+const struct vipx_ioctl_ops vipx_core_ioctl_ops = {
+ .set_graph = vipx_core_set_graph,
+ .set_format = vipx_core_set_format,
+ .set_param = vipx_core_set_param,
+ .set_ctrl = vipx_core_set_ctrl,
+ .qbuf = vipx_core_qbuf,
+ .dqbuf = vipx_core_dqbuf,
+ .streamon = vipx_core_streamon,
+ .streamoff = vipx_core_streamoff,
+
+ .load_kernel_binary = vipx_core_load_kernel_binary,
+ .load_graph_info = vipx_core_load_graph_info,
+ .unload_graph_info = vipx_core_unload_graph_info,
+ .execute_submodel = vipx_core_execute_submodel,
+};
+
+static int vipx_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct vipx_device *device;
+ struct vipx_core *core;
+ struct vipx_context *vctx;
+ struct vipx_graph *graph;
+
+ vipx_enter();
+ miscdev = file->private_data;
+ device = dev_get_drvdata(miscdev->parent);
+ core = &device->core;
+
+ if (mutex_lock_interruptible(&core->lock)) {
+ ret = -ERESTARTSYS;
+ vipx_err("Failed to lock device lock for open (%d)\n", ret);
+ goto p_err_lock;
+ }
+
+ ret = __vref_get(&core->open_cnt);
+ if (ret) {
+ vipx_err("vref_get(open) is fail(%d)", ret);
+ goto p_err_vref;
+ }
+
+ vctx = vipx_context_create(core);
+ if (IS_ERR(vctx)) {
+ ret = PTR_ERR(vctx);
+ goto p_err_vctx;
+ }
+
+ graph = vipx_graph_create(vctx, &core->system->graphmgr);
+ if (IS_ERR(graph)) {
+ ret = PTR_ERR(graph);
+ goto p_err_graph;
+ }
+ vctx->graph = graph;
+
+ file->private_data = vctx;
+
+ mutex_unlock(&core->lock);
+ vipx_leave();
+ return 0;
+p_err_graph:
+p_err_vctx:
+ __vref_put(&core->open_cnt);
+p_err_vref:
+ mutex_unlock(&core->lock);
+p_err_lock:
+ return ret;
+}
+
+static int vipx_release(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct vipx_context *vctx;
+ struct vipx_core *core;
+
+ //TODO temp code
+ vipx_debug_write_log_binary();
+ vipx_debug_dump_debug_regs();
+ vipx_enter();
+ vctx = file->private_data;
+ core = vctx->core;
+
+ if (mutex_lock_interruptible(&core->lock)) {
+ ret = -ERESTARTSYS;
+ vipx_err("Failed to lock device lock for release (%d)\n", ret);
+ return ret;
+ }
+
+ vipx_graph_destroy(vctx->graph);
+ vipx_context_destroy(vctx);
+ __vref_put(&core->open_cnt);
+
+ mutex_unlock(&core->lock);
+ vipx_leave();
+ return 0;
+}
+
+static unsigned int vipx_poll(struct file *file, struct poll_table_struct *poll)
+{
+ int ret;
+ struct vipx_context *vctx;
+
+ vipx_enter();
+ vctx = file->private_data;
+ if (!(vctx->state & BIT(VIPX_CONTEXT_START))) {
+ ret = POLLERR;
+ vipx_err("Context state(%x) is not start (%d)\n",
+ vctx->state, ret);
+ goto p_err;
+ }
+
+ ret = vctx->queue_ops->poll(&vctx->queue_list, file, poll);
+ if (ret)
+ goto p_err;
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+const struct file_operations vipx_file_ops = {
+ .owner = THIS_MODULE,
+ .open = vipx_open,
+ .release = vipx_release,
+ .poll = vipx_poll,
+ .unlocked_ioctl = vipx_ioctl,
+ .compat_ioctl = vipx_compat_ioctl
+};
+
+static int __vref_open(struct vipx_core *core)
+{
+ vipx_check();
+ atomic_set(&core->start_cnt.refcount, 0);
+ return vipx_device_open(core->device);
+}
+
+static int __vref_close(struct vipx_core *core)
+{
+ vipx_check();
+ return vipx_device_close(core->device);
+}
+
+static int __vref_start(struct vipx_core *core)
+{
+ vipx_check();
+ return vipx_device_start(core->device);
+}
+
+static int __vref_stop(struct vipx_core *core)
+{
+ vipx_check();
+ return vipx_device_stop(core->device);
+}
+
+static inline void __vref_init(struct vipx_core_refcount *vref,
+ struct vipx_core *core,
+ int (*first)(struct vipx_core *core),
+ int (*final)(struct vipx_core *core))
+{
+ vipx_enter();
+ vref->core = core;
+ vref->first = first;
+ vref->final = final;
+ atomic_set(&vref->refcount, 0);
+ vipx_leave();
+}
+
+/* Top-level data for debugging */
+static struct vipx_dev *vdev;
+
+int vipx_core_probe(struct vipx_device *device)
+{
+ int ret;
+ struct vipx_core *core;
+
+ vipx_enter();
+ core = &device->core;
+ core->device = device;
+ core->system = &device->system;
+ vdev = &core->vdev;
+
+ mutex_init(&core->lock);
+ __vref_init(&core->open_cnt, core, __vref_open, __vref_close);
+ __vref_init(&core->start_cnt, core, __vref_start, __vref_stop);
+ core->ioc_ops = &vipx_core_ioctl_ops;
+
+ vipx_util_bitmap_init(core->vctx_map, VIPX_MAX_CONTEXT);
+ INIT_LIST_HEAD(&core->vctx_list);
+ core->vctx_count = 0;
+
+ vdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ vdev->miscdev.name = VIPX_DEV_NAME;
+ vdev->miscdev.fops = &vipx_file_ops;
+ vdev->miscdev.parent = device->dev;
+
+ ret = misc_register(&vdev->miscdev);
+ if (ret) {
+ vipx_err("miscdevice is not registered (%d)\n", ret);
+ goto p_err_misc;
+ }
+
+ vipx_leave();
+ return 0;
+p_err_misc:
+ mutex_destroy(&core->lock);
+ return ret;
+}
+
+void vipx_core_remove(struct vipx_core *core)
+{
+ vipx_enter();
+ misc_deregister(&core->vdev.miscdev);
+ mutex_destroy(&core->lock);
+ vipx_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_CORE_H__
+#define __VIPX_CORE_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+
+#include "vipx-system.h"
+
+/* Information about VIPx device file */
+#define VIPX_DEV_NAME_LEN (16)
+#define VIPX_DEV_NAME "vipx"
+
+struct vipx_device;
+struct vipx_core;
+
+struct vipx_core_refcount {
+ atomic_t refcount;
+ struct vipx_core *core;
+ int (*first)(struct vipx_core *core);
+ int (*final)(struct vipx_core *core);
+};
+
+struct vipx_dev {
+ int minor;
+ char name[VIPX_DEV_NAME_LEN];
+ struct miscdevice miscdev;
+};
+
+struct vipx_core {
+ struct vipx_dev vdev;
+ struct mutex lock;
+ struct vipx_core_refcount open_cnt;
+ struct vipx_core_refcount start_cnt;
+ const struct vipx_ioctl_ops *ioc_ops;
+ DECLARE_BITMAP(vctx_map, VIPX_MAX_CONTEXT);
+ struct list_head vctx_list;
+ unsigned int vctx_count;
+
+ struct vipx_device *device;
+ struct vipx_system *system;
+};
+
+int vipx_core_probe(struct vipx_device *device);
+void vipx_core_remove(struct vipx_core *core);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
*/
#include <linux/debugfs.h>
-#include <linux/delay.h>
#include <linux/ctype.h>
-#include <media/videobuf2-dma-sg.h>
-#include <linux/dma-buf.h>
-#include <linux/ion_exynos.h>
+#include <linux/uaccess.h>
-#include "vipx-config.h"
-#include "vipx-debug.h"
+#include "vipx-log.h"
+#include "vipx-device.h"
#include "vipx-graphmgr.h"
#include "vipx-graph.h"
-#include "vipx-system.h"
+#include "vipx-debug.h"
+
+#define DEBUG_FS_ROOT_NAME "vipx"
+#define DEBUG_FS_LOGFILE_NAME "fw-msg"
+#define DEBUG_FS_IMGFILE_NAME "dump-img"
+#define DEBUG_FS_GRPFILE_NAME "graph"
+#define DEBUG_FS_BUFFILE_NAME "buffer"
+
+#define DEBUG_MONITORING_PERIOD (HZ * 5)
+#define CHUNK_SIZE (32)
+
+static struct vipx_device *debug_device;
+
+int vipx_debug_write_log_binary(void)
+{
+ int ret;
+ struct vipx_system *sys;
+ struct vipx_binary *bin;
+
+ vipx_enter();
+ sys = &debug_device->system;
+ bin = &sys->binary;
+
+ if (!sys->memory.debug.kvaddr)
+ return -ENOMEM;
+
+ if (!current->fs) {
+ vipx_warn("Failed to write debug log as fs is invalid\n");
+ return -ESRCH;
+ }
-#define DEBUG_FS_ROOT_NAME "vipx"
-#define DEBUG_FS_LOGFILE_NAME "fw-msg"
-#define DEBUG_FS_IMGFILE_NAME "dump-img"
-#define DEBUG_FS_GRPFILE_NAME "graph"
-#define DEBUG_FS_BUFFILE_NAME "buffer"
+ ret = vipx_binary_write(bin, VIPX_DEBUG_BIN_PATH,
+ "vipx_log.bin",
+ sys->memory.debug.kvaddr,
+ sys->memory.debug.size);
+ if (!ret)
+ vipx_info("%s/vipx_log.bin was created for debugging\n",
+ VIPX_DEBUG_BIN_PATH);
-#define CHUNKSZ 32
+ vipx_leave();
+ return ret;
+}
-s32 atoi(const char *psz_buf)
+int vipx_debug_dump_debug_regs(void)
{
- const char *pch = psz_buf;
- s32 base = 0;
+ struct vipx_system *sys;
- while (isspace(*pch))
- pch++;
+ vipx_enter();
+ sys = &debug_device->system;
- if (*pch == '-' || *pch == '+') {
- base = 10;
- pch++;
- } else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') {
- base = 16;
- }
+ sys->ctrl_ops->debug_dump(sys);
+ vipx_leave();
+ return 0;
+}
- return simple_strtoul(pch, NULL, base);
+int vipx_debug_atoi(const char *psz_buf)
+{
+	const char *pch = psz_buf;
+	int base;
+
+	/* Skip leading whitespace before sniffing the number format. */
+	while (isspace(*pch))
+		pch++;
+
+	/*
+	 * A leading sign forces decimal; a trailing 'h'/'H' selects hex;
+	 * otherwise base 0 lets the parser auto-detect (0x..., 0..., dec).
+	 */
+	if (*pch == '-' || *pch == '+') {
+		base = 10;
+		pch++;
+	} else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') {
+		base = 16;
+	} else {
+		base = 0;
+	}
+
+	/*
+	 * kstrtoul() must not be used here: it oopses on a NULL result
+	 * pointer, returns 0/-errno instead of the parsed value, and
+	 * rejects the trailing 'h' suffix handled above.  simple_strtoul()
+	 * stops at the first invalid character and returns the value,
+	 * preserving the semantics of the atoi() this function replaces.
+	 */
+	return simple_strtoul(pch, NULL, base);
}
-int bitmap_scnprintf(char *buf, unsigned int buflen,
- const unsigned long *maskp, int nmaskbits)
+int vipx_debug_bitmap_scnprintf(char *buf, unsigned int buflen,
+ const unsigned long *maskp, int nmaskbits)
{
- int i, word, bit, len = 0;
- unsigned long val;
- const char *sep = "";
- int chunksz;
- u32 chunkmask;
-
- chunksz = nmaskbits & (CHUNKSZ - 1);
- if (chunksz == 0)
- chunksz = CHUNKSZ;
-
- i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
- for (; i >= 0; i -= CHUNKSZ) {
- chunkmask = ((1ULL << chunksz) - 1);
- word = i / BITS_PER_LONG;
- bit = i % BITS_PER_LONG;
- val = (maskp[word] >> bit) & chunkmask;
- len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
- (chunksz+3)/4, val);
- chunksz = CHUNKSZ;
- sep = ",";
- }
- return len;
+ int chunksz;
+ int idx;
+ unsigned int chunkmask;
+ int word, bit, len;
+ unsigned long val;
+ const char *sep = "";
+
+ chunksz = nmaskbits & (CHUNK_SIZE - 1);
+ if (chunksz == 0)
+ chunksz = CHUNK_SIZE;
+
+ for (idx = ALIGN(nmaskbits, CHUNK_SIZE) - CHUNK_SIZE, len = 0; idx >= 0;
+ idx -= CHUNK_SIZE) {
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = idx / BITS_PER_LONG;
+ bit = idx % BITS_PER_LONG;
+ val = (maskp[word] >> bit) & chunkmask;
+ len += scnprintf(buf + len, buflen - len, "%s%0*lx", sep,
+ (chunksz + 3) / 4, val);
+ chunksz = CHUNK_SIZE;
+ sep = ",";
+ }
+
+ return len;
}
void vipx_dmsg_concate(struct vipx_debug_log *log, const char *fmt, ...)
{
va_list ap;
char term[50];
- u32 copy_len;
+ size_t size;
va_start(ap, fmt);
vsnprintf(term, sizeof(term), fmt, ap);
va_end(ap);
if (log->dsentence_pos >= DEBUG_SENTENCE_MAX) {
- vipx_err("debug message(%zd) over max\n", log->dsentence_pos);
+ vipx_err("debug message(%zu) over max\n", log->dsentence_pos);
return;
}
- copy_len = min((DEBUG_SENTENCE_MAX - log->dsentence_pos - 1), strlen(term));
- strncpy(log->dsentence + log->dsentence_pos, term, copy_len);
- log->dsentence_pos += copy_len;
+ size = min((DEBUG_SENTENCE_MAX - log->dsentence_pos - 1),
+ strlen(term));
+ strncpy(log->dsentence + log->dsentence_pos, term, size);
+ log->dsentence_pos += size;
log->dsentence[log->dsentence_pos] = 0;
}
-char * vipx_dmsg_print(struct vipx_debug_log *log)
+char *vipx_dmsg_print(struct vipx_debug_log *log)
{
log->dsentence_pos = 0;
return log->dsentence;
}
-int vipx_debug_memdump8(u8 *start, u8 *end)
-{
- int ret = 0;
- u8 *cur;
- u32 items, offset;
- char term[50], sentence[250];
-
- cur = start;
- items = 0;
- offset = 0;
-
- memset(sentence, 0, sizeof(sentence));
- snprintf(sentence, sizeof(sentence), "[V] Memory Dump8(%p ~ %p)", start, end);
-
- while (cur < end) {
- if ((items % 16) == 0) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- offset = 0;
- snprintf(term, sizeof(term), "[V] %p: ", cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- items = 0;
- }
-
- snprintf(term, sizeof(term), "%02X ", *cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- cur++;
- items++;
- }
-
- if (items) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- }
-
- ret = cur - end;
-
- return ret;
-}
-
-
-int vipx_debug_memdump16(u16 *start, u16 *end)
-{
- int ret = 0;
- u16 *cur;
- u32 items, offset;
- char term[50], sentence[250];
-
- cur = start;
- items = 0;
- offset = 0;
-
- memset(sentence, 0, sizeof(sentence));
- snprintf(sentence, sizeof(sentence), "[V] Memory Dump16(%p ~ %p)", start, end);
-
- while (cur < end) {
- if ((items % 16) == 0) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- offset = 0;
- snprintf(term, sizeof(term), "[V] %p: ", cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- items = 0;
- }
-
- snprintf(term, sizeof(term), "0x%04X ", *cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- cur++;
- items++;
- }
-
- if (items) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- }
-
- ret = cur - end;
-
- return ret;
-}
-
-int vipx_debug_memdump32(u32 *start, u32 *end)
+void vipx_debug_memdump(const char *prefix, void *start, size_t size)
{
- int ret = 0;
- u32 *cur;
- u32 items, offset;
- char term[50], sentence[250];
-
- cur = start;
- items = 0;
- offset = 0;
-
- memset(sentence, 0, sizeof(sentence));
- snprintf(sentence, sizeof(sentence), "[V] Memory Dump32(%p ~ %p)", start, end);
-
- while (cur < end) {
- if ((items % 8) == 0) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- offset = 0;
- snprintf(term, sizeof(term), "[V] %p: ", cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- items = 0;
- }
-
- snprintf(term, sizeof(term), "0x%08X ", *cur);
- snprintf(&sentence[offset], sizeof(sentence) - offset, "%s", term);
- offset += strlen(term);
- cur++;
- items++;
- }
-
- if (items) {
-#ifdef DEBUG_LOG_MEMORY
- printk(KERN_DEBUG "%s\n", sentence);
-#else
- printk(KERN_INFO "%s\n", sentence);
-#endif
- }
-
- ret = cur - end;
+ char log[30];
- return ret;
+ snprintf(log, sizeof(log), "[VIPx]%s ", prefix);
+ print_hex_dump(KERN_DEBUG, log, DUMP_PREFIX_OFFSET, 32, 4, start,
+ size, false);
}
static int vipx_debug_log_open(struct inode *inode, struct file *file)
{
+ vipx_enter();
if (inode->i_private)
file->private_data = inode->i_private;
-
+ vipx_leave();
return 0;
}
static ssize_t vipx_debug_log_read(struct file *file, char __user *user_buf,
- size_t buf_len, loff_t *ppos)
+ size_t buf_len, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
-
+ int ret;
+ size_t size;
struct vipx_debug *debug;
struct vipx_system *system;
+ vipx_enter();
debug = file->private_data;
- if (debug == NULL) {
- vipx_err("Cannot find private data\n");
+ if (!debug) {
+ vipx_err("Cannot find vipx_debug\n");
return 0;
}
- system = debug->system_data;
- if (system == NULL) {
- vipx_err("Cannot find system data\n");
+ system = debug->system;
+ if (!system) {
+ vipx_err("Cannot find vipx_system\n");
return 0;
}
else
size = buf_len;
- if (system->memory.info.kvaddr_debug == 0) {
- vipx_err("Cannot find debug region\n");
+ if (!system->memory.debug.kvaddr) {
+ vipx_err("Cannot find debug region for vipx\n");
return 0;
}
- ret = copy_to_user(user_buf, system->memory.info.kvaddr_debug, size);
- if (ret) {
- memcpy(user_buf, system->memory.info.kvaddr_debug, size);
- ret = 0;
- // return 0;
- }
+ ret = copy_to_user(user_buf, system->memory.debug.kvaddr, size);
+ if (ret)
+ memcpy(user_buf, system->memory.debug.kvaddr, size);
+ vipx_leave();
return size;
}
+static int __vipx_debug_grp_show(struct seq_file *s, void *unused)
+{
+ unsigned int idx;
+ struct vipx_debug *debug;
+ struct vipx_graphmgr *gmgr;
+ struct vipx_graph *graph;
+
+ vipx_enter();
+ debug = s->private;
+
+ seq_printf(s, "%7.s %7.s %7.s %7.s %7.s %7.s %7.s\n", "graph", "prio",
+ "period", "input", "done", "cancel", "recent");
+
+ gmgr = &debug->system->graphmgr;
+ mutex_lock(&gmgr->mlock);
+ for (idx = 0; idx < VIPX_MAX_GRAPH; ++idx) {
+ graph = gmgr->graph[idx];
+
+ if (!graph)
+ continue;
+
+ seq_printf(s, "%2d(%3d) %7d %7d %7d %7d %7d\n",
+ graph->idx, graph->uid, graph->priority,
+ graph->input_cnt, graph->done_cnt,
+ graph->cancel_cnt, graph->recent);
+ }
+ mutex_unlock(&gmgr->mlock);
+
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_debug_grp_open(struct inode *inode, struct file *file)
+{
+ vipx_check();
+ return single_open(file, __vipx_debug_grp_show, inode->i_private);
+}
+
+static int __vipx_debug_buf_show(struct seq_file *s, void *unused)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+static int vipx_debug_buf_open(struct inode *inode, struct file *file)
+{
+ vipx_enter();
+ vipx_leave();
+ return single_open(file, __vipx_debug_buf_show, inode->i_private);
+}
+
static int vipx_debug_img_open(struct inode *inode, struct file *file)
{
+ vipx_enter();
if (inode->i_private)
file->private_data = inode->i_private;
-
+ vipx_leave();
return 0;
}
static ssize_t vipx_debug_img_read(struct file *file, char __user *user_buf,
- size_t len, loff_t *ppos)
+ size_t len, loff_t *ppos)
{
- size_t size = 0;
+ int ret;
+ size_t size;
struct vipx_debug *debug;
struct vipx_debug_imgdump *imgdump;
- int ret = 0;
+ vipx_enter();
debug = file->private_data;
imgdump = &debug->imgdump;
if (!imgdump->kvaddr) {
- vipx_err("kvaddr is NULL\n");
+ vipx_err("kvaddr of imgdump for debugging is NULL\n");
return 0;
}
- if (!imgdump->cookie) {
- vipx_err("cookie is NULL\n");
- return 0;
- }
+ /* TODO check */
+ //if (!imgdump->cookie) {
+ // vipx_err("cookie is NULL\n");
+ // return 0;
+ //}
if (len <= imgdump->length)
size = len;
imgdump->kvaddr = NULL;
imgdump->length = 0;
imgdump->offset = 0;
- goto p_err;
+ return 0;
}
/* HACK for test */
memset(imgdump->kvaddr, 0x88, size / 2);
- //vb2_ion_sync_for_device(imgdump->cookie, imgdump->offset, size, DMA_FROM_DEVICE);
+ /* TODO check */
+ //vb2_ion_sync_for_device(imgdump->cookie, imgdump->offset,
+ // size, DMA_FROM_DEVICE);
ret = copy_to_user(user_buf, imgdump->kvaddr, size);
- if (ret) {
+ if (ret)
memcpy(user_buf, imgdump->kvaddr, size);
- ret = 0;
- // return 0;
-
- }
-
- vipx_info("DUMP : %p, SIZE : %zd\n", imgdump->kvaddr, size);
imgdump->offset += size;
imgdump->length -= size;
imgdump->kvaddr = (char *)imgdump->kvaddr + size;
-p_err:
+ vipx_leave();
return size;
}
-static ssize_t vipx_debug_img_write(struct file *file, const char __user *user_buf,
- size_t len, loff_t *ppos)
+static ssize_t vipx_debug_img_write(struct file *file,
+ const char __user *user_buf, size_t len, loff_t *ppos)
{
+ vipx_enter();
+ vipx_leave();
return len;
}
-static int vipx_debug_grp_show(struct seq_file *s, void *unused)
-{
- u32 i;
- struct vipx_debug *debug = s->private;
- struct vipx_graphmgr *graphmgr = debug->graphmgr_data;
- struct vipx_graph *graph;
-
- seq_printf(s, "------------------------------------------"
- "----------------------------------------"
- "--------------------------------------\n");
- seq_printf(s, "%7.s %7.s %7.s %7.s %7.s %7.s %7.s\n",
- "graph", "prio", "period", "input", "done", "cancel", "recent");
- seq_printf(s, "------------------------------------------"
- "----------------------------------------"
- "--------------------------------------\n");
-
- mutex_lock(&graphmgr->mlock);
- for (i = 0; i < VIPX_MAX_GRAPH; ++i) {
- graph = graphmgr->graph[i];
-
- if (!graph)
- continue;
-
- seq_printf(s, "%2d(%3d) %7d %7d %7d %7d %7d\n",
- graph->idx, graph->uid, graph->priority,
- graph->input_cnt, graph->done_cnt, graph->cancel_cnt, graph->recent);
- }
- mutex_unlock(&graphmgr->mlock);
-
- seq_printf(s, "------------------------------------------"
- "----------------------------------------"
- "--------------------------------------\n");
- return 0;
-}
-
-static int vipx_debug_grp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, vipx_debug_grp_show, inode->i_private);
-}
-
-static int vipx_debug_buf_show(struct seq_file *s, void *unused)
-{
- return 0;
-}
-
-static int vipx_debug_buf_open(struct inode *inode, struct file *file)
-{
- return single_open(file, vipx_debug_buf_show, inode->i_private);
-}
-
static const struct file_operations vipx_debug_log_fops = {
.open = vipx_debug_log_open,
.read = vipx_debug_log_read,
.llseek = default_llseek
};
-static const struct file_operations vipx_debug_img_fops = {
- .open = vipx_debug_img_open,
- .read = vipx_debug_img_read,
- .write = vipx_debug_img_write,
- .llseek = default_llseek
-};
-
static const struct file_operations vipx_debug_grp_fops = {
.open = vipx_debug_grp_open,
.read = seq_read,
.release = single_release
};
-static void vipx_debug_monitor_fn(unsigned long data)
+static const struct file_operations vipx_debug_img_fops = {
+ .open = vipx_debug_img_open,
+ .read = vipx_debug_img_read,
+ .write = vipx_debug_img_write,
+ .llseek = default_llseek
+};
+
+static void __vipx_debug_monitor_fn(unsigned long data)
{
- struct vipx_debug_monitor *monitor = (struct vipx_debug_monitor *)data;
- struct vipx_debug *debug = container_of(monitor, struct vipx_debug, monitor);
- struct vipx_graphmgr *graphmgr = debug->graphmgr_data;
- struct vipx_system *system = debug->system_data;
- struct vipx_interface *interface = &system->interface;
+ struct vipx_debug *debug;
+ struct vipx_debug_monitor *monitor;
+ struct vipx_graphmgr *gmgr;
+ struct vipx_system *system;
+ struct vipx_interface *itf;
+
+ vipx_enter();
+ debug = (struct vipx_debug *)data;
+ monitor = &debug->monitor;
+ system = debug->system;
+ gmgr = &system->graphmgr;
+ itf = &system->interface;
if (!test_bit(VIPX_DEBUG_STATE_START, &debug->state))
return;
- if (monitor->tick_cnt == graphmgr->tick_cnt)
- vipx_err("timer thread is stuck(%d, %d)\n", monitor->tick_cnt, graphmgr->tick_pos);
+ if (monitor->tick_cnt == gmgr->tick_cnt)
+ vipx_err("timer thread is stuck (%u,%u)\n",
+ monitor->tick_cnt, gmgr->tick_pos);
- if (monitor->sched_cnt == graphmgr->sched_cnt) {
+ if (monitor->sched_cnt == gmgr->sched_cnt) {
struct vipx_graph *graph;
- u32 i;
+ unsigned int idx;
- vipx_info("GRAPH--------------------------------------------------------------\n");
- for (i = 0; i < VIPX_MAX_GRAPH; i++) {
- graph = graphmgr->graph[i];
+ vipx_dbg("[DEBUGFS] GRAPH\n");
+ for (idx = 0; idx < VIPX_MAX_GRAPH; idx++) {
+ graph = gmgr->graph[idx];
if (!graph)
continue;
vipx_graph_print(graph);
}
- vipx_info("GRAPH-MGR----------------------------------------------------------\n");
- vipx_taskdesc_print(graphmgr);
-
- vipx_info("INTERFACE-MGR------------------------------------------------------\n");
- vipx_interface_print(interface);
+ vipx_dbg("[DEBUGFS] GRAPH-MGR\n");
+ vipx_taskdesc_print(gmgr);
- vipx_info("-------------------------------------------------------------------\n");
- vipx_err("graph thread is stuck(%d, %d)\n", monitor->sched_cnt, graphmgr->sched_pos);
+ vipx_dbg("[DEBUGFS] INTERFACE-MGR\n");
+ vipx_task_print_all(&itf->taskmgr);
}
- monitor->tick_cnt = graphmgr->tick_cnt;
- monitor->sched_cnt = graphmgr->sched_cnt;
- monitor->done_cnt = interface->done_cnt;
- vipx_info("TIME %d(%d, %d, %d)\n", monitor->time_cnt, monitor->sched_cnt, monitor->tick_cnt, monitor->done_cnt);
+ monitor->tick_cnt = gmgr->tick_cnt;
+ monitor->sched_cnt = gmgr->sched_cnt;
+ monitor->done_cnt = itf->done_cnt;
monitor->time_cnt++;
mod_timer(&monitor->timer, jiffies + DEBUG_MONITORING_PERIOD);
+ vipx_leave();
}
-int __vipx_debug_stop(struct vipx_debug *debug)
+static int __vipx_debug_stop(struct vipx_debug *debug)
{
- int ret = 0;
- struct vipx_debug_monitor *monitor = &debug->monitor;
+ struct vipx_debug_monitor *monitor;
+
+ vipx_enter();
+ monitor = &debug->monitor;
if (!test_bit(VIPX_DEBUG_STATE_START, &debug->state))
- goto p_err;
+ goto p_end;
- del_timer(&monitor->timer);
+ /* TODO check debug */
+ //del_timer_sync(&monitor->timer);
clear_bit(VIPX_DEBUG_STATE_START, &debug->state);
-p_err:
- return ret;
+ vipx_leave();
+p_end:
+ return 0;
}
-int vipx_debug_probe(struct vipx_debug *debug, void *graphmgr_data, void *system_data)
+int vipx_debug_start(struct vipx_debug *debug)
{
- debug->graphmgr_data = graphmgr_data;
- debug->system_data = system_data;
-
- debug->root = debugfs_create_dir(DEBUG_FS_ROOT_NAME, NULL);
- if (debug->root)
- probe_info("%s is created\n", DEBUG_FS_ROOT_NAME);
-
- debug->logfile = debugfs_create_file(DEBUG_FS_LOGFILE_NAME, S_IRUSR,
- debug->root, debug, &vipx_debug_log_fops);
- if (debug->logfile)
- probe_info("%s is created\n", DEBUG_FS_LOGFILE_NAME);
-
- debug->imgdump.file = debugfs_create_file(DEBUG_FS_IMGFILE_NAME, S_IRUSR,
- debug->root, debug, &vipx_debug_img_fops);
- if (debug->imgdump.file)
- probe_info("%s is created\n", DEBUG_FS_IMGFILE_NAME);
+ struct vipx_system *system = debug->system;
+ struct vipx_graphmgr *gmgr = &system->graphmgr;
+ struct vipx_debug_monitor *monitor = &debug->monitor;
+ struct vipx_interface *itf = &system->interface;
- debug->grpfile = debugfs_create_file(DEBUG_FS_GRPFILE_NAME, S_IRUSR,
- debug->root, debug, &vipx_debug_grp_fops);
- if (debug->grpfile)
- probe_info("%s is created\n", DEBUG_FS_GRPFILE_NAME);
+ vipx_enter();
+ monitor->time_cnt = 0;
+ monitor->tick_cnt = gmgr->tick_cnt;
+ monitor->sched_cnt = gmgr->sched_cnt;
+ monitor->done_cnt = itf->done_cnt;
- debug->grpfile = debugfs_create_file(DEBUG_FS_BUFFILE_NAME, S_IRUSR,
- debug->root, debug, &vipx_debug_buf_fops);
- if (debug->buffile)
- probe_info("%s is created\n", DEBUG_FS_BUFFILE_NAME);
+ init_timer(&monitor->timer);
+ monitor->timer.expires = jiffies + DEBUG_MONITORING_PERIOD;
+ monitor->timer.data = (unsigned long)debug;
+ monitor->timer.function = __vipx_debug_monitor_fn;
+ /* TODO check debug */
+ //add_timer(&monitor->timer);
- clear_bit(VIPX_DEBUG_STATE_START, &debug->state);
+ set_bit(VIPX_DEBUG_STATE_START, &debug->state);
+ vipx_leave();
return 0;
}
+int vipx_debug_stop(struct vipx_debug *debug)
+{
+ vipx_check();
+ return __vipx_debug_stop(debug);
+}
+
int vipx_debug_open(struct vipx_debug *debug)
{
+ vipx_enter();
+ vipx_leave();
return 0;
}
int vipx_debug_close(struct vipx_debug *debug)
{
- int ret = 0;
-
- ret = __vipx_debug_stop(debug);
- if (ret)
- vipx_err("__vipx_debug_stop is fail(%d)\n", ret);
-
- return ret;
+ vipx_check();
+ return __vipx_debug_stop(debug);
}
-int vipx_debug_start(struct vipx_debug *debug)
+int vipx_debug_probe(struct vipx_device *device)
{
- int ret = 0;
- struct vipx_debug_monitor *monitor = &debug->monitor;
- struct vipx_graphmgr *graphmgr = debug->graphmgr_data;
- struct vipx_system *system = debug->system_data;
- struct vipx_interface *interface = &system->interface;
+ struct vipx_debug *debug;
- monitor->tick_cnt = graphmgr->tick_cnt;
- monitor->sched_cnt = graphmgr->sched_cnt;
- monitor->done_cnt = interface->done_cnt;
- monitor->time_cnt = 0;
+ vipx_enter();
+ debug_device = device;
+ debug = &device->debug;
+ debug->system = &device->system;
+ debug->state = 0;
- set_bit(VIPX_DEBUG_STATE_START, &debug->state);
-
- init_timer(&monitor->timer);
- monitor->timer.expires = jiffies + DEBUG_MONITORING_PERIOD;
- monitor->timer.data = (unsigned long)monitor;
- monitor->timer.function = vipx_debug_monitor_fn;
- add_timer(&monitor->timer);
+ debug->root = debugfs_create_dir(DEBUG_FS_ROOT_NAME, NULL);
+ if (!debug->root) {
+		vipx_err("Failed to create debug file[%s]\n",
+ DEBUG_FS_ROOT_NAME);
+ goto p_end;
+ }
- return ret;
+ debug->logfile = debugfs_create_file(DEBUG_FS_LOGFILE_NAME, 0400,
+ debug->root, debug, &vipx_debug_log_fops);
+ if (!debug->logfile)
+		vipx_err("Failed to create debug file[%s]\n",
+ DEBUG_FS_LOGFILE_NAME);
+
+ debug->grpfile = debugfs_create_file(DEBUG_FS_GRPFILE_NAME, 0400,
+ debug->root, debug, &vipx_debug_grp_fops);
+ if (!debug->grpfile)
+		vipx_err("Failed to create debug file[%s]\n",
+ DEBUG_FS_GRPFILE_NAME);
+
+ debug->buffile = debugfs_create_file(DEBUG_FS_BUFFILE_NAME, 0400,
+ debug->root, debug, &vipx_debug_buf_fops);
+ if (!debug->buffile)
+		vipx_err("Failed to create debug file[%s]\n",
+ DEBUG_FS_BUFFILE_NAME);
+
+ debug->imgdump.file = debugfs_create_file(DEBUG_FS_IMGFILE_NAME, 0400,
+ debug->root, debug, &vipx_debug_img_fops);
+ if (!debug->imgdump.file)
+		vipx_err("Failed to create debug file[%s]\n",
+ DEBUG_FS_IMGFILE_NAME);
+
+ vipx_leave();
+p_end:
+ return 0;
}
-int vipx_debug_stop(struct vipx_debug *debug)
+void vipx_debug_remove(struct vipx_debug *debug)
{
- int ret = 0;
-
- ret = __vipx_debug_stop(debug);
- if (ret)
- vipx_err("__vipx_debug_stop is fail(%d)\n", ret);
-
- return ret;
+ debugfs_remove_recursive(debug->root);
}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_DEBUG_H__
+#define __VIPX_DEBUG_H__
+
+#include <linux/timer.h>
+
+#include "vipx-config.h"
+#include "vipx-system.h"
+
+#ifdef DEBUG_LOG_CONCATE_ENABLE
+#define DLOG_INIT() struct vipx_debug_log dlog = \
+ { .dsentence_pos = 0 }
+#define DLOG(fmt, ...) \
+ vipx_debug_log_concate(&dlog, fmt, ##__VA_ARGS__)
+#define DLOG_OUT() vipx_debug_log_print(&dlog)
+#else
+#define DLOG_INIT()
+#define DLOG(fmt, ...)
+#define DLOG_OUT() "FORBIDDEN HISTORY"
+#endif
+#define DEBUG_SENTENCE_MAX (300)
+
+struct vipx_device;
+
+struct vipx_debug_imgdump {
+ struct dentry *file;
+ unsigned int target_graph;
+ unsigned int target_chain;
+ unsigned int target_pu;
+ unsigned int target_index;
+ void *kvaddr;
+ void *cookie;
+ size_t length;
+ size_t offset;
+};
+
+struct vipx_debug_monitor {
+ unsigned int time_cnt;
+ unsigned int tick_cnt;
+ unsigned int sched_cnt;
+ unsigned int done_cnt;
+ struct timer_list timer;
+};
+
+enum vipx_debug_state {
+ VIPX_DEBUG_STATE_START,
+};
+
+struct vipx_debug {
+ unsigned long state;
+ struct vipx_system *system;
+
+ struct dentry *root;
+ struct dentry *logfile;
+ struct dentry *grpfile;
+ struct dentry *buffile;
+
+ struct vipx_debug_imgdump imgdump;
+ struct vipx_debug_monitor monitor;
+};
+
+struct vipx_debug_log {
+ size_t dsentence_pos;
+ char dsentence[DEBUG_SENTENCE_MAX];
+};
+
+int vipx_debug_write_log_binary(void);
+int vipx_debug_dump_debug_regs(void);
+
+int vipx_debug_atoi(const char *psz_buf);
+int vipx_debug_bitmap_scnprintf(char *buf, unsigned int buflen,
+ const unsigned long *maskp, int nmaskbits);
+
+void vipx_debug_log_concate(struct vipx_debug_log *log, const char *fmt, ...);
+char *vipx_debug_log_print(struct vipx_debug_log *log);
+
+void vipx_debug_memdump(const char *prefix, void *start, size_t size);
+
+int vipx_debug_start(struct vipx_debug *debug);
+int vipx_debug_stop(struct vipx_debug *debug);
+int vipx_debug_open(struct vipx_debug *debug);
+int vipx_debug_close(struct vipx_debug *debug);
+
+int vipx_debug_probe(struct vipx_device *device);
+void vipx_debug_remove(struct vipx_debug *debug);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/exynos_iovmm.h>
#include <linux/pm_runtime.h>
-#include <linux/memblock.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/exynos_iovmm.h>
-#include "vipx-config.h"
-#include "vipx-device.h"
+#include "vipx-log.h"
#include "vipx-graph.h"
-#include "vipx-vertex.h"
-#include "vipx-graphmgr.h"
-
-static struct reserved_mem *vipx_fw_code_rmem;
-void *vipx_fw_code_rmem_base;
+#include "vipx-device.h"
-static int vipx_device_runtime_suspend(struct device *dev);
-static int vipx_device_runtime_resume(struct device *dev);
+void __vipx_fault_handler(struct vipx_device *device)
+{
+ vipx_enter();
+ vipx_leave();
+}
-static int __init vipx_fw_code_rmem_setup(struct reserved_mem *rmem)
+static int __attribute__((unused)) vipx_fault_handler(
+ struct iommu_domain *domain, struct device *dev,
+ unsigned long fault_addr, int fault_flag, void *token)
{
- pr_info("%s: base=%pa, size=%pa\n", __func__, &rmem->base, &rmem->size);
+ struct vipx_device *device;
+
+ pr_err("< VIPX FAULT HANDLER >\n");
+ pr_err("Device virtual(0x%lX) is invalid access\n", fault_addr);
+
+ device = dev_get_drvdata(dev);
+ vipx_debug_dump_debug_regs();
- vipx_fw_code_rmem = rmem;
+ __vipx_fault_handler(device);
+
+ return -EINVAL;
+}
+#if defined(CONFIG_PM_SLEEP)
+static int vipx_device_suspend(struct device *dev)
+{
+ vipx_enter();
+ vipx_leave();
return 0;
}
-RESERVEDMEM_OF_DECLARE(vipx_fw_code_rmem, "exynos,vipx_fw_code_rmem", vipx_fw_code_rmem_setup);
-
-void __vipx_fault_handler(struct vipx_device *device) {
-#if 0
- struct vipx_system *system;
- struct vipx_memory *memory;
- struct vipx_graphmgr *graphmgr;
- struct vipx_graph *graph;
- struct vipxo_pu *pu, *temp;
- struct vb_container *container;
- struct vb_buffer *buffer;
- u32 i, j, k;
-
- system = &device->system;
- memory = &system->memory;
- graphmgr = &device->graphmgr;
-
- /* 1. Internal Memory Infomation */
- vipx_info("internal memory : 0x%llX\n", memory->info.dvaddr);
-
- /* 2. Buffer Memroy Information */
- for (i = 0; i < VIPX_MAX_GRAPH; ++i) {
- graph = graphmgr->graph[i];
- if (!graph)
- continue;
-
- vipx_info("=================================================\n");
- vipx_info("GRAPH : %d(%d)\n", graph->id, graph->uid);
- vipx_info("INPUT--------------------------------------------\n");
- list_for_each_entry_safe(pu, temp, &graph->inleaf_list, gleaf_entry) {
- vipx_info("PU : %d\n", pu->id);
- vipx_info("-------------------------------------------------\n");
- for (j = 0; j < VIPX_MAX_BUFFER; ++j) {
- container = pu->container[j];
- if (!container)
- continue;
-
- if (j == 0) {
- vipx_info("TYPE : %d\n", container->type);
- vipx_info("RESOLUTION : %d x %d\n", container->format->width, container->format->height);
- vipx_info("SIZE : %d\n", container->format->size[container->format->plane]);
- vipx_info("-------------------------------------------------\n");
- }
-
- for (k = 0; k < container->count; ++k) {
- buffer = &container->buffers[k];
- vipx_info("[%d][%d]DVADDR : %llx\n", j, k, buffer->dvaddr);
- vipx_info("[%d][%d]KVADDR : %p\n", j, k, buffer->kvaddr);
- }
-
- vipx_info("-------------------------------------------------\n");
- }
- }
-
- vipx_info("OUTPUT-------------------------------------------\n");
- list_for_each_entry_safe(pu, temp, &graph->otleaf_list, gleaf_entry) {
- vipx_info("PU : %d\n", pu->id);
- vipx_info("-------------------------------------------------\n");
- for (j = 0; j < VIPX_MAX_BUFFER; ++j) {
- container = pu->container[j];
- if (!container)
- continue;
-
- if (j == 0) {
- vipx_info("TYPE : %d\n", container->type);
- vipx_info("RESOLUTION : %d x %d\n", container->format->width, container->format->height);
- vipx_info("SIZE : %d\n", container->format->size[container->format->plane]);
- vipx_info("-------------------------------------------------\n");
- }
-
- for (k = 0; k < container->count; ++k) {
- buffer = &container->buffers[k];
- vipx_info("[%d][%d]DVADDR : %llx\n", j, k, buffer->dvaddr);
- vipx_info("[%d][%d]KVADDR : %p\n", j, k, buffer->kvaddr);
- }
-
- vipx_info("-------------------------------------------------\n");
- }
- }
-
- vipx_graph_print(graph);
- }
-#endif
+static int vipx_device_resume(struct device *dev)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
}
+#endif
-static int __attribute__((unused)) vipx_fault_handler(struct iommu_domain *domain,
- struct device *dev,
- unsigned long fault_addr,
- int fault_flag,
- void *token)
+static int vipx_device_runtime_suspend(struct device *dev)
{
+ int ret;
struct vipx_device *device;
- pr_err("<VIPX FAULT HANDLER>\n");
- pr_err("Device virtual(0x%X) is invalid access\n", (u32)fault_addr);
+ vipx_enter();
+ device = dev_get_drvdata(dev);
+ ret = vipx_system_suspend(&device->system);
+ if (ret)
+ goto p_err;
+
+ vipx_leave();
+p_err:
+ return ret;
+}
+
+static int vipx_device_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct vipx_device *device;
+
+ vipx_enter();
device = dev_get_drvdata(dev);
- __vipx_fault_handler(device);
+ ret = vipx_system_resume(&device->system);
+ if (ret)
+ goto p_err;
- return -EINVAL;
+ vipx_leave();
+p_err:
+ return ret;
}
+static const struct dev_pm_ops vipx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ vipx_device_suspend,
+ vipx_device_resume)
+ SET_RUNTIME_PM_OPS(
+ vipx_device_runtime_suspend,
+ vipx_device_runtime_resume,
+ NULL)
+};
+
static int __vipx_device_start(struct vipx_device *device)
{
- int ret = 0;
+ int ret;
- if (test_bit(VIPX_DEVICE_STATE_START, &device->state)) {
- vipx_err("already started\n");
+ vipx_enter();
+ if (!test_bit(VIPX_DEVICE_STATE_OPEN, &device->state)) {
ret = -EINVAL;
- goto p_err;
+ vipx_err("device was not opened yet (%lx)\n", device->state);
+ goto p_err_state;
}
+ if (test_bit(VIPX_DEVICE_STATE_START, &device->state))
+ return 0;
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+ set_bit(VIPX_DEVICE_STATE_START, &device->state);
+ return 0;
+#endif
ret = vipx_system_start(&device->system);
- if (ret) {
- vipx_err("vipx_system_start is fail(%d)\n", ret);
- goto p_err;
- }
+ if (ret)
+ goto p_err_system;
ret = vipx_debug_start(&device->debug);
- if (ret) {
- vipx_err("vipx_debug_start is fail(%d)\n", ret);
- goto p_err;
- }
+ if (ret)
+ goto p_err_debug;
set_bit(VIPX_DEVICE_STATE_START, &device->state);
-p_err:
+ vipx_leave();
+ return 0;
+p_err_debug:
+ vipx_system_stop(&device->system);
+p_err_system:
+p_err_state:
return ret;
}
static int __vipx_device_stop(struct vipx_device *device)
{
- int ret = 0;
-
+ vipx_enter();
if (!test_bit(VIPX_DEVICE_STATE_START, &device->state))
- goto p_err;
-
- ret = vipx_debug_stop(&device->debug);
- if (ret)
- vipx_err("vipx_debug_stop is fail(%d)\n", ret);
-
- ret = vipx_system_stop(&device->system);
- if (ret)
- vipx_err("vipx_system_stop is fail(%d)\n", ret);
+ return 0;
+#ifdef TEMP_RT_FRAMEWORK_TEST
+ clear_bit(VIPX_DEVICE_STATE_START, &device->state);
+ return 0;
+#endif
+ vipx_debug_stop(&device->debug);
+ vipx_system_stop(&device->system);
clear_bit(VIPX_DEVICE_STATE_START, &device->state);
-p_err:
- return ret;
+ vipx_leave();
+ return 0;
}
static int __vipx_device_power_on(struct vipx_device *device)
{
- int ret = 0;
+ int ret;
+ vipx_enter();
+#if defined(CONFIG_PM)
ret = pm_runtime_get_sync(device->dev);
+ if (ret < 0) {
+ vipx_err("Failed to get pm_runtime sync (%d)\n", ret);
+ goto p_err;
+ }
+#else
+ ret = vipx_device_runtime_resume(device->dev);
if (ret)
- vipx_err("runtime resume is fail(%d)", ret);
+ goto p_err;
+#endif
+ vipx_leave();
+p_err:
return ret;
}
static int __vipx_device_power_off(struct vipx_device *device)
{
- int ret = 0;
+ int ret;
+ vipx_enter();
+#if defined(CONFIG_PM)
ret = pm_runtime_put_sync(device->dev);
+ if (ret < 0) {
+ vipx_err("Failed to put pm_runtime sync (%d)\n", ret);
+ goto p_err;
+ }
+#else
+ ret = vipx_device_runtime_suspend(device->dev);
if (ret)
- vipx_err("runtime resume is fail(%d)", ret);
+ goto p_err;
+#endif
+ vipx_leave();
+p_err:
return ret;
}
-static int vipx_device_probe(struct platform_device *pdev)
+int vipx_device_start(struct vipx_device *device)
{
- int ret = 0;
- struct device *dev;
- struct vipx_device *device;
+ int ret;
- BUG_ON(!pdev);
-
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
-
- dev = &pdev->dev;
-
- device = devm_kzalloc(dev, sizeof(struct vipx_device), GFP_KERNEL);
- if (!device) {
- probe_err("device is NULL");
- ret = -ENOMEM;
- goto p_err;
- }
-
- ret = vipx_system_probe(&device->system, pdev);
- if (ret) {
- probe_err("vipx_system_probe is fail(%d)\n", ret);
- ret = -EINVAL;
+ vipx_enter();
+ ret = __vipx_device_start(device);
+ if (ret)
goto p_err;
- }
- ret = vipx_vertex_probe(&device->vertex, dev);
- if (ret) {
- probe_err("vipx_vertex_probe is fail(%d)\n", ret);
- ret = -EINVAL;
- goto p_err;
- }
+ vipx_leave();
+p_err:
+ return ret;
+}
- ret = vipx_graphmgr_probe(&device->graphmgr);
- if (ret) {
- probe_err("vipx_graphmgr_probe is fail(%d)\n", ret);
- ret = -EINVAL;
- goto p_err;
- }
+int vipx_device_stop(struct vipx_device *device)
+{
+ int ret;
- ret = vipx_debug_probe(&device->debug, &device->graphmgr, &device->system);
- if (ret) {
- probe_err("vipx_debug_probe is fail(%d)\n", ret);
- ret = -EINVAL;
+ vipx_enter();
+ ret = __vipx_device_stop(device);
+ if (ret)
goto p_err;
- }
-
- iovmm_set_fault_handler(dev, vipx_fault_handler, NULL);
- pm_runtime_enable(dev);
-
- device->dev = dev;
- device->mode = VIPX_DEVICE_MODE_NORMAL;
- clear_bit(VIPX_DEVICE_STATE_OPEN, &device->state);
- clear_bit(VIPX_DEVICE_STATE_START, &device->state);
- dev_set_drvdata(dev, device);
-
- vipx_fw_code_rmem_base = phys_to_virt(vipx_fw_code_rmem->base);
+ vipx_leave();
p_err:
-
- probe_info("%s():%d\n", __func__, ret);
return ret;
}
int vipx_device_open(struct vipx_device *device)
{
- int ret = 0;
-
- BUG_ON(!device);
+ int ret;
+ vipx_enter();
if (test_bit(VIPX_DEVICE_STATE_OPEN, &device->state)) {
- vipx_err("device is already opened\n");
ret = -EINVAL;
- goto p_err;
+ vipx_warn("device was already opened\n");
+ goto p_err_state;
}
+#ifdef TEMP_RT_FRAMEWORK_TEST
+ device->system.graphmgr.current_model = NULL;
+ set_bit(VIPX_DEVICE_STATE_OPEN, &device->state);
+ return 0;
+#endif
+
ret = vipx_system_open(&device->system);
- if (ret) {
- vipx_err("vipx_system_open is fail(%d)\n", ret);
- goto p_err;
- }
+ if (ret)
+ goto p_err_system;
ret = vipx_debug_open(&device->debug);
- if (ret) {
- vipx_err("vipx_debug_open is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = vipx_graphmgr_open(&device->graphmgr);
- if (ret) {
- vipx_err("vipx_graphmgr_open is fail(%d)\n", ret);
- goto p_err;
- }
+ if (ret)
+ goto p_err_debug;
ret = __vipx_device_power_on(device);
- if (ret) {
- vipx_err("__vipx_device_power_on is fail(%d)\n", ret);
- goto p_err;
- }
+ if (ret)
+ goto p_err_power;
+
+ ret = vipx_system_fw_bootup(&device->system);
+ if (ret)
+ goto p_err_boot;
set_bit(VIPX_DEVICE_STATE_OPEN, &device->state);
-p_err:
- vipx_info("%s():%d\n", __func__, ret);
+ vipx_leave();
+ return 0;
+p_err_boot:
+ __vipx_device_power_off(device);
+p_err_power:
+ vipx_debug_close(&device->debug);
+p_err_debug:
+ vipx_system_close(&device->system);
+p_err_system:
+p_err_state:
return ret;
}
int vipx_device_close(struct vipx_device *device)
{
- int ret = 0;
-
- BUG_ON(!device);
-
+ vipx_enter();
if (!test_bit(VIPX_DEVICE_STATE_OPEN, &device->state)) {
- vipx_err("device is already closed\n");
- ret = -EINVAL;
+ vipx_warn("device is already closed\n");
goto p_err;
}
- ret = __vipx_device_stop(device);
- if (ret)
- vipx_err("__vipx_device_stop is fail(%d)\n", ret);
-
- ret = vipx_graphmgr_close(&device->graphmgr);
- if (ret)
- vipx_err("vipx_graphmgr_close is fail(%d)\n", ret);
-
- ret = vipx_system_close(&device->system);
- if (ret)
- vipx_err("vipx_system_close is fail(%d)\n", ret);
-
- ret = vipx_debug_close(&device->debug);
- if (ret)
- vipx_err("vipx_debug_close is fail(%d)\n", ret);
-
- ret = __vipx_device_power_off(device);
- if (ret)
- vipx_err("__vipx_device_power_off is fail(%d)\n", ret);
+ if (test_bit(VIPX_DEVICE_STATE_START, &device->state))
+ __vipx_device_stop(device);
+#ifdef TEMP_RT_FRAMEWORK_TEST
+ clear_bit(VIPX_DEVICE_STATE_OPEN, &device->state);
+ return 0;
+#endif
+ __vipx_device_power_off(device);
+ vipx_debug_close(&device->debug);
+ vipx_system_close(&device->system);
clear_bit(VIPX_DEVICE_STATE_OPEN, &device->state);
+ vipx_leave();
p_err:
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
+ return 0;
}
-int vipx_device_start(struct vipx_device *device)
+static int vipx_device_probe(struct platform_device *pdev)
{
- int ret = 0;
-
- BUG_ON(!device);
+ int ret;
+ struct device *dev;
+ struct vipx_device *device;
- ret = __vipx_device_start(device);
- if (ret)
- vipx_err("__vipx_device_start is fail(%d)\n", ret);
+ vipx_enter();
+ dev = &pdev->dev;
- vipx_info("%s():%d\n", __func__, ret);
+ device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ ret = -ENOMEM;
+ vipx_err("Fail to alloc device structure\n");
+ goto p_err_alloc;
+ }
- return ret;
-}
+ get_device(dev);
+ device->dev = dev;
+ dev_set_drvdata(dev, device);
-int vipx_device_stop(struct vipx_device *device)
-{
- int ret = 0;
+ ret = vipx_system_probe(device);
+ if (ret)
+ goto p_err_system;
- BUG_ON(!device);
+ ret = vipx_core_probe(device);
+ if (ret)
+ goto p_err_core;
- ret = __vipx_device_stop(device);
+ ret = vipx_debug_probe(device);
if (ret)
- vipx_err("__vipx_device_stop is fail(%d)\n", ret);
+ goto p_err_debug;
- vipx_info("%s():%d\n", __func__, ret);
+ iovmm_set_fault_handler(dev, vipx_fault_handler, NULL);
+ pm_runtime_enable(dev);
+ vipx_leave();
+ vipx_info("vipx device is initialized\n");
+ return 0;
+p_err_debug:
+ vipx_core_remove(&device->core);
+p_err_core:
+ vipx_system_remove(&device->system);
+p_err_system:
+ devm_kfree(dev, device);
+p_err_alloc:
+ vipx_err("vipx device is not registered (%d)\n", ret);
return ret;
}
static int vipx_device_remove(struct platform_device *pdev)
{
- return 0;
-}
-
-static int vipx_device_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int vipx_device_resume(struct device *dev)
-{
- return 0;
-}
-
-static int vipx_device_runtime_suspend(struct device *dev)
-{
- int ret = 0;
struct vipx_device *device;
- device = dev_get_drvdata(dev);
-
- ret = vipx_system_suspend(&device->system);
- if (ret)
- vipx_err("vipx_system_suspend is fail(%d)\n", ret);
+ vipx_enter();
+ device = dev_get_drvdata(&pdev->dev);
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
+ vipx_debug_remove(&device->debug);
+ vipx_core_remove(&device->core);
+ vipx_system_remove(&device->system);
+ devm_kfree(device->dev, device);
+ vipx_leave();
+ return 0;
}
-static int vipx_device_runtime_resume(struct device *dev)
+static void vipx_device_shutdown(struct platform_device *pdev)
{
- int ret = 0;
struct vipx_device *device;
- device = dev_get_drvdata(dev);
-
- ret = vipx_system_resume(&device->system, device->mode);
- if (ret) {
- vipx_err("vipx_system_resume is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
+ vipx_enter();
+ device = dev_get_drvdata(&pdev->dev);
+ vipx_leave();
}
-static const struct dev_pm_ops vipx_pm_ops = {
- .suspend = vipx_device_suspend,
- .resume = vipx_device_resume,
- .runtime_suspend = vipx_device_runtime_suspend,
- .runtime_resume = vipx_device_runtime_resume,
-};
-
+#if defined(CONFIG_OF)
static const struct of_device_id exynos_vipx_match[] = {
{
.compatible = "samsung,exynos-vipx",
{}
};
MODULE_DEVICE_TABLE(of, exynos_vipx_match);
+#endif
static struct platform_driver vipx_driver = {
.probe = vipx_device_probe,
.remove = vipx_device_remove,
+ .shutdown = vipx_device_shutdown,
.driver = {
.name = "exynos-vipx",
.owner = THIS_MODULE,
.pm = &vipx_pm_ops,
+#if defined(CONFIG_OF)
.of_match_table = of_match_ptr(exynos_vipx_match)
+#endif
}
};
+/* TODO: temporary bring-up code - remove once the vertex driver is split out */
+extern int __init vertex_device_init(void);
+extern void __exit vertex_device_exit(void);
+
static int __init vipx_device_init(void)
{
- int ret = platform_driver_register(&vipx_driver);
+ int ret;
+
+ vipx_enter();
+ ret = platform_driver_register(&vipx_driver);
if (ret)
- probe_err("platform_driver_register is fail():%d\n", ret);
+ vipx_err("platform driver for vipx is not registered(%d)\n",
+ ret);
- probe_info("vipx device init is loaded");
+ vertex_device_init();
+ vipx_leave();
return ret;
}
-late_initcall(vipx_device_init);
static void __exit vipx_device_exit(void)
{
+ vipx_enter();
+ vertex_device_exit();
platform_driver_unregister(&vipx_driver);
+ vipx_leave();
}
+
+#if defined(MODULE)
+module_init(vipx_device_init);
+#else
+late_initcall(vipx_device_init);
+#endif
module_exit(vipx_device_exit);
-MODULE_AUTHOR("Scott Choi<hyeon.choi@samsung.com>");
+MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Exynos VIPx driver");
MODULE_LICENSE("GPL");
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef VIPX_DEVICE_H_
-#define VIPX_DEVICE_H_
+#ifndef __VIPX_DEVICE_H__
+#define __VIPX_DEVICE_H__
+
+#include <linux/device.h>
-#include "vipx-graphmgr.h"
#include "vipx-system.h"
-#include "vipx-vertex.h"
+#include "vipx-core.h"
#include "vipx-debug.h"
+struct vipx_device;
+
enum vipx_device_state {
VIPX_DEVICE_STATE_OPEN,
VIPX_DEVICE_STATE_START
};
-enum vipx_device_mode {
- VIPX_DEVICE_MODE_NORMAL,
- VIPX_DEVICE_MODE_TEST
-};
-
struct vipx_device {
- struct device *dev;
- unsigned long state;
- u32 mode;
-
- struct vipx_graphmgr graphmgr;
- struct vipx_system system;
- struct vipx_vertex vertex;
- struct vipx_debug debug;
+ struct device *dev;
+ unsigned long state;
+
+ struct vipx_system system;
+ struct vipx_core core;
+ struct vipx_debug debug;
};
int vipx_device_open(struct vipx_device *device);
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
*/
#include <linux/delay.h>
-#include <linux/slab.h>
+#include "vipx-log.h"
+#include "vipx-core.h"
+#include "vipx-queue.h"
+#include "vipx-context.h"
#include "vipx-graphmgr.h"
#include "vipx-graph.h"
-#include "vipx-debug.h"
-#include "vs4l.h"
-const struct vipx_graph_ops vipx_graph_ops;
+#define VIPX_GRAPH_MAX_PRIORITY (20)
+#define VIPX_GRAPH_TIMEOUT (3 * HZ)
static int __vipx_graph_start(struct vipx_graph *graph)
{
- int ret = 0;
- BUG_ON(!graph);
+ int ret;
+ vipx_enter();
if (test_bit(VIPX_GRAPH_STATE_START, &graph->state))
return 0;
- ret = vipx_graphmgr_grp_start(graph->cookie, graph);
- if (ret) {
- vipx_ierr("vipx_graphmgr_grp_start is fail(%d)\n", graph, ret);
+ ret = vipx_graphmgr_grp_start(graph->owner, graph);
+ if (ret)
goto p_err;
- }
set_bit(VIPX_GRAPH_STATE_START, &graph->state);
+ vipx_leave();
+ return 0;
p_err:
- vipx_info("%s:%d\n", __func__, ret);
return ret;
}
static int __vipx_graph_stop(struct vipx_graph *graph)
{
- int ret = 0, errcnt = 0;
- u32 retry, timeout;
- struct vipx_taskmgr *taskmgr;
+ unsigned int timeout, retry;
+ struct vipx_taskmgr *tmgr;
struct vipx_task *control;
+ vipx_enter();
if (!test_bit(VIPX_GRAPH_STATE_START, &graph->state))
return 0;
- taskmgr = &graph->taskmgr;
- if (taskmgr->req_cnt + taskmgr->pre_cnt) {
+ tmgr = &graph->taskmgr;
+ if (tmgr->req_cnt + tmgr->pre_cnt) {
control = &graph->control;
control->message = VIPX_CTRL_STOP;
- vipx_graphmgr_queue(graph->cookie, control);
+ vipx_graphmgr_queue(graph->owner, control);
timeout = wait_event_timeout(graph->control_wq,
- control->message == VIPX_CTRL_STOP_DONE, VIPX_GRAPH_STOP_TIMEOUT);
- if (!timeout) {
- vipx_ierr("wait_event_timeout is expired\n", graph);
- errcnt++;
- }
+ control->message == VIPX_CTRL_STOP_DONE,
+ VIPX_GRAPH_TIMEOUT);
+ if (!timeout)
+ vipx_err("wait time(%u ms) is expired (%u)\n",
+ jiffies_to_msecs(VIPX_GRAPH_TIMEOUT),
+ graph->idx);
}
retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && taskmgr->req_cnt) {
- vipx_iwarn("waiting %d request cancel...(%d)\n", graph, taskmgr->req_cnt, retry);
- msleep(1);
- }
+ while (tmgr->req_cnt) {
+ if (!retry--)
+ break;
- if (!retry) {
- vipx_ierr("request cancel is fail\n", graph);
- errcnt++;
+ vipx_warn("Waiting request count(%u) to be zero (%u/%u)\n",
+ tmgr->req_cnt, retry, graph->idx);
+ udelay(10);
}
- retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && taskmgr->pre_cnt) {
- vipx_iwarn("waiting %d prepare cancel...(%d)\n", graph, taskmgr->pre_cnt, retry);
- msleep(1);
- }
-
- if (!retry) {
- vipx_ierr("prepare cancel is fail\n", graph);
- errcnt++;
- }
+ if (tmgr->req_cnt)
+ vipx_err("request count(%u) is remained (%u)\n",
+ tmgr->req_cnt, graph->idx);
retry = VIPX_STOP_WAIT_COUNT;
- while (--retry && taskmgr->pro_cnt) {
- vipx_iwarn("waiting %d process done...(%d)\n", graph, taskmgr->pro_cnt, retry);
- msleep(1);
- }
-
- if (!retry) {
- vipx_ierr("process done is fail\n", graph);
- errcnt++;
- }
+ while (tmgr->pre_cnt) {
+ if (!retry--)
+ break;
- ret = vipx_graphmgr_grp_stop(graph->cookie, graph);
- if (ret) {
- vipx_ierr("vipx_graphmgr_grp_stop is fail(%d)\n", graph, ret);
- errcnt++;
+ vipx_warn("Waiting prepare count(%u) to be zero (%u/%u)\n",
+ tmgr->pre_cnt, retry, graph->idx);
+ udelay(10);
}
- vipx_task_flush(taskmgr);
- clear_bit(VIPX_GRAPH_STATE_START, &graph->state);
-
- vipx_info("%s:%d\n", __func__, ret);
- return errcnt;
-}
-
-static int __vipx_graph_unmap(struct vipx_graph *graph)
-{
- int ret = 0;
-
- clear_bit(VIPX_GRAPH_STATE_MMAPPED, &graph->state);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-static int __vipx_graph_map(struct vipx_graph *graph)
-{
- int ret = 0;
-
- set_bit(VIPX_GRAPH_STATE_MMAPPED, &graph->state);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-static int __vipx_graph_alloc(struct vipx_graph *graph)
-{
- int ret = 0;
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-static int __vipx_graph_free(struct vipx_graph *graph)
-{
- int ret = 0;
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-static int __vipx_graph_parse(struct vipx_graph *graph)
-{
- int ret = 0;
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-void vipx_graph_task_print(struct vipx_graph *graph)
-{
- vipx_info("%s:\n", __func__);
-}
-
-void vipx_graph_print(struct vipx_graph *graph)
-{
-
- vipx_info("%s:\n", __func__);
-}
-
-int vipx_graph_create(struct vipx_graph **graph, void *cookie, void *memory)
-{
- int ret = 0;
- u32 i;
- struct vipx_taskmgr *taskmgr;
-
- BUG_ON(!cookie);
+ if (tmgr->pre_cnt)
+ vipx_err("prepare count(%u) is remained (%u)\n",
+ tmgr->pre_cnt, graph->idx);
- *graph = kzalloc(sizeof(struct vipx_graph), GFP_KERNEL);
- if (*graph == NULL) {
- vipx_err("kzalloc is fail");
- ret = -ENOMEM;
- goto p_err;
- }
-
- ret = vipx_graphmgr_grp_register(cookie, *graph);
- if (ret) {
- vipx_err("vipx_graphmgr_grp_register is fail(%d)\n", ret);
- kfree(*graph);
- goto p_err;
- }
+ retry = VIPX_STOP_WAIT_COUNT;
+ while (tmgr->pro_cnt) {
+ if (!retry--)
+ break;
- (*graph)->control.message = VIPX_CTRL_NONE;
- (*graph)->cookie = cookie;
- (*graph)->memory = memory;
- (*graph)->gops = &vipx_graph_ops;
- mutex_init(&(*graph)->local_lock);
-
- /* task manager init */
- taskmgr = &(*graph)->taskmgr;
- taskmgr->id = (*graph)->idx;
- taskmgr->sindex = 0;
- spin_lock_init(&taskmgr->slock);
-
- for (i = 0; i < VIPX_MAX_TASK; ++i) {
- (*graph)->inhash[i] = VIPX_MAX_TASK;
- (*graph)->othash[i] = VIPX_MAX_TASK;
+ vipx_warn("Waiting process count(%u) to be zero (%u/%u)\n",
+ tmgr->pro_cnt, retry, graph->idx);
+ udelay(10);
}
- (*graph)->control.owner = *graph;
- init_waitqueue_head(&(*graph)->control_wq);
- ret = vipx_task_init(taskmgr, *graph);
- if (ret) {
- vipx_err("vipx_task_init is fail(%d)\n", ret);
- kfree(*graph);
- goto p_err;
- }
+ if (tmgr->pro_cnt)
+ vipx_err("process count(%u) is remained (%u)\n",
+ tmgr->pro_cnt, graph->idx);
- (*graph)->informat_list.count = 0;
- (*graph)->informat_list.formats = 0;
- (*graph)->otformat_list.count = 0;
- (*graph)->otformat_list.formats = 0;
+ vipx_graphmgr_grp_stop(graph->owner, graph);
+ vipx_task_flush(tmgr);
+ clear_bit(VIPX_GRAPH_STATE_START, &graph->state);
-p_err:
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
+ vipx_leave();
+ return 0;
}
-int vipx_graph_destroy(struct vipx_graph *graph)
+static int vipx_graph_set_graph(struct vipx_graph *graph,
+ struct vs4l_graph *ginfo)
{
- int ret = 0;
-
- BUG_ON(!graph);
-
- ret = __vipx_graph_stop(graph);
- if (ret)
- vipx_ierr("__vipx_graph_stop is fail(%d)\n", graph, ret);
-
- ret = vipx_graphmgr_grp_unregister(graph->cookie, graph);
- if (ret)
- vipx_ierr("vipx_graphmgr_grp_unregister is fail(%d)\n", graph, ret);
-
- ret = __vipx_graph_unmap(graph);
- if (ret)
- vipx_ierr("__vipx_graph_unmap is fail(%d)\n", graph, ret);
-
- ret = __vipx_graph_free(graph);
- if (ret)
- vipx_ierr("__vipx_graph_free is fail(%d)\n", graph, ret);
-
- if (graph->informat_list.formats)
- kfree(graph->informat_list.formats);
- graph->informat_list.formats = 0;
- graph->informat_list.count = 0;
-
- if (graph->otformat_list.formats)
- kfree(graph->otformat_list.formats);
- graph->otformat_list.count = 0;
-
- kfree(graph);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_graph_config(struct vipx_graph *graph, struct vs4l_graph *info)
-{
- int ret = 0;
-
- BUG_ON(!graph);
- BUG_ON(!graph->cookie);
- BUG_ON(!info);
+ int ret;
+ vipx_enter();
if (test_bit(VIPX_GRAPH_STATE_CONFIG, &graph->state)) {
- vipx_ierr("graph is already configured\n", graph);
ret = -EINVAL;
+ vipx_err("graph(%u) is already configured (%lu)\n",
+ graph->idx, graph->state);
goto p_err;
}
- if (info->priority > VIPX_GRAPH_MAX_PRIORITY) {
- vipx_iwarn("graph priority is over(%d)\n", graph, info->priority);
- info->priority = VIPX_GRAPH_MAX_PRIORITY;
- }
-
- graph->uid = info->id;
- graph->flags = info->flags;
- graph->priority = info->priority;
-
- /* 2. graph allocation */
- ret = __vipx_graph_alloc(graph);
- if (ret) {
- vipx_ierr("__vipx_graph_alloc is fail(%d)\n", graph, ret);
- goto p_err;
+ if (ginfo->priority > VIPX_GRAPH_MAX_PRIORITY) {
+ vipx_warn("graph(%u) priority is over (%u/%u)\n",
+ graph->idx, ginfo->priority,
+ VIPX_GRAPH_MAX_PRIORITY);
+ ginfo->priority = VIPX_GRAPH_MAX_PRIORITY;
}
- /* 3. parsing */
- ret = __vipx_graph_parse(graph);
- if (ret) {
- vipx_ierr("__vipx_graph_parse is fail(%d)\n", graph, ret);
- goto p_err;
- }
-
- /* 4. Buffer Mapping */
- ret = __vipx_graph_map(graph);
- if (ret) {
- vipx_ierr("__vipx_graph_map is fail(%d)\n", graph, ret);
- goto p_err;
- }
+ graph->uid = ginfo->id;
+ graph->flags = ginfo->flags;
+ graph->priority = ginfo->priority;
set_bit(VIPX_GRAPH_STATE_CONFIG, &graph->state);
+ vipx_leave();
+ return 0;
p_err:
- vipx_iinfo("%s(%d, %d, %X):%d\n", graph, __func__, info->id, info->priority, info->flags, ret);
return ret;
}
-int vipx_graph_param(struct vipx_graph *graph, struct vs4l_param_list *plist)
+static int vipx_graph_set_format(struct vipx_graph *graph,
+ struct vs4l_format_list *flist)
{
- int ret = 0;
-
- set_bit(VIPX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags);
-
- vipx_iinfo("%s:%d\n", graph, __func__, ret);
- return ret;
-}
-
-static int vipx_graph_prepare(struct vb_queue *q, struct vb_container_list *clist)
-{
- int ret = 0;
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return ret;
-}
+ int ret;
+ struct vipx_format_list *in_flist;
+ struct vipx_format_list *ot_flist;
+ unsigned int cnt;
-static int vipx_graph_unprepare(struct vb_queue *q, struct vb_container_list *clist)
-{
- int ret = 0;
-
- vipx_dbg("%s:%d\n", __func__, ret);
- return ret;
-}
-
-const struct vb_ops vb_ops = {
- .buf_prepare = vipx_graph_prepare,
- .buf_unprepare = vipx_graph_unprepare
-};
-
-int vipx_graph_start(struct vipx_queue *queue)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx;
- struct vipx_graph *graph;
-
- BUG_ON(!queue);
-
- vctx = container_of(queue, struct vipx_vertex_ctx, queue);
- graph = container_of(vctx, struct vipx_graph, vctx);
-
- ret = __vipx_graph_start(graph);
- if (ret)
- vipx_ierr("__vipx_graph_start is fail(%d)\n", graph, ret);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-int vipx_graph_stop(struct vipx_queue *queue)
-{
- int ret = 0;
- struct vipx_graph *graph;
- struct vipx_vertex_ctx *vctx;
-
- BUG_ON(!queue);
-
- vctx = container_of(queue, struct vipx_vertex_ctx, queue);
- graph = container_of(vctx, struct vipx_graph, vctx);
-
- ret = __vipx_graph_stop(graph);
- if (ret)
- vipx_ierr("__vipx_graph_stop is fail(%d)\n", graph, ret);
-
- vipx_info("%s:%d\n", __func__, ret);
- return ret;
-}
-
-/* Description of flist elements
- * tartget : We done need this field. Ignore.
- * format : Fourcc name. Converting to char pointer.
- * plane : Num of planes.
- * width, height
- */
-int vipx_graph_format(struct vipx_queue *queue, struct vs4l_format_list *flist)
-{
- int ret = 0;
- struct vipx_graph *graph;
- struct vipx_vertex_ctx *vctx;
- struct vipx_format_list *in_list;
- struct vipx_format_list *ot_list;
- int cnt = 0;
-
- BUG_ON(!queue);
- BUG_ON(!flist);
-
- vctx = container_of(queue, struct vipx_vertex_ctx, queue);
- graph = container_of(vctx, struct vipx_graph, vctx);
- in_list = &graph->informat_list;
- ot_list = &graph->otformat_list;
+ vipx_enter();
+ in_flist = &graph->inflist;
+ ot_flist = &graph->otflist;
if (flist->direction == VS4L_DIRECTION_IN) {
- if (in_list->count != flist->count) {
- if (in_list->formats)
- kfree(in_list->formats);
+ if (in_flist->count != flist->count) {
+ kfree(in_flist->formats);
- in_list->count = flist->count;
- in_list->formats = kzalloc(in_list->count * sizeof(struct vipx_format), GFP_KERNEL);
- if (!in_list->formats) {
+ in_flist->count = flist->count;
+ in_flist->formats = kcalloc(in_flist->count,
+ sizeof(*in_flist->formats), GFP_KERNEL);
+ if (!in_flist->formats) {
ret = -ENOMEM;
+ vipx_err("Failed to alloc in_flist formats\n");
goto p_err;
}
- for (cnt = 0; cnt < in_list->count; cnt++) {
- vipx_info("[%d] size (%dx%d), format(%d), plane(%d)\n", cnt,
- flist->formats[cnt].width,
- flist->formats[cnt].height,
- flist->formats[cnt].format,
- flist->formats[cnt].plane);
-
- in_list->formats[cnt].width = flist->formats[cnt].width;
- in_list->formats[cnt].height = flist->formats[cnt].height;
- in_list->formats[cnt].format = flist->formats[cnt].format;
- in_list->formats[cnt].plane = flist->formats[cnt].plane;
+ for (cnt = 0; cnt < in_flist->count; ++cnt) {
+ in_flist->formats[cnt].format =
+ flist->formats[cnt].format;
+ in_flist->formats[cnt].plane =
+ flist->formats[cnt].plane;
+ in_flist->formats[cnt].width =
+ flist->formats[cnt].width;
+ in_flist->formats[cnt].height =
+ flist->formats[cnt].height;
}
}
} else if (flist->direction == VS4L_DIRECTION_OT) {
- if (ot_list->count != flist->count) {
- if (ot_list->formats)
- kfree(ot_list->formats);
+ if (ot_flist->count != flist->count) {
+ kfree(ot_flist->formats);
- ot_list->count = flist->count;
- ot_list->formats = kzalloc(ot_list->count * sizeof(struct vipx_format), GFP_KERNEL);
- if (!ot_list->formats) {
+ ot_flist->count = flist->count;
+ ot_flist->formats = kcalloc(ot_flist->count,
+ sizeof(*ot_flist->formats), GFP_KERNEL);
+ if (!ot_flist->formats) {
ret = -ENOMEM;
+ vipx_err("Failed to alloc ot_flist formats\n");
goto p_err;
}
- for (cnt = 0; cnt < ot_list->count; cnt++) {
- vipx_info("[%d] size (%dx%d), format(%d), plane(%d)\n", cnt,
- flist->formats[cnt].width,
- flist->formats[cnt].height,
- flist->formats[cnt].format,
- flist->formats[cnt].plane);
-
- ot_list->formats[cnt].width = flist->formats[cnt].width;
- ot_list->formats[cnt].height = flist->formats[cnt].height;
- ot_list->formats[cnt].format = flist->formats[cnt].format;
- ot_list->formats[cnt].plane = flist->formats[cnt].plane;
+ for (cnt = 0; cnt < ot_flist->count; ++cnt) {
+ ot_flist->formats[cnt].format =
+ flist->formats[cnt].format;
+ ot_flist->formats[cnt].plane =
+ flist->formats[cnt].plane;
+ ot_flist->formats[cnt].width =
+ flist->formats[cnt].width;
+ ot_flist->formats[cnt].height =
+ flist->formats[cnt].height;
}
}
} else {
- vipx_err("invalid direction(%d)\n", flist->direction);
ret = -EINVAL;
+ vipx_err("invalid direction (%d)\n", flist->direction);
goto p_err;
}
- vipx_info("%s:%d, count in/out (%d/%d)\n", __func__, ret,
- graph->informat_list.count,
- graph->otformat_list.count);
- return ret;
-
+ vipx_leave();
+ return 0;
p_err:
- if (in_list->formats)
- kfree(in_list->formats);
- in_list->formats = 0;
-
- if (ot_list->formats)
- kfree(ot_list->formats);
- ot_list->formats = 0;
+ kfree(in_flist->formats);
+ in_flist->formats = NULL;
+ in_flist->count = 0;
+ kfree(ot_flist->formats);
+ ot_flist->formats = NULL;
+ ot_flist->count = 0;
return ret;
}
-static int vipx_graph_queue(struct vipx_queue *queue, struct vb_container_list *incl, struct vb_container_list *otcl)
+static int vipx_graph_set_param(struct vipx_graph *graph,
+ struct vs4l_param_list *plist)
{
- int ret = 0;
- unsigned long flag;
- struct vipx_graph *graph;
- struct vipx_vertex_ctx *vctx;
- struct vipx_taskmgr *taskmgr;
- struct vipx_task *task;
- int i = 0, j = 0;
+ vipx_enter();
+ set_bit(VIPX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags);
+ vipx_leave();
+ return 0;
+}
- BUG_ON(!queue);
- BUG_ON(!incl);
- //BUG_ON(incl->index < VIPX_MAX_TASK);
- BUG_ON(!otcl);
- //BUG_ON(otcl->index < VIPX_MAX_TASK);
+/*
+ * VS4L set-control hook for a graph. No controls are implemented yet,
+ * so this only triggers the (debug) graph print and reports success.
+ */
+static int vipx_graph_set_ctrl(struct vipx_graph *graph,
+		struct vs4l_ctrl *ctrl)
+{
+	vipx_enter();
+	vipx_graph_print(graph);
+	vipx_leave();
+	return 0;
+}
+
+static int vipx_graph_queue(struct vipx_graph *graph,
+ struct vipx_container_list *incl,
+ struct vipx_container_list *otcl)
+{
+ int ret;
+ struct vipx_taskmgr *tmgr;
+ struct vipx_task *task;
+ unsigned long flags;
- vctx = container_of(queue, struct vipx_vertex_ctx, queue);
- graph = container_of(vctx, struct vipx_graph, vctx);
- taskmgr = &graph->taskmgr;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
if (!test_bit(VIPX_GRAPH_STATE_START, &graph->state)) {
- vipx_ierr("graph is NOT start\n", graph);
ret = -EINVAL;
+ vipx_err("graph(%u) is not started (%lu)\n",
+ graph->idx, graph->state);
goto p_err;
}
if (incl->id != otcl->id) {
- vipx_warn("buffer id is incoincidence(%d, %d)\n", incl->id, otcl->id);
+ vipx_warn("buffer id is incoincidence (%u/%u)\n",
+ incl->id, otcl->id);
otcl->id = incl->id;
}
- taskmgr_e_barrier_irqs(taskmgr, 0, flag);
- vipx_task_pick_fre_to_req(taskmgr, &task);
- taskmgr_x_barrier_irqr(taskmgr, 0, flag);
-
+ taskmgr_e_barrier_irqs(tmgr, 0, flags);
+ task = vipx_task_pick_fre_to_req(tmgr);
+ taskmgr_x_barrier_irqr(tmgr, 0, flags);
if (!task) {
- vipx_ierr("task is lack\n", graph);
- vipx_task_print_all(taskmgr);
ret = -ENOMEM;
+ vipx_err("free task is not remained (%u)\n", graph->idx);
+ vipx_task_print_all(tmgr);
goto p_err;
}
graph->othash[otcl->index] = task->index;
graph->input_cnt++;
- vipx_dbg("in-container list: dir(%d), id(%d), index(%d), flags(%lx), count(%d)\n",
- incl->direction, incl->id, incl->index, incl->flags, incl->count);
- for (i = 0; i < incl->count; i++) {
- vipx_dbg("in-containers[%d] type(%d), target(%d), memory(%d), count(%d)\n", i,
- incl->containers[i].type, incl->containers[i].target,
- incl->containers[i].memory, incl->containers[i].count);
-
- for (j = 0; j < incl->containers[i].count; j++) {
- vipx_dbg("in-buffer[%d] fd(%d), kvaddr(%p), dvaddr(%p)\n", j,
- incl->containers[i].buffers[j].m.fd,
- incl->containers[i].buffers[j].kvaddr,
- (void *)incl->containers[i].buffers[j].dvaddr);
- }
- }
-
- vipx_dbg("out-container list: dir(%d), id(%d), index(%d), flags(%lx), count(%d)\n",
- otcl->direction, otcl->id, otcl->index, otcl->flags, otcl->count);
- for (i = 0; i < otcl->count; i++) {
- vipx_dbg("out-containers[%d] type(%d), target(%d), memory(%d), count(%d)\n", i,
- otcl->containers[i].type, otcl->containers[i].target,
- otcl->containers[i].memory, otcl->containers[i].count);
-
- for (j = 0; j < otcl->containers[i].count; j++) {
- vipx_dbg("out-buffer[%d] fd(%d), kvaddr(%p), dvaddr(%p)\n", j,
- otcl->containers[i].buffers[j].m.fd,
- otcl->containers[i].buffers[j].kvaddr,
- (void *)otcl->containers[i].buffers[j].dvaddr);
- }
- }
-
task->id = incl->id;
task->incl = incl;
task->otcl = otcl;
if ((incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP)) ||
(otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))) {
set_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags);
- vipx_get_timestamp(&task->time[VIPX_TMP_QUEUE]);
+ vipx_get_timestamp(&task->time[VIPX_TIME_QUEUE]);
}
- vipx_graphmgr_queue(graph->cookie, task);
+ vipx_graphmgr_queue(graph->owner, task);
+ vipx_leave();
+ return 0;
p_err:
- vipx_dbg("%s:%d\n", __func__, ret);
return ret;
}
-static int vipx_graph_deque(struct vipx_queue *queue, struct vb_container_list *clist)
+static int vipx_graph_deque(struct vipx_graph *graph,
+ struct vipx_container_list *clist)
{
- int ret = 0;
- u32 findex;
- unsigned long flags;
- struct vipx_graph *graph;
- struct vipx_vertex_ctx *vctx;
- struct vipx_taskmgr *taskmgr;
+ int ret;
+ struct vipx_taskmgr *tmgr;
+ unsigned int tidx;
struct vipx_task *task;
+ unsigned long flags;
- BUG_ON(!queue);
- BUG_ON(!clist);
- //BUG_ON(clist->index < VIPX_MAX_TASK);
-
- vctx = container_of(queue, struct vipx_vertex_ctx, queue);
- graph = container_of(vctx, struct vipx_graph, vctx);
- taskmgr = &graph->taskmgr;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
if (!test_bit(VIPX_GRAPH_STATE_START, &graph->state)) {
- vipx_ierr("graph is NOT start\n", graph);
ret = -EINVAL;
+ vipx_err("graph(%u) is not started (%lu)\n",
+ graph->idx, graph->state);
goto p_err;
}
if (clist->direction == VS4L_DIRECTION_IN)
- findex = graph->inhash[clist->index];
+ tidx = graph->inhash[clist->index];
else
- findex = graph->othash[clist->index];
+ tidx = graph->othash[clist->index];
- if (findex >= VIPX_MAX_TASK) {
- vipx_ierr("task index(%d) invalid\n", graph, findex);
- BUG();
+ if (tidx >= VIPX_MAX_TASK) {
+ ret = -EINVAL;
+ vipx_err("task index(%u) invalid (%u)\n", tidx, graph->idx);
+ goto p_err;
}
- task = &taskmgr->task[findex];
+ task = &tmgr->task[tidx];
if (task->state != VIPX_TASK_STATE_COMPLETE) {
- vipx_ierr("task state(%d) is invalid\n", graph, task->state);
- BUG();
+ ret = -EINVAL;
+ vipx_err("task(%u) state(%d) is invalid (%u)\n",
+ tidx, task->state, graph->idx);
+ goto p_err;
}
if (clist->direction == VS4L_DIRECTION_IN) {
if (task->incl != clist) {
- vipx_ierr("incl ptr is invalid(%p != %p)\n", graph, task->incl, clist);
- BUG();
+ ret = -EINVAL;
+ vipx_err("incl ptr is invalid (%u)\n", graph->idx);
+ goto p_err;
}
graph->inhash[clist->index] = VIPX_MAX_TASK;
task->incl = NULL;
} else {
if (task->otcl != clist) {
- vipx_ierr("otcl ptr is invalid(%p != %p)\n", graph, task->otcl, clist);
- BUG();
+ ret = -EINVAL;
+ vipx_err("otcl ptr is invalid (%u)\n", graph->idx);
+ goto p_err;
}
graph->othash[clist->index] = VIPX_MAX_TASK;
}
if (task->incl || task->otcl)
- goto p_err;
+ return 0;
- taskmgr_e_barrier_irqs(taskmgr, 0, flags);
- vipx_task_trans_com_to_fre(taskmgr, task);
- taskmgr_x_barrier_irqr(taskmgr, 0, flags);
+ taskmgr_e_barrier_irqs(tmgr, 0, flags);
+ vipx_task_trans_com_to_fre(tmgr, task);
+ taskmgr_x_barrier_irqr(tmgr, 0, flags);
+ vipx_leave();
+ return 0;
p_err:
- vipx_dbg("%s:%d\n", __func__, ret);
return ret;
}
-const struct vipx_queue_ops vipx_queue_ops = {
- .start = vipx_graph_start,
- .stop = vipx_graph_stop,
- .format = vipx_graph_format,
+/*
+ * Queue-ops start hook: delegate to __vipx_graph_start() (defined earlier
+ * in this file) and propagate its error code unchanged.
+ */
+static int vipx_graph_start(struct vipx_graph *graph)
+{
+	int ret;
+
+	vipx_enter();
+	ret = __vipx_graph_start(graph);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Queue-ops stop hook: delegate to __vipx_graph_stop() (defined earlier
+ * in this file). Always reports success.
+ */
+static int vipx_graph_stop(struct vipx_graph *graph)
+{
+	vipx_enter();
+
+	__vipx_graph_stop(graph);
+
+	vipx_leave();
+	return 0;
+}
+
+const struct vipx_queue_gops vipx_queue_gops = {
+ .set_graph = vipx_graph_set_graph,
+ .set_format = vipx_graph_set_format,
+ .set_param = vipx_graph_set_param,
+ .set_ctrl = vipx_graph_set_ctrl,
.queue = vipx_graph_queue,
- .deque = vipx_graph_deque
+ .deque = vipx_graph_deque,
+ .start = vipx_graph_start,
+ .stop = vipx_graph_stop
};
static int vipx_graph_control(struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
- struct vipx_taskmgr *taskmgr;
-
- BUG_ON(!graph);
- BUG_ON(!task);
+ int ret;
+ struct vipx_taskmgr *tmgr;
- taskmgr = &graph->taskmgr;
-
- if (&graph->control != task) {
- vipx_ierr("control task is invalid(%p == %p)\n", graph, &graph->control, task);
- BUG();
- }
+ vipx_enter();
+ tmgr = &graph->taskmgr;
switch (task->message) {
case VIPX_CTRL_STOP:
wake_up(&graph->control_wq);
break;
default:
- vipx_ierr("unresolved message(%d)\n", graph, task->message);
- vipx_task_print_all(taskmgr);
- BUG();
- break;
+ ret = -EINVAL;
+ vipx_err("invalid task message(%u) of graph(%u)\n",
+ task->message, graph->idx);
+ vipx_task_print_all(tmgr);
+ goto p_err;
}
- vipx_iinfo("%s:%d\n", graph, __func__, ret);
+ vipx_leave();
+ return 0;
+p_err:
return ret;
}
static int vipx_graph_request(struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
+ int ret;
+ struct vipx_taskmgr *tmgr;
unsigned long flags;
- struct vipx_taskmgr *taskmgr;
-
- BUG_ON(!graph);
- BUG_ON(!task);
-
- taskmgr = &graph->taskmgr;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
if (task->state != VIPX_TASK_STATE_REQUEST) {
- vipx_ierr("task state(%d) is invalid\n", graph, task->state);
- BUG();
+ ret = -EINVAL;
+ vipx_err("task state(%u) is not REQUEST (graph:%u)\n",
+ task->state, graph->idx);
+ goto p_err;
}
- taskmgr_e_barrier_irqs(taskmgr, 0, flags);
- vipx_task_trans_req_to_pre(taskmgr, task);
- taskmgr_x_barrier_irqr(taskmgr, 0, flags);
+ taskmgr_e_barrier_irqs(tmgr, 0, flags);
+ vipx_task_trans_req_to_pre(tmgr, task);
+ taskmgr_x_barrier_irqr(tmgr, 0, flags);
if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags))
- vipx_get_timestamp(&task->time[VIPX_TMP_REQUEST]);
+ vipx_get_timestamp(&task->time[VIPX_TIME_REQUEST]);
- vipx_idbg("%s:%d\n", graph, __func__, ret);
+ vipx_leave();
+ return 0;
+p_err:
return ret;
}
static int vipx_graph_process(struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
+ int ret;
+ struct vipx_taskmgr *tmgr;
unsigned long flags;
- struct vipx_taskmgr *taskmgr;
-
- BUG_ON(!graph);
- BUG_ON(!task);
-
- taskmgr = &graph->taskmgr;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
if (task->state != VIPX_TASK_STATE_PREPARE) {
- vipx_ierr("task state(%d) is invalid\n", graph, task->state);
- BUG();
+ ret = -EINVAL;
+ vipx_err("task state(%u) is not PREPARE (graph:%u)\n",
+ task->state, graph->idx);
+ goto p_err;
}
- taskmgr_e_barrier_irqs(taskmgr, TASKMGR_IDX_0, flags);
- vipx_task_trans_pre_to_pro(taskmgr, task);
- taskmgr_x_barrier_irqr(taskmgr, TASKMGR_IDX_0, flags);
-
-#ifdef DBG_STREAMING
- vipx_iinfo("PROCESS(%d, %d)\n", graph, task->index, task->id);
-#endif
+ taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+ vipx_task_trans_pre_to_pro(tmgr, task);
+ taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags))
- vipx_get_timestamp(&task->time[VIPX_TMP_PROCESS]);
+ vipx_get_timestamp(&task->time[VIPX_TIME_PROCESS]);
- vipx_idbg("%s:%d\n", graph, __func__, ret);
+ vipx_leave();
+ return 0;
+p_err:
return ret;
}
static int vipx_graph_cancel(struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
- unsigned long flags;
+ int ret;
+ struct vipx_taskmgr *tmgr;
+ struct vipx_queue_list *qlist;
+ struct vipx_container_list *incl, *otcl;
unsigned long result;
- struct vipx_taskmgr *taskmgr;
- struct vipx_queue *queue;
- struct vb_container_list *incl, *otcl;
-
- BUG_ON(!graph);
- BUG_ON(!task);
+ unsigned long flags;
- taskmgr = &graph->taskmgr;
- queue = &graph->vctx.queue;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
+ qlist = &graph->vctx->queue_list;
incl = task->incl;
otcl = task->otcl;
- result = 0;
-
- if (!test_bit(VIPX_GRAPH_STATE_START, &graph->state)) {
- vipx_ierr("graph is NOT start\n", graph);
- BUG();
- }
if (task->state != VIPX_TASK_STATE_PROCESS) {
- vipx_ierr("task state(%d) is invalid\n", graph, task->state);
- BUG();
+ ret = -EINVAL;
+ vipx_err("task state(%u) is not PROCESS (graph:%u)\n",
+ task->state, graph->idx);
+ goto p_err;
}
if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags)) {
- vipx_get_timestamp(&task->time[VIPX_TMP_DONE]);
+ vipx_get_timestamp(&task->time[VIPX_TIME_DONE]);
if (incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
memcpy(incl->timestamp, task->time, sizeof(task->time));
if (otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
memcpy(otcl->timestamp, task->time, sizeof(task->time));
-
-#ifdef DBG_TIMEMEASURE
- vipx_irinfo("[TM] G%d : QR(%ld), RR(%ld), RP(%ld), PD(%ld)\n", graph, task, graph->uid,
- VIPX_TIME_IN_US(task->time[VIPX_TMP_REQUEST]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_QUEUE]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_RESOURCE]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_REQUEST]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_PROCESS]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_RESOURCE]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_DONE]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_PROCESS]));
-#endif
}
-#ifdef DBG_STREAMING
- vipx_iinfo("NDONE(%d, %d)\n", graph, task->index, task->id);
-#endif
- set_bit(VS4L_CL_FLAG_DONE, &result);
- set_bit(VS4L_CL_FLAG_INVALID, &result);
-
- taskmgr_e_barrier_irqs(taskmgr, TASKMGR_IDX_0, flags);
- vipx_task_trans_pro_to_com(taskmgr, task);
- taskmgr_x_barrier_irqr(taskmgr, TASKMGR_IDX_0, flags);
+ taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+ vipx_task_trans_pro_to_com(tmgr, task);
+ taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
graph->recent = task->id;
graph->done_cnt++;
- vipx_queue_done(queue, incl, otcl, result);
- vipx_iinfo("%s:%d\n", graph, __func__, ret);
+ result = 0;
+ set_bit(VS4L_CL_FLAG_DONE, &result);
+ set_bit(VS4L_CL_FLAG_INVALID, &result);
+
+ vipx_queue_done(qlist, incl, otcl, result);
+
+ vipx_leave();
+ return 0;
+p_err:
return ret;
}
static int vipx_graph_done(struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
- unsigned long flags;
+ int ret;
+ struct vipx_taskmgr *tmgr;
+ struct vipx_queue_list *qlist;
+ struct vipx_container_list *incl, *otcl;
unsigned long result;
- struct vipx_taskmgr *taskmgr;
- struct vipx_queue *queue;
- struct vb_container_list *incl, *otcl;
-
- BUG_ON(!graph);
- BUG_ON(!task);
+ unsigned long flags;
- taskmgr = &graph->taskmgr;
- queue = &graph->vctx.queue;
+ vipx_enter();
+ tmgr = &graph->taskmgr;
+ qlist = &graph->vctx->queue_list;
incl = task->incl;
otcl = task->otcl;
- result = 0;
-
- if (!test_bit(VIPX_GRAPH_STATE_START, &graph->state)) {
- vipx_ierr("graph is NOT start\n", graph);
- BUG();
- }
if (task->state != VIPX_TASK_STATE_PROCESS) {
- vipx_ierr("task state(%d) is invalid\n", graph, task->state);
- BUG();
+ ret = -EINVAL;
+ vipx_err("task state(%u) is not PROCESS (graph:%u)\n",
+ task->state, graph->idx);
+ goto p_err;
}
if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags)) {
- vipx_get_timestamp(&task->time[VIPX_TMP_DONE]);
+ vipx_get_timestamp(&task->time[VIPX_TIME_DONE]);
if (incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
memcpy(incl->timestamp, task->time, sizeof(task->time));
if (otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
memcpy(otcl->timestamp, task->time, sizeof(task->time));
-
-#ifdef DBG_TIMEMEASURE
- vipx_irinfo("[TM] G%d : QR(%ld), RR(%ld), RP(%ld), PD(%ld)\n", graph, task, graph->uid,
- VIPX_TIME_IN_US(task->time[VIPX_TMP_REQUEST]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_QUEUE]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_RESOURCE]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_REQUEST]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_PROCESS]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_RESOURCE]),
- VIPX_TIME_IN_US(task->time[VIPX_TMP_DONE]) -
- VIPX_TIME_IN_US(task->time[VIPX_TMP_PROCESS]));
-#endif
}
+ taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+ vipx_task_trans_pro_to_com(tmgr, task);
+ taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
+
+ graph->recent = task->id;
+ graph->done_cnt++;
+
+ result = 0;
if (task->param0) {
-#ifdef DBG_STREAMING
- vipx_iinfo("NDONE(%d, %d)\n", graph, task->index, task->id);
-#endif
set_bit(VS4L_CL_FLAG_DONE, &result);
set_bit(VS4L_CL_FLAG_INVALID, &result);
} else {
-#ifdef DBG_STREAMING
- vipx_iinfo("DONE(%d, %d)\n", graph, task->index, task->id);
-#endif
set_bit(VS4L_CL_FLAG_DONE, &result);
}
- taskmgr_e_barrier_irqs(taskmgr, TASKMGR_IDX_0, flags);
- vipx_task_trans_pro_to_com(taskmgr, task);
- taskmgr_x_barrier_irqr(taskmgr, TASKMGR_IDX_0, flags);
+ vipx_queue_done(qlist, incl, otcl, result);
- graph->recent = task->id;
- graph->done_cnt++;
- vipx_queue_done(queue, incl, otcl, result);
-
- vipx_idbg("%s:%d\n", graph, __func__, ret);
+ vipx_leave();
+ return 0;
+p_err:
return ret;
}
-static int vipx_graph_update_param(struct vipx_graph *graph, struct vipx_task *task)
+static int vipx_graph_update_param(struct vipx_graph *graph,
+ struct vipx_task *task)
{
- int ret = 0;
-
- vipx_iinfo("%s:%d\n", graph, __func__, ret);
- return ret;
+ vipx_enter();
+ vipx_leave();
+ return 0;
}
-const struct vipx_graph_ops vipx_graph_ops = {
+static const struct vipx_graph_ops vipx_graph_ops = {
.control = vipx_graph_control,
.request = vipx_graph_request,
.process = vipx_graph_process,
- .cancel = vipx_graph_cancel,
+ .cancel = vipx_graph_cancel,
.done = vipx_graph_done,
.update_param = vipx_graph_update_param
};
+
+/*
+ * Allocate a graph model, link it into graph->gmodel_list and copy the
+ * caller-supplied common graph info into it.
+ * Returns the new model, or ERR_PTR(-ENOMEM) on allocation failure.
+ *
+ * NOTE(review): the model is added to the list before its fields are
+ * initialized - assumes callers serialize access to gmodel_list; confirm.
+ */
+static struct vipx_graph_model *vipx_graph_create_model(
+	struct vipx_graph *graph, struct vipx_common_graph_info *ginfo)
+{
+	int ret;
+	struct vipx_graph_model *gmodel;
+
+	vipx_enter();
+	gmodel = kzalloc(sizeof(*gmodel), GFP_KERNEL);
+	if (!gmodel) {
+		ret = -ENOMEM;
+		vipx_err("Failed to alloc graph model\n");
+		goto p_err_alloc;
+	}
+
+	list_add_tail(&gmodel->list, &graph->gmodel_list);
+	graph->gmodel_count++;
+
+	gmodel->id = ginfo->gid;
+	INIT_LIST_HEAD(&gmodel->kbin_list);
+
+	memcpy(&gmodel->common_ginfo, ginfo, sizeof(gmodel->common_ginfo));
+
+	vipx_leave();
+	return gmodel;
+p_err_alloc:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Look up a registered graph model whose model id (extracted via
+ * GET_COMMON_GRAPH_MODEL_ID) matches that of 'id'.
+ * Returns the model, or ERR_PTR(-EINVAL) if no match exists.
+ */
+static struct vipx_graph_model *vipx_graph_get_model(struct vipx_graph *graph,
+		unsigned int id)
+{
+	unsigned int model_id = GET_COMMON_GRAPH_MODEL_ID(id);
+	struct vipx_graph_model *gmodel, *temp;
+
+	vipx_enter();
+	/* _safe iteration is not required (no deletion) but is harmless */
+	list_for_each_entry_safe(gmodel, temp, &graph->gmodel_list, list) {
+		unsigned int gmodel_id = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+
+		if (gmodel_id == model_id) {
+			vipx_leave();
+			return gmodel;
+		}
+	}
+
+	/* 'id' is unsigned, so print it with %u (was %d) */
+	vipx_err("Failed to get gmodel (%u/%u)\n", id, graph->gmodel_count);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Unlink a graph model from graph->gmodel_list and free it.
+ * NOTE(review): entries remaining on gmodel->kbin_list are not released
+ * here - confirm callers drain that list before destroying the model.
+ */
+static int vipx_graph_destroy_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel)
+{
+	vipx_enter();
+	graph->gmodel_count--;
+	list_del(&gmodel->list);
+	kfree(gmodel);
+	vipx_leave();
+	return 0;
+}
+
+/*
+ * Hand the model over to the graph manager (graph->owner) for
+ * registration; the manager's error code is propagated unchanged.
+ */
+static int vipx_graph_register_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel)
+{
+	int ret;
+
+	vipx_enter();
+	ret = vipx_graphmgr_register_model(graph->owner, gmodel);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Remove the model's registration from the graph manager (graph->owner);
+ * the manager's error code is propagated unchanged.
+ */
+static int vipx_graph_unregister_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel)
+{
+	int ret;
+
+	vipx_enter();
+	ret = vipx_graphmgr_unregister_model(graph->owner, gmodel);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* No-op stub: starting a model needs no graph-side work at present. */
+static int vipx_graph_start_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel)
+{
+	vipx_enter();
+	vipx_leave();
+	return 0;
+}
+
+/* No-op stub: stopping a model needs no graph-side work at present. */
+static int vipx_graph_stop_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel)
+{
+	vipx_enter();
+	vipx_leave();
+	return 0;
+}
+
+/*
+ * Forward an execute request for the model (with its execute info) to
+ * the graph manager; the manager's error code is propagated unchanged.
+ */
+static int vipx_graph_execute_model(struct vipx_graph *graph,
+		struct vipx_graph_model *gmodel,
+		struct vipx_common_execute_info *einfo)
+{
+	int ret;
+
+	vipx_enter();
+	ret = vipx_graphmgr_execute_model(graph->owner, gmodel, einfo);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Graph-model operation table exposed to the vipx context layer. */
+const struct vipx_context_gops vipx_context_gops = {
+	.create_model = vipx_graph_create_model,
+	.get_model = vipx_graph_get_model,
+	.destroy_model = vipx_graph_destroy_model,
+	.register_model = vipx_graph_register_model,
+	.unregister_model = vipx_graph_unregister_model,
+	.start_model = vipx_graph_start_model,
+	.stop_model = vipx_graph_stop_model,
+	.execute_model = vipx_graph_execute_model
+};
+
+/* Debug dump of graph state; currently an empty stub. */
+void vipx_graph_print(struct vipx_graph *graph)
+{
+	vipx_enter();
+	vipx_leave();
+}
+
+/*
+ * Allocate and initialize a graph bound to the given context: register
+ * it with the graph manager, set up its control task, task manager and
+ * task hash tables, and publish the graph ops on the context.
+ * Returns the new graph, or an ERR_PTR on failure.
+ */
+struct vipx_graph *vipx_graph_create(struct vipx_context *vctx,
+		void *graphmgr)
+{
+	int ret;
+	struct vipx_graph *graph;
+	struct vipx_taskmgr *tmgr;
+	unsigned int idx;
+
+	vipx_enter();
+	graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+	if (!graph) {
+		ret = -ENOMEM;
+		vipx_err("Failed to allocate graph\n");
+		goto p_err_kzalloc;
+	}
+
+	ret = vipx_graphmgr_grp_register(graphmgr, graph);
+	if (ret)
+		goto p_err_grp_register;
+
+	graph->owner = graphmgr;
+	graph->gops = &vipx_graph_ops;
+	mutex_init(&graph->local_lock);
+	graph->control.owner = graph;
+	graph->control.message = VIPX_CTRL_NONE;
+	init_waitqueue_head(&graph->control_wq);
+
+	tmgr = &graph->taskmgr;
+	spin_lock_init(&tmgr->slock);
+	ret = vipx_task_init(tmgr, graph->idx, graph);
+	if (ret)
+		goto p_err_task_init;
+
+	/* no task is associated with any hash slot yet */
+	for (idx = 0; idx < VIPX_MAX_TASK; ++idx) {
+		graph->inhash[idx] = VIPX_MAX_TASK;
+		graph->othash[idx] = VIPX_MAX_TASK;
+	}
+
+	INIT_LIST_HEAD(&graph->gmodel_list);
+	graph->gmodel_count = 0;
+
+	vctx->graph_ops = &vipx_context_gops;
+	graph->vctx = vctx;
+
+	vipx_leave();
+	return graph;
+p_err_task_init:
+	/* undo the registration so the manager never holds a freed graph */
+	vipx_graphmgr_grp_unregister(graphmgr, graph);
+p_err_grp_register:
+	kfree(graph);
+p_err_kzalloc:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Tear down a graph: force-stop outstanding work, unregister it from
+ * the graph manager, release both format lists and free the graph.
+ * Always returns 0.
+ */
+int vipx_graph_destroy(struct vipx_graph *graph)
+{
+	vipx_enter();
+	__vipx_graph_stop(graph);
+	vipx_graphmgr_grp_unregister(graph->owner, graph);
+
+	kfree(graph->inflist.formats);
+	graph->inflist.formats = NULL;
+	graph->inflist.count = 0;
+
+	kfree(graph->otflist.formats);
+	graph->otflist.formats = NULL;
+	graph->otflist.count = 0;
+
+	kfree(graph);
+	vipx_leave();
+	return 0;
+}
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#ifndef __VIPX_GRAPH_H__
+#define __VIPX_GRAPH_H__
-#ifndef VIPX_GRAPH_H_
-#define VIPX_GRAPH_H_
-
-#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "vipx-config.h"
#include "vs4l.h"
#include "vipx-taskmgr.h"
-#include "vipx-vertex.h"
-#include "vipx-interface.h"
-
-#define VIPX_GRAPH_MAX_VERTEX 20
-#define VIPX_GRAPH_MAX_PRIORITY 20
-#define VIPX_GRAPH_MAX_INTERMEDIATE 64
-#define VIPX_GRAPH_MAX_LEVEL 20
-#define VIPX_GRAPH_STOP_TIMEOUT (3 * HZ)
+#include "vipx-common-type.h"
+/* forward declaration: fix typo "vips_context" -> "vipx_context"
+ * (the type actually used by struct vipx_graph and vipx_graph_create) */
+struct vipx_context;
struct vipx_graph;
enum vipx_graph_state {
};
struct vipx_format {
- u32 format;
- u32 plane;
- u32 width;
- u32 height;
+ unsigned int format;
+ unsigned int plane;
+ unsigned int width;
+ unsigned int height;
};
struct vipx_format_list {
- u32 count;
- struct vipx_format *formats;
+ unsigned int count;
+ struct vipx_format *formats;
};
struct vipx_graph_ops {
int (*update_param)(struct vipx_graph *graph, struct vipx_task *task);
};
-struct vipx_graph_intermediate {
- u32 buffer_index;
- u32 *buffer;
- void *handle;
- int fd;
+struct vipx_graph_model {
+ unsigned int id;
+ struct list_head kbin_list;
+ unsigned int kbin_count;
+ struct vipx_common_graph_info common_ginfo;
+ struct vipx_buffer *graph;
+ struct vipx_buffer *temp_buf;
+ struct vipx_buffer *weight;
+ struct vipx_buffer *bias;
+
+ struct list_head list;
};
struct vipx_graph {
- u32 idx;
- u32 uid;
+ unsigned int idx;
unsigned long state;
- unsigned long flags;
- u32 priority;
- struct mutex local_lock;
struct mutex *global_lock;
- /* for debugging */
- u32 input_cnt;
- u32 cancel_cnt;
- u32 done_cnt;
- u32 recent;
-
+ void *owner;
const struct vipx_graph_ops *gops;
+ struct mutex local_lock;
+ struct vipx_task control;
+ wait_queue_head_t control_wq;
+ struct vipx_taskmgr taskmgr;
- void *cookie;
- void *memory;
- struct vipx_pipe *pipe;
- struct vipx_vertex_ctx vctx;
+ unsigned int uid;
+ unsigned long flags;
+ unsigned int priority;
- struct vipx_format_list informat_list;
- struct vipx_format_list otformat_list;
+ struct vipx_format_list inflist;
+ struct vipx_format_list otflist;
- u32 inhash[VIPX_MAX_TASK];
- u32 othash[VIPX_MAX_TASK];
- struct vipx_taskmgr taskmgr;
- struct vipx_task control;
- wait_queue_head_t control_wq;
+ unsigned int inhash[VIPX_MAX_TASK];
+ unsigned int othash[VIPX_MAX_TASK];
+
+ struct list_head gmodel_list;
+ unsigned int gmodel_count;
+
+ /* for debugging */
+ unsigned int input_cnt;
+ unsigned int cancel_cnt;
+ unsigned int done_cnt;
+ unsigned int recent;
+
+ struct vipx_context *vctx;
};
+extern const struct vipx_queue_gops vipx_queue_gops;
+extern const struct vipx_vctx_gops vipx_vctx_gops;
+
void vipx_graph_print(struct vipx_graph *graph);
-int vipx_graph_create(struct vipx_graph **graph, void *cookie, void *memory);
-int vipx_graph_destroy(struct vipx_graph *graph);
-int vipx_graph_config(struct vipx_graph *graph, struct vs4l_graph *info);
-int vipx_graph_param(struct vipx_graph *graph, struct vs4l_param_list *plist);
-#define CALL_GOPS(g, op, ...) (((g)->gops->op) ? ((g)->gops->op(g, ##__VA_ARGS__)) : 0)
+struct vipx_graph *vipx_graph_create(struct vipx_context *vctx,
+ void *graphmgr);
+int vipx_graph_destroy(struct vipx_graph *graph);
#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
#include <linux/sched/types.h>
-#include <linux/sched/rt.h>
-#include <linux/bug.h>
-#include <linux/scatterlist.h>
-
-#include "vipx-taskmgr.h"
+#include "vipx-log.h"
+#include "vipx-common-type.h"
#include "vipx-device.h"
#include "vipx-graphmgr.h"
-/*
- * task trans
- */
-
-static void __vipx_taskdesc_s_free(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_set_free(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
-
+ vipx_enter();
taskdesc->state = VIPX_TASKDESC_STATE_FREE;
-
- list_add_tail(&taskdesc->list, &graphmgr->tdfre_list);
- graphmgr->tdfre_cnt++;
+ list_add_tail(&taskdesc->list, &gmgr->tdfre_list);
+ gmgr->tdfre_cnt++;
+ vipx_leave();
}
-static void __vipx_taskdesc_free_head(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc **taskdesc)
+static struct vipx_taskdesc *__vipx_taskdesc_get_first_free(
+ struct vipx_graphmgr *gmgr)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
-
- if (graphmgr->tdfre_cnt)
- *taskdesc = container_of(graphmgr->tdfre_list.next, struct vipx_taskdesc, list);
- else
- *taskdesc = NULL;
+ struct vipx_taskdesc *taskdesc = NULL;
+
+ vipx_enter();
+ if (gmgr->tdfre_cnt)
+ taskdesc = list_first_entry(&gmgr->tdfre_list,
+ struct vipx_taskdesc, list);
+ vipx_leave();
+ return taskdesc;
}
-static void __vipx_taskdesc_s_ready(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_set_ready(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
-
- taskdesc->state = VIPX_TASKDESC_STATE_FREE;
-
- list_add_tail(&taskdesc->list, &graphmgr->tdrdy_list);
- graphmgr->tdrdy_cnt++;
+ vipx_enter();
+ taskdesc->state = VIPX_TASKDESC_STATE_READY;
+ list_add_tail(&taskdesc->list, &gmgr->tdrdy_list);
+ gmgr->tdrdy_cnt++;
+ vipx_leave();
}
-static void __vipx_taskdesc_g_ready(struct vipx_graphmgr *graphmgr,
- struct vipx_taskdesc **taskdesc)
+static struct vipx_taskdesc *__vipx_taskdesc_pop_ready(
+ struct vipx_graphmgr *gmgr)
{
- if (graphmgr->tdrdy_cnt &&
- (*taskdesc = container_of(graphmgr->tdrdy_list.next, struct vipx_taskdesc, list))) {
- list_del(&(*taskdesc)->list);
- graphmgr->tdrdy_cnt--;
- (*taskdesc)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *taskdesc = NULL;
+ struct vipx_taskdesc *taskdesc = NULL;
+
+ vipx_enter();
+ if (gmgr->tdrdy_cnt) {
+ taskdesc = container_of(gmgr->tdrdy_list.next,
+ struct vipx_taskdesc, list);
+
+ list_del(&taskdesc->list);
+ gmgr->tdrdy_cnt--;
}
+ vipx_leave();
+ return taskdesc;
}
-static void __vipx_taskdesc_s_request(struct vipx_graphmgr *graphmgr,
- struct vipx_taskdesc *prev,
- struct vipx_taskdesc *taskdesc,
- struct vipx_taskdesc *next)
+static void __vipx_taskdesc_set_request(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
+ vipx_enter();
taskdesc->state = VIPX_TASKDESC_STATE_REQUEST;
-
- if (prev && next) {
- next->list.prev = &taskdesc->list;
- taskdesc->list.next = &next->list;
- taskdesc->list.prev = &prev->list;
- prev->list.next = &taskdesc->list;
- } else if (prev) {
- list_add_tail(&taskdesc->list, &graphmgr->tdreq_list);
- } else {
- list_add(&taskdesc->list, &graphmgr->tdreq_list);
- }
-
- graphmgr->tdreq_cnt++;
+ list_add_tail(&taskdesc->list, &gmgr->tdreq_list);
+ gmgr->tdreq_cnt++;
+ vipx_leave();
}
-static void __vipx_taskdesc_s_alloc(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_set_request_sched(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc, struct vipx_taskdesc *next)
{
- taskdesc->state = VIPX_TASKDESC_STATE_ALLOC;
- list_add_tail(&taskdesc->list, &graphmgr->tdalc_list);
- graphmgr->tdalc_cnt++;
+ vipx_enter();
+ taskdesc->state = VIPX_TASKDESC_STATE_REQUEST;
+ list_add_tail(&taskdesc->list, &next->list);
+ gmgr->tdreq_cnt++;
+ vipx_leave();
}
-void __vipx_taskdesc_s_process(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_set_process(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
-
+ vipx_enter();
taskdesc->state = VIPX_TASKDESC_STATE_PROCESS;
- list_add_tail(&taskdesc->list, &graphmgr->tdpro_list);
- graphmgr->tdpro_cnt++;
+ list_add_tail(&taskdesc->list, &gmgr->tdpro_list);
+ gmgr->tdpro_cnt++;
+ vipx_leave();
}
-static void __vipx_taskdesc_s_complete(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_set_complete(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
-
+ vipx_enter();
taskdesc->state = VIPX_TASKDESC_STATE_COMPLETE;
- list_add_tail(&taskdesc->list, &graphmgr->tdcom_list);
- graphmgr->tdcom_cnt++;
+ list_add_tail(&taskdesc->list, &gmgr->tdcom_list);
+ gmgr->tdcom_cnt++;
+ vipx_leave();
}
-void __vipx_taskdesc_trans_fre_to_rdy(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_fre_to_rdy(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdfre_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_FREE);
-
- list_del(&taskdesc->list);
- graphmgr->tdfre_cnt--;
- __vipx_taskdesc_s_ready(graphmgr, taskdesc);
-}
+ vipx_enter();
+ if (!gmgr->tdfre_cnt) {
+ vipx_warn("tdfre_cnt is zero\n");
+ return;
+ }
-void __vipx_taskdesc_trans_rdy_to_fre(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
-{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdrdy_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_READY);
+ if (taskdesc->state != VIPX_TASKDESC_STATE_FREE) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdrdy_cnt--;
- __vipx_taskdesc_s_free(graphmgr, taskdesc);
+ gmgr->tdfre_cnt--;
+ __vipx_taskdesc_set_ready(gmgr, taskdesc);
+ vipx_leave();
}
-void __vipx_taskdesc_trans_req_to_alc(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_rdy_to_fre(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdreq_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_REQUEST);
-
- list_del(&taskdesc->list);
- graphmgr->tdreq_cnt--;
- __vipx_taskdesc_s_alloc(graphmgr, taskdesc);
-}
+ vipx_enter();
+ if (!gmgr->tdrdy_cnt) {
+ vipx_warn("tdrdy_cnt is zero\n");
+ return;
+ }
-void __vipx_taskdesc_trans_req_to_pro(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
-{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdreq_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_REQUEST);
+ if (taskdesc->state != VIPX_TASKDESC_STATE_READY) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdreq_cnt--;
- __vipx_taskdesc_s_process(graphmgr, taskdesc);
+ gmgr->tdrdy_cnt--;
+ __vipx_taskdesc_set_free(gmgr, taskdesc);
+ vipx_leave();
}
-void __vipx_taskdesc_trans_req_to_fre(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_req_to_pro(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdreq_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_REQUEST);
+ vipx_enter();
+ if (!gmgr->tdreq_cnt) {
+ vipx_warn("tdreq_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_REQUEST) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdreq_cnt--;
- __vipx_taskdesc_s_free(graphmgr, taskdesc);
+ gmgr->tdreq_cnt--;
+ __vipx_taskdesc_set_process(gmgr, taskdesc);
+ vipx_leave();
}
-void __vipx_taskdesc_trans_alc_to_pro(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_req_to_fre(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdalc_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_ALLOC);
+ vipx_enter();
+ if (!gmgr->tdreq_cnt) {
+ vipx_warn("tdreq_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_REQUEST) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdalc_cnt--;
- __vipx_taskdesc_s_process(graphmgr, taskdesc);
+ gmgr->tdreq_cnt--;
+ __vipx_taskdesc_set_free(gmgr, taskdesc);
+ vipx_leave();
}
-void __vipx_taskdesc_trans_alc_to_fre(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_alc_to_pro(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdalc_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_ALLOC);
+ vipx_enter();
+ if (!gmgr->tdalc_cnt) {
+ vipx_warn("tdalc_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_ALLOC) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdalc_cnt--;
- __vipx_taskdesc_s_free(graphmgr, taskdesc);
+ gmgr->tdalc_cnt--;
+ __vipx_taskdesc_set_process(gmgr, taskdesc);
+ vipx_leave();
}
-static void __vipx_taskdesc_trans_pro_to_com(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_alc_to_fre(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdpro_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_PROCESS);
+ vipx_enter();
+ if (!gmgr->tdalc_cnt) {
+ vipx_warn("tdalc_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_ALLOC) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdpro_cnt--;
- __vipx_taskdesc_s_complete(graphmgr, taskdesc);
+ gmgr->tdalc_cnt--;
+ __vipx_taskdesc_set_free(gmgr, taskdesc);
+ vipx_leave();
}
-void __vipx_taskdesc_trans_pro_to_fre(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_pro_to_com(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdpro_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_PROCESS);
+ vipx_enter();
+ if (!gmgr->tdpro_cnt) {
+ vipx_warn("tdpro_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_PROCESS) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdpro_cnt--;
- __vipx_taskdesc_s_free(graphmgr, taskdesc);
+ gmgr->tdpro_cnt--;
+ __vipx_taskdesc_set_complete(gmgr, taskdesc);
+ vipx_leave();
}
-static void __vipx_taskdesc_trans_com_to_fre(struct vipx_graphmgr *graphmgr, struct vipx_taskdesc *taskdesc)
+static void __vipx_taskdesc_trans_pro_to_fre(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
- BUG_ON(!graphmgr);
- BUG_ON(!taskdesc);
- BUG_ON(!graphmgr->tdcom_cnt);
- BUG_ON(taskdesc->state != VIPX_TASKDESC_STATE_COMPLETE);
+ vipx_enter();
+ if (!gmgr->tdpro_cnt) {
+ vipx_warn("tdpro_cnt is zero\n");
+ return;
+ }
+
+ if (taskdesc->state != VIPX_TASKDESC_STATE_PROCESS) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
list_del(&taskdesc->list);
- graphmgr->tdcom_cnt--;
- __vipx_taskdesc_s_free(graphmgr, taskdesc);
+ gmgr->tdpro_cnt--;
+ __vipx_taskdesc_set_free(gmgr, taskdesc);
+ vipx_leave();
}
-/*
- * task trans END
- */
-
-void vipx_taskdesc_print(struct vipx_graphmgr *graphmgr)
+static void __vipx_taskdesc_trans_com_to_fre(struct vipx_graphmgr *gmgr,
+ struct vipx_taskdesc *taskdesc)
{
-}
+ vipx_enter();
+ if (!gmgr->tdcom_cnt) {
+ vipx_warn("tdcom_cnt is zero\n");
+ return;
+ }
-#ifdef USE_TASK_TIMER
-static int vipx_time_thread(void *data)
-{
- int ret = 0;
+ if (taskdesc->state != VIPX_TASKDESC_STATE_COMPLETE) {
+ vipx_warn("state(%x) is invalid\n", taskdesc->state);
+ return;
+ }
- return ret;
+ list_del(&taskdesc->list);
+ gmgr->tdcom_cnt--;
+ __vipx_taskdesc_set_free(gmgr, taskdesc);
+ vipx_leave();
}
-#endif
-static int __vipx_itf_init(struct vipx_interface *interface,
- struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_init(struct vipx_interface *itf,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
-
- itaskmgr = &interface->taskmgr;
-
- ret = vipx_hw_enum(interface);
- if (ret) {
- vipx_err("vipx_hw_enum is fail(%d)\n", ret);
- goto p_err;
- }
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
- goto p_err;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
}
itask->id = 0;
itask->param2 = 0;
itask->param3 = 0;
- ret = vipx_hw_init(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_init is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = vipx_hw_init(itf, itask);
+ if (ret)
+ goto p_err_init;
-p_err:
- vipx_info("%s:\n", __func__);
+ vipx_leave();
+ return 0;
+p_err_init:
+p_err_pick:
return ret;
}
-static int __vipx_itf_deinit(struct vipx_interface *interface,
- struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_deinit(struct vipx_interface *itf,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
-
- itaskmgr = &interface->taskmgr;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
- goto p_err;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
}
itask->id = 0;
itask->param2 = 0;
itask->param3 = 0;
- ret = vipx_hw_deinit(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_deinit is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = vipx_hw_deinit(itf, itask);
+ if (ret)
+ goto p_err_deinit;
-p_err:
- vipx_info("%s:\n", __func__);
+ vipx_leave();
+ return 0;
+p_err_deinit:
+p_err_pick:
return ret;
}
-int __vipx_itf_create(struct vipx_interface *interface,
- struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_create(struct vipx_interface *itf,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
-
- itaskmgr = &interface->taskmgr;
+ /* TODO check */
+ return 0;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
goto p_err;
}
-#ifdef DBG_TIMEMEASURE
- vipx_get_timestamp(&itask->time[VIPX_TMP_REQUEST]);
-#endif
-
itask->id = 0;
itask->lock = &graph->local_lock;
itask->findex = VIPX_MAX_TASK;
itask->message = VIPX_TASK_CREATE;
itask->param0 = graph->uid;
itask->param1 = graph->idx;
- itask->param2 = (ulong)0;
- itask->param3 = (ulong)0;
+ itask->param2 = 0;
+ itask->param3 = 0;
- ret = vipx_hw_create(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_create is fail(%d)\n", ret);
+ ret = vipx_hw_create(itf, itask);
+ if (ret)
goto p_err;
- }
-
-#ifdef DBG_TIMEMEASURE
- vipx_iinfo("[TM] C : %ldus, %ldus\n", graph,
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_REQUEST]),
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_DONE]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]));
-#endif
+ vipx_leave();
+ return 0;
p_err:
- vipx_info("%s:\n", __func__);
return ret;
}
-static int __vipx_itf_destroy(struct vipx_interface *interface,
- struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_destroy(struct vipx_interface *itf,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
-
- itaskmgr = &interface->taskmgr;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
goto p_err;
}
-#ifdef DBG_TIMEMEASURE
- vipx_get_timestamp(&itask->time[VIPX_TMP_REQUEST]);
-#endif
-
itask->id = 0;
itask->lock = &graph->local_lock;
itask->findex = VIPX_MAX_TASK;
itask->param2 = 0;
itask->param3 = 0;
- ret = vipx_hw_destroy(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_destory is fail(%d)\n", ret);
+ ret = vipx_hw_destroy(itf, itask);
+ if (ret)
goto p_err;
- }
-
-#ifdef DBG_TIMEMEASURE
- vipx_iinfo("[TM] D : %ldus, %ldus\n", graph,
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_REQUEST]),
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_DONE]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]));
-#endif
+ vipx_leave();
+ return 0;
p_err:
- vipx_info("%s:\n", __func__);
return ret;
}
-static int __vipx_itf_config(struct vipx_interface *interface,
- struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_config(struct vipx_interface *itf,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
-
- itaskmgr = &interface->taskmgr;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
goto p_err;
}
-#ifdef DBG_TIMEMEASURE
- vipx_get_timestamp(&itask->time[VIPX_TMP_REQUEST]);
-#endif
-
itask->id = 0;
itask->lock = &graph->local_lock;
itask->findex = VIPX_MAX_TASK;
itask->message = VIPX_TASK_ALLOCATE;
itask->param0 = graph->uid;
itask->param1 = graph->idx;
- itask->param2 = (ulong)&graph->informat_list;
- itask->param3 = (ulong)&graph->otformat_list;
+ itask->param2 = (unsigned long)&graph->inflist;
+ itask->param3 = (unsigned long)&graph->otflist;
- ret = vipx_hw_config(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_config is fail(%d)\n", ret);
+ ret = vipx_hw_config(itf, itask);
+ if (ret)
goto p_err;
- }
-
-#ifdef DBG_TIMEMEASURE
- vipx_iinfo("[TM] A : %ldus, %ldus\n", graph,
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_REQUEST]),
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_DONE]) -
- VIPX_TIME_IN_US(itask->time[VIPX_TMP_PROCESS]));
-#endif
+ vipx_leave();
+ return 0;
p_err:
- vipx_info("%s:\n", __func__);
return ret;
}
-static int __vipx_itf_process(struct vipx_interface *interface,
- struct vipx_graph *graph,
- struct vipx_task *task)
+static int __vipx_graphmgr_itf_process(struct vipx_interface *itf,
+ struct vipx_graph *graph, struct vipx_task *task)
{
- int ret = 0;
+ int ret;
unsigned long flags;
struct vipx_taskmgr *itaskmgr;
struct vipx_task *itask;
- BUG_ON(!interface);
- BUG_ON(!graph);
- BUG_ON(!task);
-
- itaskmgr = &interface->taskmgr;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
- vipx_task_pick_fre_to_req(itaskmgr, &itask);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
-
if (!itask) {
- vipx_err("itask is NULL\n");
ret = -ENOMEM;
- goto p_err;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
}
if (test_bit(VIPX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags)) {
- ret = CALL_GOPS(graph, update_param, task);
- if (ret) {
- vipx_err("GOPS(update_param) is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = graph->gops->update_param(graph, task);
+ if (ret)
+ goto p_err_update_param;
clear_bit(VIPX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags);
}
itask->message = VIPX_TASK_PROCESS;
itask->param0 = graph->uid;
itask->param1 = graph->idx;
- itask->param2 = (ulong)0; /* return : DONE or NDONE */
+ itask->param2 = 0; /* return : DONE or NDONE */
itask->param3 = 0; /* return : error code if param2 is NDONE */
itask->flags = task->flags;
itask->incl = task->incl;
itask->otcl = task->otcl;
- ret = CALL_GOPS(graph, process, task);
- if (ret) {
- vipx_err("GOPS(process) is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = graph->gops->process(graph, task);
+ if (ret)
+ goto p_err_process;
- ret = vipx_hw_process(interface, itask);
- if (ret) {
- vipx_err("vipx_hw_process is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = vipx_hw_process(itf, itask);
+ if (ret)
+ goto p_err_hw_process;
-p_err:
- vipx_dbg("%s:\n", __func__);
+ vipx_leave();
+ return 0;
+p_err_hw_process:
+p_err_process:
+p_err_update_param:
+p_err_pick:
return ret;
}
-static void __vipx_graphmgr_sched(struct vipx_graphmgr *graphmgr)
+static void __vipx_graphmgr_sched(struct vipx_graphmgr *gmgr)
{
- int ret = 0;
+ int ret;
+ struct vipx_taskdesc *ready_td, *list_td, *temp;
struct vipx_graph *graph;
struct vipx_task *task;
- struct vipx_taskdesc *ready, *request, *process, *prev, *next, *temp;
- struct vipx_interface *interface;
- interface = graphmgr->interface;
- ready = NULL;
- request = NULL;
- prev = NULL;
- next = NULL;
+ vipx_enter();
- mutex_lock(&graphmgr->tdlock);
+ mutex_lock(&gmgr->tdlock);
- /* 1. priority order */
while (1) {
- __vipx_taskdesc_g_ready(graphmgr, &ready);
- if (!ready)
+ ready_td = __vipx_taskdesc_pop_ready(gmgr);
+ if (!ready_td)
break;
- list_for_each_entry_safe(next, temp, &graphmgr->tdreq_list, list) {
- if (ready->priority > next->priority)
- break;
+ if (!gmgr->tdreq_cnt) {
+ __vipx_taskdesc_set_request(gmgr, ready_td);
+ continue;
+ }
- prev = next;
- next = NULL;
+ list_for_each_entry_safe(list_td, temp, &gmgr->tdreq_list,
+ list) {
+ if (ready_td->priority > list_td->priority) {
+ __vipx_taskdesc_set_request_sched(gmgr,
+ ready_td, list_td);
+ break;
+ }
}
- __vipx_taskdesc_s_request(graphmgr, prev, ready, next);
+ if (ready_td->state == VIPX_TASKDESC_STATE_READY)
+ __vipx_taskdesc_set_request(gmgr, ready_td);
}
- /* 2. */
- list_for_each_entry_safe(request, temp, &graphmgr->tdreq_list, list) {
- graph = request->graph;
- task = request->task;
-
- __vipx_taskdesc_trans_req_to_pro(graphmgr, request);
+ list_for_each_entry_safe(list_td, temp, &gmgr->tdreq_list, list) {
+ __vipx_taskdesc_trans_req_to_pro(gmgr, list_td);
}
- mutex_unlock(&graphmgr->tdlock);
+ mutex_unlock(&gmgr->tdlock);
- /* 3. process graph */
- list_for_each_entry_safe(process, temp, &graphmgr->tdpro_list, list) {
- graph = process->graph;
- task = process->task;
+ list_for_each_entry_safe(list_td, temp, &gmgr->tdpro_list, list) {
+ graph = list_td->graph;
+ task = list_td->task;
- ret = __vipx_itf_process(interface, graph, task);
+ ret = __vipx_graphmgr_itf_process(gmgr->interface, graph, task);
if (ret) {
- vipx_err("__vipx_itf_process is fail(%d)\n", ret);
-
- ret = CALL_GOPS(graph, cancel, task);
- if (ret) {
- vipx_err("CALL_GOPS(cancel) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->cancel(graph, task);
+ if (ret)
+ return;
- process->graph = NULL;
- process->task = NULL;
- __vipx_taskdesc_trans_pro_to_fre(graphmgr, process);
+ list_td->graph = NULL;
+ list_td->task = NULL;
+ __vipx_taskdesc_trans_pro_to_fre(gmgr, list_td);
continue;
}
- __vipx_taskdesc_trans_pro_to_com(graphmgr, process);
+ __vipx_taskdesc_trans_pro_to_com(gmgr, list_td);
}
- graphmgr->sched_cnt++;
- vipx_dbg("[%s:%d]\n", __func__, __LINE__);
+ gmgr->sched_cnt++;
+ vipx_leave();
}
static void vipx_graph_thread(struct kthread_work *work)
{
- int ret = 0;
- struct vipx_graphmgr *graphmgr;
- struct vipx_graph *graph;
+ int ret;
struct vipx_task *task;
- struct vipx_taskdesc *taskdesc, * temp;
-
- BUG_ON(!work);
+ struct vipx_graph *graph;
+ struct vipx_graphmgr *gmgr;
+ struct vipx_taskdesc *taskdesc, *temp;
+ vipx_enter();
task = container_of(work, struct vipx_task, work);
graph = task->owner;
- graphmgr = graph->cookie;
+ gmgr = graph->owner;
switch (task->message) {
case VIPX_TASK_REQUEST:
- ret = CALL_GOPS(graph, request, task);
- if (ret) {
- vipx_err("CALL_GOPS(request) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->request(graph, task);
+ if (ret)
+ return;
- __vipx_taskdesc_free_head(graphmgr, &taskdesc);
+ taskdesc = __vipx_taskdesc_get_first_free(gmgr);
if (!taskdesc) {
vipx_err("taskdesc is NULL\n");
- BUG();
+ return;
}
task->tdindex = taskdesc->index;
taskdesc->graph = graph;
taskdesc->task = task;
taskdesc->priority = graph->priority;
- __vipx_taskdesc_trans_fre_to_rdy(graphmgr, taskdesc);
+ __vipx_taskdesc_trans_fre_to_rdy(gmgr, taskdesc);
break;
case VIPX_CTRL_STOP:
- list_for_each_entry_safe(taskdesc, temp, &graphmgr->tdrdy_list, list) {
+ list_for_each_entry_safe(taskdesc, temp,
+ &gmgr->tdrdy_list, list) {
if (taskdesc->graph->idx != graph->idx)
continue;
- ret = CALL_GOPS(graph, cancel, taskdesc->task);
- if (ret) {
- vipx_err("CALL_GOPS(cancel) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->cancel(graph, taskdesc->task);
+ if (ret)
+ return;
taskdesc->graph = NULL;
taskdesc->task = NULL;
- __vipx_taskdesc_trans_rdy_to_fre(graphmgr, taskdesc);
+ __vipx_taskdesc_trans_rdy_to_fre(gmgr, taskdesc);
}
- mutex_lock(&graphmgr->tdlock);
+ mutex_lock(&gmgr->tdlock);
- list_for_each_entry_safe(taskdesc, temp, &graphmgr->tdreq_list, list) {
+ list_for_each_entry_safe(taskdesc, temp,
+ &gmgr->tdreq_list, list) {
if (taskdesc->graph->idx != graph->idx)
continue;
- ret = CALL_GOPS(graph, cancel, taskdesc->task);
- if (ret) {
- vipx_err("CALL_GOPS(cancel) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->cancel(graph, taskdesc->task);
+ if (ret)
+ return;
taskdesc->graph = NULL;
taskdesc->task = NULL;
- __vipx_taskdesc_trans_req_to_fre(graphmgr, taskdesc);
+ __vipx_taskdesc_trans_req_to_fre(gmgr, taskdesc);
}
- mutex_unlock(&graphmgr->tdlock);
+ mutex_unlock(&gmgr->tdlock);
- ret = CALL_GOPS(graph, control, task);
- if (ret) {
- vipx_err("CALL_GOPS(control) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->control(graph, task);
+ if (ret)
+ return;
+ /* TODO check return/break */
return;
default:
- BUG();
- break;
+ vipx_err("message of task is invalid (%d)\n", task->message);
+ return;
}
- __vipx_graphmgr_sched(graphmgr);
- vipx_dbg("[%s:%d]\n", __func__, __LINE__);
+ __vipx_graphmgr_sched(gmgr);
+ vipx_leave();
}
static void vipx_interface_thread(struct kthread_work *work)
{
- int ret = 0;
- u32 task_index;
- u32 taskdesc_index;
- unsigned long flag;
- struct vipx_graphmgr *graphmgr;
- struct vipx_graph *graph;
- struct vipx_taskmgr *itaskmgr;
+ int ret;
struct vipx_task *task, *itask;
+ struct vipx_interface *itf;
+ struct vipx_graphmgr *gmgr;
+ unsigned int taskdesc_index;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_graph *graph;
struct vipx_taskdesc *taskdesc;
- struct vipx_interface *interface;
-
- BUG_ON(!work);
+ unsigned long flags;
+ vipx_enter();
itask = container_of(work, struct vipx_task, work);
- interface = itask->owner;
- itaskmgr = &interface->taskmgr;
- graphmgr = interface->cookie;
+ itf = itask->owner;
+ itaskmgr = &itf->taskmgr;
+ gmgr = itf->cookie;
+
+ if (itask->findex >= VIPX_MAX_TASK) {
+ vipx_err("task index(%u) is invalid (%u)\n",
+ itask->findex, VIPX_MAX_TASK);
+ return;
+ }
switch (itask->message) {
case VIPX_TASK_ALLOCATE:
- task_index = itask->findex;
taskdesc_index = itask->tdindex;
-
if (taskdesc_index >= VIPX_MAX_TASKDESC) {
- vipx_err("taskdesc index(%d) is invalid\n", taskdesc_index);
- BUG();
+ vipx_err("taskdesc index(%u) is invalid (%u)\n",
+ taskdesc_index, VIPX_MAX_TASKDESC);
+ return;
}
- if (task_index >= VIPX_MAX_TASK) {
- vipx_err("task index(%d) is invalid\n", task_index);
- BUG();
- }
-
- taskdesc = &graphmgr->taskdesc[taskdesc_index];
+ taskdesc = &gmgr->taskdesc[taskdesc_index];
if (taskdesc->state != VIPX_TASKDESC_STATE_ALLOC) {
- vipx_err("taskdesc state is invalid(%d)\n", taskdesc->state);
- vipx_taskdesc_print(graphmgr);
- BUG();
+ vipx_err("taskdesc state(%u) is not ALLOC (%u)\n",
+ taskdesc->state, taskdesc_index);
+ return;
}
graph = taskdesc->graph;
if (!graph) {
- vipx_err("graph is NULL(%d)\n", taskdesc_index);
- BUG();
+ vipx_err("graph is NULL (%u)\n", taskdesc_index);
+ return;
}
task = taskdesc->task;
if (!task) {
- vipx_err("task is NULL(%d)\n", taskdesc_index);
- BUG();
+ vipx_err("task is NULL (%u)\n", taskdesc_index);
+ return;
}
task->message = itask->message;
/* return status check */
if (task->param0) {
- vipx_err("allocation is fail(%ld, %ld)\n", task->param0, task->param1);
+ vipx_err("task allocation failed (%lu, %lu)\n",
+ task->param0, task->param1);
- ret = CALL_GOPS(graph, cancel, task);
- if (ret) {
- vipx_err("CALL_GOPS(cancel) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->cancel(graph, task);
+ if (ret)
+ return;
/* taskdesc cleanup */
- mutex_lock(&graphmgr->tdlock);
+ mutex_lock(&gmgr->tdlock);
taskdesc->graph = NULL;
taskdesc->task = NULL;
- __vipx_taskdesc_trans_alc_to_fre(graphmgr, taskdesc);
- mutex_unlock(&graphmgr->tdlock);
+ __vipx_taskdesc_trans_alc_to_fre(gmgr, taskdesc);
+ mutex_unlock(&gmgr->tdlock);
} else {
/* taskdesc transition */
- mutex_lock(&graphmgr->tdlock);
- __vipx_taskdesc_trans_alc_to_pro(graphmgr, taskdesc);
- mutex_unlock(&graphmgr->tdlock);
+ mutex_lock(&gmgr->tdlock);
+ __vipx_taskdesc_trans_alc_to_pro(gmgr, taskdesc);
+ mutex_unlock(&gmgr->tdlock);
}
/* itask cleanup */
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
vipx_task_trans_com_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
break;
case VIPX_TASK_PROCESS:
- task_index = itask->findex;
taskdesc_index = itask->tdindex;
-
if (taskdesc_index >= VIPX_MAX_TASKDESC) {
- vipx_err("taskdesc index(%d) is invalid\n", taskdesc_index);
- BUG();
+ vipx_err("taskdesc index(%u) is invalid (%u)\n",
+ taskdesc_index, VIPX_MAX_TASKDESC);
+ return;
}
- if (task_index >= VIPX_MAX_TASK) {
- vipx_err("task index(%d) is invalid\n", task_index);
- BUG();
- }
-
- taskdesc = &graphmgr->taskdesc[taskdesc_index];
+ taskdesc = &gmgr->taskdesc[taskdesc_index];
if (taskdesc->state == VIPX_TASKDESC_STATE_FREE) {
- vipx_err("taskdesc state is FREE(%d)\n", taskdesc->state);
- vipx_taskdesc_print(graphmgr);
+ vipx_err("taskdesc(%u) state is FREE\n",
+ taskdesc_index);
/* itask cleanup */
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
vipx_task_trans_com_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
break;
}
if (taskdesc->state != VIPX_TASKDESC_STATE_COMPLETE) {
- vipx_err("taskdesc state is invalid(%d)\n", taskdesc->state);
- vipx_taskdesc_print(graphmgr);
- BUG();
+ vipx_err("taskdesc state is not COMPLETE (%u)\n",
+ taskdesc->state);
+ return;
}
graph = taskdesc->graph;
if (!graph) {
- vipx_err("graph is NULL(%d)\n", taskdesc_index);
- BUG();
+ vipx_err("graph is NULL (%u)\n", taskdesc_index);
+ return;
}
task = taskdesc->task;
if (!task) {
- vipx_err("task is NULL(%d)\n", taskdesc_index);
- BUG();
+ vipx_err("task is NULL (%u)\n", taskdesc_index);
+ return;
}
task->message = itask->message;
task->param0 = itask->param2;
task->param1 = itask->param3;
- ret = CALL_GOPS(graph, done, task);
- if (ret) {
- vipx_err("CALL_GOPS(done) is fail(%d)\n", ret);
- BUG();
- }
+ ret = graph->gops->done(graph, task);
+ if (ret)
+ return;
/* taskdesc cleanup */
taskdesc->graph = NULL;
taskdesc->task = NULL;
- __vipx_taskdesc_trans_com_to_fre(graphmgr, taskdesc);
+ __vipx_taskdesc_trans_com_to_fre(gmgr, taskdesc);
/* itask cleanup */
- taskmgr_e_barrier_irqs(itaskmgr, 0, flag);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
vipx_task_trans_com_to_fre(itaskmgr, itask);
- taskmgr_x_barrier_irqr(itaskmgr, 0, flag);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
break;
default:
- BUG();
- break;
+ vipx_err("message of task is invalid (%d)\n", itask->message);
+ return;
}
- __vipx_graphmgr_sched(graphmgr);
- vipx_dbg("[%s:%d]\n", __func__, __LINE__);
+ __vipx_graphmgr_sched(gmgr);
+ vipx_leave();
}
-int vipx_graphmgr_grp_register(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph)
+void vipx_taskdesc_print(struct vipx_graphmgr *gmgr)
{
- int ret = 0;
- u32 index;
-
- BUG_ON(!graphmgr);
- BUG_ON(!graph);
+ vipx_enter();
+ vipx_leave();
+}
- mutex_lock(&graphmgr->mlock);
- for (index = 0; index < VIPX_MAX_GRAPH; index++) {
- if (!graphmgr->graph[index]) {
- graphmgr->graph[index] = graph;
+int vipx_graphmgr_grp_register(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph)
+{
+ int ret;
+ unsigned int index;
+
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ for (index = 0; index < VIPX_MAX_GRAPH; ++index) {
+ if (!gmgr->graph[index]) {
+ gmgr->graph[index] = graph;
graph->idx = index;
break;
}
}
- mutex_unlock(&graphmgr->mlock);
+ mutex_unlock(&gmgr->mlock);
if (index >= VIPX_MAX_GRAPH) {
- vipx_err("graph slot is lack\n");
ret = -EINVAL;
+ vipx_err("graph slot is lack\n");
goto p_err;
}
kthread_init_work(&graph->control.work, vipx_graph_thread);
for (index = 0; index < VIPX_MAX_TASK; ++index)
- kthread_init_work(&graph->taskmgr.task[index].work, vipx_graph_thread);
+ kthread_init_work(&graph->taskmgr.task[index].work,
+ vipx_graph_thread);
- graph->global_lock = &graphmgr->mlock;
- atomic_inc(&graphmgr->active_cnt);
+ graph->global_lock = &gmgr->mlock;
+ atomic_inc(&gmgr->active_cnt);
+ vipx_leave();
+ return 0;
p_err:
- vipx_info("[%s:%d]\n", __func__, __LINE__);
return ret;
}
-int vipx_graphmgr_grp_unregister(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph)
+int vipx_graphmgr_grp_unregister(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ gmgr->graph[graph->idx] = NULL;
+ mutex_unlock(&gmgr->mlock);
+ atomic_dec(&gmgr->active_cnt);
+ vipx_leave();
+ return 0;
+}
- BUG_ON(!graphmgr);
- BUG_ON(!graph);
+int vipx_graphmgr_grp_start(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph)
+{
+ int ret;
- mutex_lock(&graphmgr->mlock);
- graphmgr->graph[graph->idx] = NULL;
- mutex_unlock(&graphmgr->mlock);
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ if (!test_bit(VIPX_GRAPHMGR_ENUM, &gmgr->state)) {
+ ret = __vipx_graphmgr_itf_init(gmgr->interface, graph);
+ if (ret) {
+ mutex_unlock(&gmgr->mlock);
+ goto p_err_init;
+ }
+ set_bit(VIPX_GRAPHMGR_ENUM, &gmgr->state);
+ }
+ mutex_unlock(&gmgr->mlock);
- atomic_dec(&graphmgr->active_cnt);
+ ret = __vipx_graphmgr_itf_create(gmgr->interface, graph);
+ if (ret)
+ goto p_err_create;
- vipx_info("[%s:%d]\n", __func__, __LINE__);
+ ret = __vipx_graphmgr_itf_config(gmgr->interface, graph);
+ if (ret)
+ goto p_err_config;
+
+ vipx_leave();
+ return 0;
+p_err_config:
+p_err_create:
+p_err_init:
return ret;
}
-int vipx_graphmgr_grp_start(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph)
+int vipx_graphmgr_grp_stop(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph)
{
- int ret = 0;
+ int ret;
- mutex_lock(&graphmgr->mlock);
- if (test_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state)) {
- mutex_unlock(&graphmgr->mlock);
- goto p_skip_hw_init;
- }
+ vipx_enter();
+ ret = __vipx_graphmgr_itf_destroy(gmgr->interface, graph);
+ if (ret)
+ goto p_err_destroy;
- ret = __vipx_itf_init(graphmgr->interface, graph);
- if (ret) {
- mutex_unlock(&graphmgr->mlock);
- vipx_err("__vipx_itf_init is fail(%d)\n", ret);
- goto p_err;
+ mutex_lock(&gmgr->mlock);
+ if (test_bit(VIPX_GRAPHMGR_ENUM, &gmgr->state) &&
+ atomic_read(&gmgr->active_cnt) == 1) {
+ ret = __vipx_graphmgr_itf_deinit(gmgr->interface, graph);
+ if (ret) {
+ mutex_unlock(&gmgr->mlock);
+ goto p_err_deinit;
+ }
+ clear_bit(VIPX_GRAPHMGR_ENUM, &gmgr->state);
}
+ mutex_unlock(&gmgr->mlock);
- set_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state);
- mutex_unlock(&graphmgr->mlock);
+ vipx_leave();
+ return 0;
+p_err_deinit:
+p_err_destroy:
+ return ret;
+}
-p_skip_hw_init:
-#if 0
- ret = __vipx_itf_create(graphmgr->interface, graph);
- if (ret) {
- vipx_err("__vipx_itf_create is fail(%d)\n", ret);
- goto p_err;
- }
-#endif
- ret = __vipx_itf_config(graphmgr->interface, graph);
- if (ret) {
- vipx_err("__vipx_itf_config is fail(%d)\n", ret);
- goto p_err;
- }
+void vipx_graphmgr_queue(struct vipx_graphmgr *gmgr, struct vipx_task *task)
+{
+ vipx_enter();
+ kthread_queue_work(&gmgr->worker, &task->work);
+ vipx_leave();
+}
-p_err:
- vipx_info("[%s:%d]\n", __func__, __LINE__);
- return ret;
+/*
+ * Compare the model-ID field of @gmodel against the manager's currently
+ * registered model. All call sites dereference gmgr->current_model under
+ * gmgr->mlock, and this helper reads it too, so the caller must hold that
+ * lock (see vipx_graphmgr_register_model / unregister_model).
+ * Returns true when the two IDs are equal, false otherwise.
+ */
+static bool __vipx_graphmgr_check_model_id(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel)
+{
+ unsigned int current_id, id;
+ bool same;
+
+ vipx_enter();
+ /* extract the comparable model-ID field from each 32-bit id */
+ current_id = GET_COMMON_GRAPH_MODEL_ID(gmgr->current_model->id);
+ id = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+
+ if (current_id == id)
+ same = true;
+ else
+ same = false;
+
+ vipx_leave();
+ return same;
}
-int vipx_graphmgr_grp_stop(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph)
+static int __vipx_graphmgr_itf_load_graph(struct vipx_interface *itf,
+ struct vipx_graph_model *gmodel)
{
- int ret = 0;
+ int ret;
+ unsigned long flags;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_task *itask;
- ret = __vipx_itf_destroy(graphmgr->interface, graph);
- if (ret) {
- vipx_err("__vipx_itf_destroy is fail(%d)\n", ret);
- goto p_err;
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
+
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+ if (!itask) {
+ ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
}
- mutex_lock(&graphmgr->mlock);
- if (test_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state) &&
- atomic_read(&graphmgr->active_cnt) == 1) {
- ret = __vipx_itf_deinit(graphmgr->interface, graph);
- if (ret) {
- mutex_unlock(&graphmgr->mlock);
- vipx_err("__vipx_itf_deinit is fail(%d)\n", ret);
+ itask->id = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+ itask->message = VIPX_TASK_PROCESS;
+ itask->param0 = (unsigned long)&gmodel->common_ginfo;
+
+ ret = vipx_hw_load_graph(itf, itask);
+ if (ret)
+ goto p_err_hw_load_graph;
+
+ vipx_leave();
+ return 0;
+p_err_hw_load_graph:
+p_err_pick:
+ return ret;
+}
+
+int vipx_graphmgr_register_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel)
+{
+ int ret;
+
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ if (gmgr->current_model) {
+ if (!__vipx_graphmgr_check_model_id(gmgr, gmodel)) {
+ ret = -EBUSY;
+ vipx_err("model is already registered(%#x/%#x)\n",
+ gmgr->current_model->id, gmodel->id);
+ mutex_unlock(&gmgr->mlock);
goto p_err;
}
-
- clear_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state);
}
- mutex_unlock(&graphmgr->mlock);
+ gmgr->current_model = gmodel;
+ mutex_unlock(&gmgr->mlock);
+
+ ret = __vipx_graphmgr_itf_load_graph(gmgr->interface, gmodel);
+ if (ret)
+ goto p_err_register;
+
+ vipx_leave();
+ return 0;
+p_err_register:
+ mutex_lock(&gmgr->mlock);
+ gmgr->current_model = NULL;
+ mutex_unlock(&gmgr->mlock);
p_err:
- vipx_info("[%s:%d]\n", __func__, __LINE__);
return ret;
}
-int vipx_graphmgr_itf_register(struct vipx_graphmgr *graphmgr, struct vipx_interface *interface)
+static int __vipx_graphmgr_itf_unload_graph(struct vipx_interface *itf,
+ struct vipx_graph_model *gmodel)
{
- int ret = 0;
- u32 index;
+ int ret;
+ unsigned long flags;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_task *itask;
- BUG_ON(!graphmgr);
- BUG_ON(!interface);
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
- graphmgr->interface = interface;
- for (index = 0; index < VIPX_MAX_TASK; ++index)
- kthread_init_work(&interface->taskmgr.task[index].work, vipx_interface_thread);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+ if (!itask) {
+ ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
+ }
+
+ itask->id = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+ itask->message = VIPX_TASK_PROCESS;
+ itask->param0 = (unsigned long)&gmodel->common_ginfo;
- vipx_info("[%s:%d]\n", __func__, __LINE__);
+ ret = vipx_hw_unload_graph(itf, itask);
+ if (ret)
+ goto p_err_hw_unload_graph;
+
+ vipx_leave();
+ return 0;
+p_err_hw_unload_graph:
+p_err_pick:
return ret;
}
-int vipx_graphmgr_itf_unregister(struct vipx_graphmgr *graphmgr, struct vipx_interface *interface)
+int vipx_graphmgr_unregister_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel)
{
- int ret = 0;
- BUG_ON(!graphmgr);
- BUG_ON(!interface);
+ int ret;
+ struct vipx_graph_model *current_gmodel;
+
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ if (!gmgr->current_model) {
+ ret = -EFAULT;
+ vipx_err("model of manager is empty (%#x)\n", gmodel->id);
+ mutex_unlock(&gmgr->mlock);
+ goto p_err;
+ }
+
+ if (!__vipx_graphmgr_check_model_id(gmgr, gmodel)) {
+ ret = -EINVAL;
+ vipx_err("model of manager is different (%#x/%#x)\n",
+ gmgr->current_model->id, gmodel->id);
+ mutex_unlock(&gmgr->mlock);
+ goto p_err;
+ }
- graphmgr->interface = NULL;
+ current_gmodel = gmgr->current_model;
+ gmgr->current_model = NULL;
+ mutex_unlock(&gmgr->mlock);
- vipx_info("[%s:%d]\n", __func__, __LINE__);
+ ret = __vipx_graphmgr_itf_unload_graph(gmgr->interface, current_gmodel);
+ if (ret)
+ goto p_err_unregister;
+
+ vipx_leave();
+ return 0;
+p_err_unregister:
+p_err:
return ret;
}
-void vipx_graphmgr_queue(struct vipx_graphmgr *graphmgr, struct vipx_task *task)
+static int __vipx_graphmgr_itf_execute_graph(struct vipx_interface *itf,
+ struct vipx_graph_model *gmodel,
+ struct vipx_common_execute_info *einfo)
{
- BUG_ON(!graphmgr);
- BUG_ON(!task);
+ int ret;
+ unsigned long flags;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_task *itask;
+
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
+
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ itask = vipx_task_pick_fre_to_req(itaskmgr);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+ if (!itask) {
+ ret = -ENOMEM;
+ vipx_err("itask is NULL\n");
+ goto p_err_pick;
+ }
+
+ itask->id = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+ itask->message = VIPX_TASK_PROCESS;
+ itask->param0 = (unsigned long)einfo;
+ itask->param1 = (unsigned long)gmodel;
+
+ ret = vipx_hw_execute_graph(itf, itask);
+ if (ret)
+ goto p_err_hw_load_graph;
- kthread_queue_work(&graphmgr->worker, &task->work);
- vipx_dbg("[%s:%d]\n", __func__, __LINE__);
+ vipx_leave();
+ return 0;
+p_err_hw_load_graph:
+p_err_pick:
+ return ret;
}
-int vipx_graphmgr_probe(struct vipx_graphmgr *graphmgr)
+int vipx_graphmgr_execute_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel,
+ struct vipx_common_execute_info *einfo)
{
- int ret = 0;
- u32 index;
- struct vipx_device *device = container_of(graphmgr, struct vipx_device, graphmgr);
-
- BUG_ON(!graphmgr);
- BUG_ON(!device);
-
- graphmgr->tick_cnt = 0;
- graphmgr->tick_pos = 0;
- graphmgr->sched_cnt = 0;
- graphmgr->sched_pos = 0;
- graphmgr->task_graph = NULL;
-#ifdef USE_TASK_TIMER
- graphmgr->task_timer = NULL;
-#endif
- atomic_set(&graphmgr->active_cnt, 0);
- mutex_init(&graphmgr->mlock);
- mutex_init(&graphmgr->tdlock);
- clear_bit(VIPX_GRAPHMGR_OPEN, &graphmgr->state);
- clear_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state);
-
- for (index = 0; index < VIPX_MAX_GRAPH; ++index)
- graphmgr->graph[index] = NULL;
-
- INIT_LIST_HEAD(&graphmgr->tdfre_list);
- INIT_LIST_HEAD(&graphmgr->tdrdy_list);
- INIT_LIST_HEAD(&graphmgr->tdreq_list);
- INIT_LIST_HEAD(&graphmgr->tdalc_list);
- INIT_LIST_HEAD(&graphmgr->tdpro_list);
- INIT_LIST_HEAD(&graphmgr->tdcom_list);
-
- graphmgr->tdfre_cnt = 0;
- graphmgr->tdrdy_cnt = 0;
- graphmgr->tdreq_cnt = 0;
- graphmgr->tdalc_cnt = 0;
- graphmgr->tdpro_cnt = 0;
- graphmgr->tdcom_cnt = 0;
-
- for (index = 0; index < VIPX_MAX_TASKDESC; ++index) {
- graphmgr->taskdesc[index].index = index;
- graphmgr->taskdesc[index].graph = NULL;
- graphmgr->taskdesc[index].task = NULL;
- graphmgr->taskdesc[index].state = VIPX_TASKDESC_STATE_INVALID;
- __vipx_taskdesc_s_free(graphmgr, &graphmgr->taskdesc[index]);
+ int ret;
+
+ vipx_enter();
+ mutex_lock(&gmgr->mlock);
+ if (gmgr->current_model) {
+ if (!__vipx_graphmgr_check_model_id(gmgr, gmodel)) {
+ ret = -EBUSY;
+ vipx_err("other model is registered(%#x/%#x)\n",
+ gmgr->current_model->id, gmodel->id);
+ mutex_unlock(&gmgr->mlock);
+ goto p_err;
+ }
}
- vipx_info("[%s:%d]\n", __func__, __LINE__);
+ gmgr->current_model = gmodel;
+ mutex_unlock(&gmgr->mlock);
+
+ ret = __vipx_graphmgr_itf_execute_graph(gmgr->interface, gmodel, einfo);
+ if (ret)
+ goto p_err_register;
+
+ vipx_leave();
+ return 0;
+p_err_register:
+ mutex_lock(&gmgr->mlock);
+ gmgr->current_model = NULL;
+ mutex_unlock(&gmgr->mlock);
+p_err:
return ret;
}
-int vipx_graphmgr_open(struct vipx_graphmgr *graphmgr)
+
+int vipx_graphmgr_open(struct vipx_graphmgr *gmgr)
{
- int ret = 0;
+ int ret;
char name[30];
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
- BUG_ON(!graphmgr);
-
- kthread_init_worker(&graphmgr->worker);
+ vipx_enter();
+ kthread_init_worker(&gmgr->worker);
snprintf(name, sizeof(name), "vipx_graph");
- graphmgr->task_graph = kthread_run(kthread_worker_fn, &graphmgr->worker, name);
- if (IS_ERR_OR_NULL(graphmgr->task_graph)) {
- vipx_err("kthread_run is fail\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = sched_setscheduler_nocheck(graphmgr->task_graph, SCHED_FIFO, ¶m);
- if (ret) {
- vipx_err("sched_setscheduler_nocheck is fail(%d)\n", ret);
- goto p_err;
- }
-
-#ifdef USE_TASK_TIMER
- snprintf(name, sizeof(name), "vipx_timer");
- graphmgr->task_timer = kthread_run(vipx_time_thread, graphmgr, name);
- if (IS_ERR_OR_NULL(graphmgr->task_timer)) {
+ gmgr->graph_task = kthread_run(kthread_worker_fn,
+ &gmgr->worker, name);
+ if (IS_ERR(gmgr->graph_task)) {
+ ret = PTR_ERR(gmgr->graph_task);
vipx_err("kthread_run is fail\n");
- ret = -EINVAL;
- goto p_err;
+ goto p_err_run;
}
- ret = sched_setscheduler_nocheck(graphmgr->task_timer, SCHED_FIFO, ¶m);
+ ret = sched_setscheduler_nocheck(gmgr->graph_task,
+ SCHED_FIFO, ¶m);
if (ret) {
vipx_err("sched_setscheduler_nocheck is fail(%d)\n", ret);
- goto p_err;
+ goto p_err_sched;
}
-#endif
-
- set_bit(VIPX_GRAPHMGR_OPEN, &graphmgr->state);
-p_err:
- vipx_info("[%s:%d]\n", __func__, __LINE__);
+ gmgr->current_model = NULL;
+ set_bit(VIPX_GRAPHMGR_OPEN, &gmgr->state);
+ vipx_leave();
+ return 0;
+p_err_sched:
+ kthread_stop(gmgr->graph_task);
+p_err_run:
return ret;
}
-int vipx_graphmgr_close(struct vipx_graphmgr *graphmgr)
+int vipx_graphmgr_close(struct vipx_graphmgr *gmgr)
{
- int ret = 0;
+ vipx_enter();
+ kthread_stop(gmgr->graph_task);
+ clear_bit(VIPX_GRAPHMGR_OPEN, &gmgr->state);
+ clear_bit(VIPX_GRAPHMGR_ENUM, &gmgr->state);
+ vipx_leave();
+ return 0;
+}
-#ifdef USE_TASK_TIMER
- kthread_stop(graphmgr->task_timer);
-#endif
- kthread_stop(graphmgr->task_graph);
+int vipx_graphmgr_probe(struct vipx_system *sys)
+{
+ struct vipx_graphmgr *gmgr;
+ int index;
- clear_bit(VIPX_GRAPHMGR_OPEN, &graphmgr->state);
- clear_bit(VIPX_GRAPHMGR_ENUM, &graphmgr->state);
+ vipx_enter();
+ gmgr = &sys->graphmgr;
+ gmgr->interface = &sys->interface;
- vipx_info("[%s:%d]\n", __func__, __LINE__);
- return ret;
+ atomic_set(&gmgr->active_cnt, 0);
+ mutex_init(&gmgr->mlock);
+ mutex_init(&gmgr->tdlock);
+
+ INIT_LIST_HEAD(&gmgr->tdfre_list);
+ INIT_LIST_HEAD(&gmgr->tdrdy_list);
+ INIT_LIST_HEAD(&gmgr->tdreq_list);
+ INIT_LIST_HEAD(&gmgr->tdalc_list);
+ INIT_LIST_HEAD(&gmgr->tdpro_list);
+ INIT_LIST_HEAD(&gmgr->tdcom_list);
+
+ for (index = 0; index < VIPX_MAX_TASKDESC; ++index) {
+ gmgr->taskdesc[index].index = index;
+ __vipx_taskdesc_set_free(gmgr, &gmgr->taskdesc[index]);
+ }
+
+ for (index = 0; index < VIPX_MAX_TASK; ++index)
+ kthread_init_work(&gmgr->interface->taskmgr.task[index].work,
+ vipx_interface_thread);
+
+ return 0;
+}
+
+void vipx_graphmgr_remove(struct vipx_graphmgr *gmgr)
+{
+ mutex_destroy(&gmgr->tdlock);
+ mutex_destroy(&gmgr->mlock);
}
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#ifndef VIPX_GRAPHMGR_H_
-#define VIPX_GRAPHMGR_H_
+#ifndef __VIPX_GRAPHMGR_H___
+#define __VIPX_GRAPHMGR_H___
#include <linux/kthread.h>
#include <linux/types.h>
+#include "vipx-config.h"
#include "vipx-taskmgr.h"
#include "vipx-graph.h"
#include "vipx-interface.h"
-#define VIPX_MAX_TASKDESC (VIPX_MAX_GRAPH * 2)
+struct vipx_system;
enum vipx_taskdesc_state {
- VIPX_TASKDESC_STATE_INVALID,
VIPX_TASKDESC_STATE_FREE,
VIPX_TASKDESC_STATE_READY,
VIPX_TASKDESC_STATE_REQUEST,
};
struct vipx_taskdesc {
- struct list_head list;
- u32 index;
- u32 priority;
- struct vipx_graph *graph;
- struct vipx_task *task;
- u32 state;
+ struct list_head list;
+ unsigned int index;
+ unsigned int priority;
+ struct vipx_graph *graph;
+ struct vipx_task *task;
+ unsigned int state;
};
enum vipx_graphmgr_state {
};
struct vipx_graphmgr {
- struct vipx_graph *graph[VIPX_MAX_GRAPH];
- atomic_t active_cnt;
- unsigned long state;
- struct mutex mlock;
-
- u32 tick_cnt;
- u32 tick_pos;
- u32 sched_cnt;
- u32 sched_pos;
- struct kthread_worker worker;
- struct task_struct *task_graph;
- struct task_struct *task_timer;
-
- struct vipx_interface *interface;
-
- struct mutex tdlock;
- struct vipx_taskdesc taskdesc[VIPX_MAX_TASKDESC];
- struct list_head tdfre_list;
- struct list_head tdrdy_list;
- struct list_head tdreq_list;
- struct list_head tdalc_list;
- struct list_head tdpro_list;
- struct list_head tdcom_list;
- u32 tdfre_cnt;
- u32 tdrdy_cnt;
- u32 tdreq_cnt;
- u32 tdalc_cnt;
- u32 tdpro_cnt;
- u32 tdcom_cnt;
+ struct vipx_graph *graph[VIPX_MAX_GRAPH];
+ atomic_t active_cnt;
+ unsigned long state;
+ struct mutex mlock;
+
+ unsigned int tick_cnt;
+ unsigned int tick_pos;
+ unsigned int sched_cnt;
+ unsigned int sched_pos;
+ struct kthread_worker worker;
+ struct task_struct *graph_task;
+
+ struct vipx_interface *interface;
+
+ struct mutex tdlock;
+ struct vipx_taskdesc taskdesc[VIPX_MAX_TASKDESC];
+ struct list_head tdfre_list;
+ struct list_head tdrdy_list;
+ struct list_head tdreq_list;
+ struct list_head tdalc_list;
+ struct list_head tdpro_list;
+ struct list_head tdcom_list;
+ unsigned int tdfre_cnt;
+ unsigned int tdrdy_cnt;
+ unsigned int tdreq_cnt;
+ unsigned int tdalc_cnt;
+ unsigned int tdpro_cnt;
+ unsigned int tdcom_cnt;
+
+ struct vipx_graph_model *current_model;
};
-void vipx_taskdesc_print(struct vipx_graphmgr *graphmgr);
-int vipx_graphmgr_probe(struct vipx_graphmgr *graphmgr);
-int vipx_graphmgr_open(struct vipx_graphmgr *graphmgr);
-int vipx_graphmgr_close(struct vipx_graphmgr *graphmgr);
-int vipx_graphmgr_grp_register(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph);
-int vipx_graphmgr_grp_unregister(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph);
-int vipx_graphmgr_grp_start(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph);
-int vipx_graphmgr_grp_stop(struct vipx_graphmgr *graphmgr, struct vipx_graph *graph);
-int vipx_graphmgr_itf_register(struct vipx_graphmgr *graphmgr, struct vipx_interface *interface);
-int vipx_graphmgr_itf_unregister(struct vipx_graphmgr *graphmgr, struct vipx_interface *interface);
-void vipx_graphmgr_queue(struct vipx_graphmgr *graphmgr, struct vipx_task *task);
+void vipx_taskdesc_print(struct vipx_graphmgr *gmgr);
+int vipx_graphmgr_grp_register(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph);
+int vipx_graphmgr_grp_unregister(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph);
+int vipx_graphmgr_grp_start(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph);
+int vipx_graphmgr_grp_stop(struct vipx_graphmgr *gmgr,
+ struct vipx_graph *graph);
+void vipx_graphmgr_queue(struct vipx_graphmgr *gmgr, struct vipx_task *task);
+
+int vipx_graphmgr_register_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel);
+int vipx_graphmgr_unregister_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel);
+int vipx_graphmgr_execute_model(struct vipx_graphmgr *gmgr,
+ struct vipx_graph_model *gmodel,
+ struct vipx_common_execute_info *einfo);
+
+int vipx_graphmgr_open(struct vipx_graphmgr *gmgr);
+int vipx_graphmgr_close(struct vipx_graphmgr *gmgr);
+
+int vipx_graphmgr_probe(struct vipx_system *sys);
+void vipx_graphmgr_remove(struct vipx_graphmgr *gmgr);
+
#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "vipx-log.h"
+#include "vipx-device.h"
+#include "vipx-binary.h"
+#include "vipx-queue.h"
+#include "vipx-graphmgr.h"
+#include "vipx-memory.h"
+#include "vipx-mailbox.h"
+#include "vipx-message.h"
+#include "vipx-kernel-binary.h"
+#include "vipx-system.h"
+#include "vipx-interface.h"
+
+/*
+ * Request barrier: per-task mutex serializing user-side requests.
+ * Process barrier: irq-disabling spinlock guarding itf->process, the
+ * single in-flight mailbox transaction tracked by the interface.
+ */
+#define enter_request_barrier(task) mutex_lock(task->lock)
+#define exit_request_barrier(task) mutex_unlock(task->lock)
+#define enter_process_barrier(itf) spin_lock_irq(&itf->process_barrier)
+#define exit_process_barrier(itf) spin_unlock_irq(&itf->process_barrier)
+
+/*
+ * Mark the reply slot for graph index @gidx as valid and wake anyone
+ * sleeping on the interface reply waitqueue.
+ */
+static inline void __vipx_interface_set_reply(struct vipx_interface *itf,
+ unsigned int gidx)
+{
+ vipx_enter();
+ itf->reply[gidx].valid = 1;
+ wake_up(&itf->reply_wq);
+ vipx_leave();
+}
+
+/* Invalidate the reply slot for graph index @gidx (no wakeup needed). */
+static inline void __vipx_interface_clear_reply(struct vipx_interface *itf,
+ unsigned int gidx)
+{
+ vipx_enter();
+ itf->reply[gidx].valid = 0;
+ vipx_leave();
+}
+
+/*
+ * Raise the host-to-device doorbell interrupt (IRQ1_TO_DEVICE).
+ * Busy-waits (up to 100 x 1us) for the previous interrupt value to be
+ * cleared by the device before writing 0x100; if it never clears, the
+ * send is abandoned with an error log rather than overwriting the
+ * pending value.
+ */
+static void __vipx_interface_send_interrupt(struct vipx_interface *itf)
+{
+ struct vipx_system *sys;
+ int try_count = 100;
+ unsigned int val;
+
+ vipx_enter();
+ sys = itf->system;
+
+ /* Check interrupt clear */
+ while (try_count) {
+ val = sys->ctrl_ops->get_irq(sys, IRQ1_TO_DEVICE);
+ if (!val)
+ break;
+
+ vipx_warn("irq1(0x%x) is not yet clear (%u)\n", val, try_count);
+ udelay(1);
+ try_count--;
+ }
+
+ if (!val)
+ sys->ctrl_ops->set_irq(sys, IRQ1_TO_DEVICE, 0x100);
+ else
+ vipx_err("Failed to send interrupt(0x%x)\n", val);
+
+ vipx_leave();
+}
+
+/*
+ * Poll for the firmware (CM7) bootup flag VIPX_ITF_STATE_BOOTUP.
+ * Busy-waits up to 1000 x 100us (~100ms total). On timeout, dumps the
+ * binary log and debug registers and returns -ETIMEDOUT; returns 0 once
+ * the flag is observed set.
+ */
+int vipx_hw_wait_bootup(struct vipx_interface *itf)
+{
+ int ret;
+ int try_cnt = 1000;
+
+ vipx_enter();
+ while (!test_bit(VIPX_ITF_STATE_BOOTUP, &itf->state)) {
+ if (!try_cnt)
+ break;
+
+ udelay(100);
+ try_cnt--;
+ }
+
+ if (!test_bit(VIPX_ITF_STATE_BOOTUP, &itf->state)) {
+ vipx_debug_write_log_binary();
+ vipx_debug_dump_debug_regs();
+ ret = -ETIMEDOUT;
+ vipx_err("Failed to boot CM7 (%d)\n", ret);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+int vipx_hw_config(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+/* No-op placeholder for the process hw op; always returns 0. */
+int vipx_hw_process(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+int vipx_hw_create(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+/* No-op placeholder for the destroy hw op; always returns 0. */
+int vipx_hw_destroy(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+/* No-op placeholder for the init hw op; always returns 0. */
+int vipx_hw_init(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+/* No-op placeholder for the deinit hw op; always returns 0. */
+int vipx_hw_deinit(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
+
+/*
+ * Sleep on itf->reply_wq until the device reply path marks @itask as
+ * VIPX_TASK_DONE, bounded by VIPX_COMMAND_TIMEOUT (jiffies).
+ * On timeout, dumps the binary log and debug registers and returns
+ * -ETIMEDOUT; returns 0 on a completed reply.
+ */
+static int __vipx_interface_wait_mailbox_reply(struct vipx_interface *itf,
+ struct vipx_task *itask)
+{
+ int ret;
+ long timeout;
+
+ vipx_enter();
+ timeout = wait_event_timeout(itf->reply_wq,
+ (itask->message == VIPX_TASK_DONE),
+ VIPX_COMMAND_TIMEOUT);
+ if (!timeout) {
+ vipx_debug_write_log_binary();
+ vipx_debug_dump_debug_regs();
+ ret = -ETIMEDOUT;
+ vipx_err("wait time(%u ms) is expired (%u)\n",
+ jiffies_to_msecs(VIPX_COMMAND_TIMEOUT),
+ itask->id);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/*
+ * Synchronously send the mailbox message described by @itask
+ * (param1 = message pointer, param2 = size, param3 = mailbox type)
+ * and wait for the device reply.
+ *
+ * itf->process (the single in-flight transaction) is published under the
+ * process barrier; on any failure before the wait, the task is rolled
+ * back REQ->FRE and itf->process is cleared. On the success path the
+ * task transitions REQ->PRO before waiting and PRO->FRE afterwards.
+ * Returns 0 on a completed round-trip, a negative errno otherwise.
+ * NOTE(review): with TEMP_RT_FRAMEWORK_TEST defined, the mailbox and
+ * interrupt are skipped and the reply is faked as VIPX_TASK_DONE.
+ */
+static int __vipx_interface_send_mailbox(struct vipx_interface *itf,
+ struct vipx_task *itask)
+{
+ int ret;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_mailbox_ctrl *mctrl;
+ void *msg;
+ unsigned long size, type;
+ unsigned long flags;
+
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
+ mctrl = itf->mctrl;
+
+ /* unpack the message descriptor stashed in the task by the caller */
+ msg = (void *)itask->param1;
+ size = itask->param2;
+ type = itask->param3;
+
+ enter_process_barrier(itf);
+ itf->process = itask;
+
+ ret = 0;
+#ifndef TEMP_RT_FRAMEWORK_TEST
+ ret = vipx_mailbox_check_full(mctrl, type, MAILBOX_WAIT);
+#endif
+ if (ret) {
+ vipx_err("mailbox is full (%lu/%u)\n", type, itask->id);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ vipx_task_trans_req_to_fre(itaskmgr, itask);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+ goto p_err_process;
+ }
+
+#ifndef TEMP_RT_FRAMEWORK_TEST
+ ret = vipx_mailbox_write(mctrl, type, msg, size);
+#endif
+ if (ret) {
+ vipx_err("Failed to write to mailbox (%lu/%u)\n",
+ type, itask->id);
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ vipx_task_trans_req_to_fre(itaskmgr, itask);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+ goto p_err_process;
+ }
+
+#ifndef TEMP_RT_FRAMEWORK_TEST
+ /* ring the doorbell so the device consumes the new message */
+ __vipx_interface_send_interrupt(itf);
+#endif
+ exit_process_barrier(itf);
+
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ vipx_task_trans_req_to_pro(itaskmgr, itask);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+ itask->message = VIPX_TASK_DONE;
+#endif
+ ret = __vipx_interface_wait_mailbox_reply(itf, itask);
+
+ taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+ vipx_task_trans_pro_to_fre(itaskmgr, itask);
+ taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+ enter_process_barrier(itf);
+ itf->process = NULL;
+ exit_process_barrier(itf);
+
+ vipx_leave();
+ return ret;
+p_err_process:
+ itf->process = NULL;
+ exit_process_barrier(itf);
+ return ret;
+}
+
+/*
+ * Build an H2D_LOAD_GRAPH_REQ mailbox message from the common graph info
+ * stashed in itask->param0 and send it synchronously to the device.
+ * The message lives on the stack; this is safe because
+ * __vipx_interface_send_mailbox waits for the reply before returning.
+ * NOTE: itask->param1..param3 are repurposed here to carry the message
+ * pointer, size and mailbox type for the send helper.
+ * Returns 0 on success, a negative errno from the send path otherwise.
+ */
+int vipx_hw_load_graph(struct vipx_interface *itf, struct vipx_task *itask)
+{
+ int ret;
+ struct vipx_taskmgr *itaskmgr;
+ struct vipx_h2d_message msg;
+ struct vipx_h2d_load_graph_req *req;
+ struct vipx_common_graph_info *ginfo;
+
+ vipx_enter();
+ itaskmgr = &itf->taskmgr;
+
+ msg.head.mbox_version = MAILBOX_VERSION;
+ msg.head.msg_version = MESSAGE_VERSION;
+ msg.head.msg_type = NORMAL_MSG;
+ msg.head.msg_size = sizeof(*req);
+ msg.head.trans_id = itask->id;
+ msg.head.msg_id = H2D_LOAD_GRAPH_REQ;
+
+ req = &msg.body.load_graph_req;
+ ginfo = (struct vipx_common_graph_info *)itask->param0;
+ req->graph_info = *ginfo;
+
+ itask->param1 = (unsigned long)&msg;
+ itask->param2 = sizeof(msg);
+ itask->param3 = NORMAL_MSG;
+
+#ifdef DEBUG_LOG_REQUEST_DUMP
+ vipx_dbg("< load graph request >\n");
+ vipx_dbg("print graph info of load_graph_req\n");
+ for (ret = 0; ret < sizeof(req->graph_info) >> 2; ++ret)
+ vipx_dbg("[%3d] %#10x\n", ret, ((int *)&req->graph_info)[ret]);
+#endif
+ ret = __vipx_interface_send_mailbox(itf, itask);
+ if (ret)
+ goto p_err;
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/*
+ * Build an H2D_EXECUTE_REQ mailbox message from itask->param0 (execute
+ * info) and itask->param1 (the graph model whose kernel binaries are
+ * attached to the request), then submit it through the mailbox send path.
+ * Returns 0 on success or a negative error code from the send path.
+ */
+int vipx_hw_execute_graph(struct vipx_interface *itf, struct vipx_task *itask)
+{
+    int ret;
+    struct vipx_taskmgr *itaskmgr;
+    struct vipx_h2d_message msg;
+    struct vipx_h2d_execute_req *req;
+    struct vipx_common_execute_info *einfo;
+    struct vipx_graph_model *gmodel;
+    struct vipx_kernel_binary *kbin, *temp;
+    int idx = 0;
+
+    vipx_enter();
+    itaskmgr = &itf->taskmgr;
+
+    msg.head.mbox_version = MAILBOX_VERSION;
+    msg.head.msg_version = MESSAGE_VERSION;
+    msg.head.msg_type = NORMAL_MSG;
+    msg.head.msg_size = sizeof(*req);
+    msg.head.trans_id = itask->id;
+    msg.head.msg_id = H2D_EXECUTE_REQ;
+
+    req = &msg.body.execute_req;
+    einfo = (struct vipx_common_execute_info *)itask->param0;
+    req->execute_info = *einfo;
+
+    gmodel = (struct vipx_graph_model *)itask->param1;
+    req->num_kernel_bin = gmodel->kbin_count;
+    /* TODO check max count: idx is not bounded against the size of
+     * req->kernel_bin[], so an oversized kbin_list would overrun it.
+     */
+    list_for_each_entry_safe(kbin, temp, &gmodel->kbin_list, glist) {
+        req->kernel_bin[idx].addr = kbin->buffer.dvaddr;
+        req->kernel_bin[idx].size = kbin->buffer.size;
+        idx++;
+    }
+
+    /* msg is stack-local; the send path must consume it synchronously */
+    itask->param1 = (unsigned long)&msg;
+    itask->param2 = sizeof(msg);
+    itask->param3 = NORMAL_MSG;
+
+#ifdef DEBUG_LOG_REQUEST_DUMP
+    /* debug-only dump; 'ret' is reused as a loop counter here */
+    vipx_dbg("< execute graph request >\n");
+    vipx_dbg("print kernel_bin and execute_info of execute_graph_req\n");
+    vipx_dbg("num_kernel_bin : %d\n", req->num_kernel_bin);
+    for (ret = 0; ret < req->num_kernel_bin; ++ret) {
+        vipx_dbg("[%3d] addr : %#10x\n",
+                ret, req->kernel_bin[ret].addr);
+        vipx_dbg("[%3d] size : %#10x\n",
+                ret, req->kernel_bin[ret].size);
+    }
+
+    vipx_dbg("gid : %d\n", req->execute_info.gid);
+    vipx_dbg("macro_sc_offset : %d\n", req->execute_info.macro_sg_offset);
+    vipx_dbg("num_input : %d\n", req->execute_info.num_input);
+    for (ret = 0; ret < req->execute_info.num_input; ++ret) {
+        int *buffer = (int *)&req->execute_info.input[ret][0];
+
+        for (idx = 0; idx < sizeof(req->execute_info.input[0][0]) >> 2;
+                ++idx) {
+            vipx_dbg("[%3d] %#10x\n", ret, buffer[idx]);
+        }
+    }
+    /* BUGFIX: this label previously said "num_input" although the value
+     * printed is num_output (copy-paste error in the debug log).
+     */
+    vipx_dbg("num_output : %d\n", req->execute_info.num_output);
+    for (ret = 0; ret < req->execute_info.num_output; ++ret) {
+        int *buffer = (int *)&req->execute_info.output[ret][0];
+
+        for (idx = 0; idx < sizeof(req->execute_info.output[0][0]) >> 2;
+                ++idx) {
+            vipx_dbg("[%3d] %#10x\n", ret, buffer[idx]);
+        }
+    }
+#endif
+    ret = __vipx_interface_send_mailbox(itf, itask);
+    if (ret)
+        goto p_err;
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/*
+ * Build an H2D_UNLOAD_GRAPH_REQ mailbox message carrying only the graph
+ * id taken from itask->param0 (a vipx_common_graph_info pointer) and
+ * submit it. Returns 0 on success or a negative error from the send path.
+ */
+int vipx_hw_unload_graph(struct vipx_interface *itf, struct vipx_task *itask)
+{
+    int ret;
+    struct vipx_taskmgr *itaskmgr;
+    struct vipx_h2d_message msg;
+    struct vipx_h2d_unload_graph_req *req;
+    struct vipx_common_graph_info *ginfo;
+
+    vipx_enter();
+    itaskmgr = &itf->taskmgr;
+
+    msg.head.mbox_version = MAILBOX_VERSION;
+    msg.head.msg_version = MESSAGE_VERSION;
+    msg.head.msg_type = NORMAL_MSG;
+    msg.head.msg_size = sizeof(*req);
+    msg.head.trans_id = itask->id;
+    msg.head.msg_id = H2D_UNLOAD_GRAPH_REQ;
+
+    req = &msg.body.unload_graph_req;
+    ginfo = (struct vipx_common_graph_info *)itask->param0;
+    req->gid = ginfo->gid;
+
+    /* msg is stack-local; the send path must consume it synchronously */
+    itask->param1 = (unsigned long)&msg;
+    itask->param2 = sizeof(msg);
+    itask->param3 = NORMAL_MSG;
+
+#ifdef DEBUG_LOG_REQUEST_DUMP
+    vipx_dbg("< unload graph request >\n");
+    vipx_dbg("print gid of unload_graph_req\n");
+    vipx_dbg("[gid] %#10x\n", req->gid);
+#endif
+    ret = __vipx_interface_send_mailbox(itf, itask);
+    if (ret)
+        goto p_err;
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* Return a work item to the free list (IRQ-safe via the list spinlock). */
+static void __vipx_interface_work_list_queue_free(
+        struct vipx_work_list *work_list, struct vipx_work *work)
+{
+    unsigned long flags;
+
+    vipx_enter();
+    spin_lock_irqsave(&work_list->slock, flags);
+    list_add_tail(&work->list, &work_list->free_head);
+    work_list->free_cnt++;
+    spin_unlock_irqrestore(&work_list->slock, flags);
+    vipx_leave();
+}
+
+/*
+ * Take one work item off the free list, or return NULL (with an error
+ * log) if the pool is exhausted. IRQ-safe via the list spinlock.
+ */
+static struct vipx_work *__vipx_interface_work_list_dequeue_free(
+        struct vipx_work_list *work_list)
+{
+    unsigned long flags;
+    struct vipx_work *work;
+
+    vipx_enter();
+    spin_lock_irqsave(&work_list->slock, flags);
+    if (work_list->free_cnt) {
+        work = list_first_entry(&work_list->free_head, struct vipx_work,
+                list);
+        list_del(&work->list);
+        work_list->free_cnt--;
+    } else {
+        work = NULL;
+        vipx_err("free work list empty\n");
+    }
+    spin_unlock_irqrestore(&work_list->slock, flags);
+
+    vipx_leave();
+    return work;
+}
+
+/* Append a filled-in work item to the reply list (IRQ-safe). */
+static void __vipx_interface_work_list_queue_reply(
+        struct vipx_work_list *work_list, struct vipx_work *work)
+{
+    unsigned long flags;
+
+    vipx_enter();
+    spin_lock_irqsave(&work_list->slock, flags);
+    list_add_tail(&work->list, &work_list->reply_head);
+    work_list->reply_cnt++;
+    spin_unlock_irqrestore(&work_list->slock, flags);
+    vipx_leave();
+}
+
+/*
+ * Take one work item off the reply list, or return NULL when it is empty
+ * (empty is the normal loop-termination case, hence no error log).
+ */
+static struct vipx_work *__vipx_interface_work_list_dequeue_reply(
+        struct vipx_work_list *work_list)
+{
+    unsigned long flags;
+    struct vipx_work *work;
+
+    vipx_enter();
+    spin_lock_irqsave(&work_list->slock, flags);
+    if (work_list->reply_cnt) {
+        work = list_first_entry(&work_list->reply_head,
+                struct vipx_work, list);
+        list_del(&work->list);
+        work_list->reply_cnt--;
+    } else {
+        work = NULL;
+    }
+    spin_unlock_irqrestore(&work_list->slock, flags);
+
+    vipx_leave();
+    return work;
+}
+
+/*
+ * Initialize the work-list lock and both queues, then seed the free list
+ * with the first 'count' entries of the embedded work[] pool.
+ */
+static void __vipx_interface_work_list_init(struct vipx_work_list *work_list,
+        unsigned int count)
+{
+    unsigned int idx;
+
+    vipx_enter();
+    spin_lock_init(&work_list->slock);
+    INIT_LIST_HEAD(&work_list->free_head);
+    work_list->free_cnt = 0;
+    INIT_LIST_HEAD(&work_list->reply_head);
+    work_list->reply_cnt = 0;
+
+    for (idx = 0; idx < count; ++idx)
+        __vipx_interface_work_list_queue_free(work_list,
+                &work_list->work[idx]);
+
+    vipx_leave();
+}
+
+/*
+ * Bottom half of the device interrupt: drain the urgent and normal
+ * mailbox reply queues. Known normal-queue message ids (bootup
+ * notification and load/execute/unload responses) are completed inline
+ * by waking the process waiter; anything else is handed to the reply
+ * workqueue via the work list.
+ */
+static void __vipx_interface_isr(void *data)
+{
+    struct vipx_interface *itf;
+    struct vipx_mailbox_ctrl *mctrl;
+    struct vipx_work_list *work_list;
+    struct work_struct *work_queue;
+    struct vipx_work *work;
+    struct vipx_d2h_message rsp;
+
+    vipx_enter();
+    itf = (struct vipx_interface *)data;
+    mctrl = itf->mctrl;
+    work_list = &itf->work_list;
+    work_queue = &itf->work_queue;
+
+    /* urgent messages: no inline handling, always deferred to the
+     * workqueue (payload fields beyond trans_id are not yet filled in)
+     */
+    while (!vipx_mailbox_check_reply(mctrl, URGENT_MSG, 0)) {
+        vipx_mailbox_read(mctrl, URGENT_MSG, &rsp);
+
+
+        work = __vipx_interface_work_list_dequeue_free(work_list);
+        if (work) {
+            /* TODO */
+            work->id = rsp.head.trans_id;
+            work->message = 0;
+            work->param0 = 0;
+            work->param1 = 0;
+            work->param2 = 0;
+            work->param3 = 0;
+
+            __vipx_interface_work_list_queue_reply(work_list, work);
+
+            if (!work_pending(work_queue))
+                schedule_work(work_queue);
+        }
+    }
+
+    while (!vipx_mailbox_check_reply(mctrl, NORMAL_MSG, 0)) {
+        vipx_mailbox_read(mctrl, NORMAL_MSG, &rsp);
+
+        if (rsp.head.msg_id == D2H_BOOTUP_NTF) {
+            if (rsp.body.bootup_ntf.result == 0) {
+                vipx_dbg("CM7 bootup complete (%u)\n",
+                        rsp.head.trans_id);
+                set_bit(VIPX_ITF_STATE_BOOTUP, &itf->state);
+            } else {
+                /* TODO process boot fail */
+                vipx_err("CM7 bootup failed (%u,%d)\n",
+                        rsp.head.trans_id,
+                        rsp.body.bootup_ntf.result);
+            }
+            /* NOTE(review): 'break' stops draining the normal queue on a
+             * bootup notification while the RSP branches 'continue' -
+             * confirm this asymmetry is intentional.
+             */
+            break;
+        } else if (rsp.head.msg_id == D2H_LOAD_GRAPH_RSP) {
+            enter_process_barrier(itf);
+            if (!itf->process) {
+                vipx_err("process task is empty\n");
+                exit_process_barrier(itf);
+                continue;
+            }
+
+            vipx_dbg("D2H_LOAD_GRAPH_RSP : %x\n",
+                    rsp.body.load_graph_rsp.result);
+            vipx_dbg("D2H_LOAD_GRAPH_RSP(graph_id) : %d\n",
+                    rsp.body.load_graph_rsp.gid);
+            itf->process->message = VIPX_TASK_DONE;
+            wake_up(&itf->reply_wq);
+            exit_process_barrier(itf);
+            continue;
+        } else if (rsp.head.msg_id == D2H_EXECUTE_RSP) {
+            enter_process_barrier(itf);
+            if (!itf->process) {
+                vipx_err("process task is empty\n");
+                exit_process_barrier(itf);
+                continue;
+            }
+
+            vipx_dbg("D2H_EXECUTE_RSP : %x\n",
+                    rsp.body.execute_rsp.result);
+            vipx_dbg("D2H_EXECUTE_RSP(graph_id) : %d\n",
+                    rsp.body.execute_rsp.gid);
+            vipx_dbg("D2H_EXECUTE_RSP(macro) : %d\n",
+                    rsp.body.execute_rsp.macro_sgid);
+            itf->process->message = VIPX_TASK_DONE;
+            wake_up(&itf->reply_wq);
+            exit_process_barrier(itf);
+            continue;
+        } else if (rsp.head.msg_id == D2H_UNLOAD_GRAPH_RSP) {
+            enter_process_barrier(itf);
+            if (!itf->process) {
+                vipx_err("process task is empty\n");
+                exit_process_barrier(itf);
+                continue;
+            }
+
+            vipx_dbg("D2H_UNLOAD_GRAPH_RSP : %x\n",
+                    rsp.body.unload_graph_rsp.result);
+            vipx_dbg("D2H_UNLOAD_GRAPH_RSP(graph_id) : %d\n",
+                    rsp.body.unload_graph_rsp.gid);
+            itf->process->message = VIPX_TASK_DONE;
+            wake_up(&itf->reply_wq);
+            exit_process_barrier(itf);
+            continue;
+        }
+        /* unknown message id: defer to the reply workqueue */
+        work = __vipx_interface_work_list_dequeue_free(work_list);
+        if (work) {
+            /* TODO */
+            work->id = rsp.head.trans_id;
+            work->message = 0;
+            work->param0 = 0;
+            work->param1 = 0;
+            work->param2 = 0;
+            work->param3 = 0;
+
+            __vipx_interface_work_list_queue_reply(work_list, work);
+
+            if (!work_pending(work_queue))
+                schedule_work(work_queue);
+        }
+    }
+
+    vipx_leave();
+}
+
+/* TODO temp code */
+extern void *vertex_interface_data;
+extern irqreturn_t vertex_interface_isr0(int irq, void *data);
+extern irqreturn_t vertex_interface_isr1(int irq, void *data);
+
+/*
+ * Hard IRQ handler for the SS1 interrupt line. Checks bit 0 of the
+ * device IRQ status, acknowledges it, and routes handling either to this
+ * driver's ISR (when the interface is open) or to the legacy vertex
+ * driver's handler (shared hardware, temporary bridging - see externs).
+ */
+static irqreturn_t vipx_interface_isr0(int irq, void *data)
+{
+    struct vipx_interface *itf;
+    struct vipx_system *sys;
+    unsigned int val;
+
+    vipx_enter();
+    itf = (struct vipx_interface *)data;
+    sys = itf->system;
+
+    val = sys->ctrl_ops->get_irq(sys, IRQ_FROM_DEVICE);
+    if (val & 0x1) {
+        val &= ~(0x1);
+        sys->ctrl_ops->clear_irq(sys, IRQ_FROM_DEVICE, val);
+
+        if (test_bit(VIPX_ITF_STATE_OPEN, &itf->state))
+            __vipx_interface_isr(data);
+        else
+            vertex_interface_isr0(irq, vertex_interface_data);
+    }
+
+    vipx_leave();
+    return IRQ_HANDLED;
+}
+
+/*
+ * Hard IRQ handler for the SS2 interrupt line; identical to isr0 but
+ * keyed on bit 1 of the device IRQ status register.
+ */
+static irqreturn_t vipx_interface_isr1(int irq, void *data)
+{
+    struct vipx_interface *itf;
+    struct vipx_system *sys;
+    unsigned int val;
+
+    vipx_enter();
+    itf = (struct vipx_interface *)data;
+    sys = itf->system;
+
+    val = sys->ctrl_ops->get_irq(sys, IRQ_FROM_DEVICE);
+    if (val & (0x1 << 0x1)) {
+        val &= ~(0x1 << 0x1);
+        sys->ctrl_ops->clear_irq(sys, IRQ_FROM_DEVICE, val);
+
+        if (test_bit(VIPX_ITF_STATE_OPEN, &itf->state))
+            __vipx_interface_isr(data);
+        else
+            vertex_interface_isr1(irq, vertex_interface_data);
+    }
+
+    vipx_leave();
+    return IRQ_HANDLED;
+}
+
+/* Mark the interface as started; no hardware interaction. Always 0. */
+int vipx_interface_start(struct vipx_interface *itf)
+{
+    vipx_enter();
+    set_bit(VIPX_ITF_STATE_START, &itf->state);
+
+    vipx_leave();
+    return 0;
+}
+
+/*
+ * Force any tasks still in the request/process/complete states back to
+ * the free state so the task manager is empty before the interface stops.
+ * Logs a warning (plus a full dump) if anything was still outstanding.
+ */
+static void __vipx_interface_cleanup(struct vipx_interface *itf)
+{
+    struct vipx_taskmgr *taskmgr;
+    int task_count;
+
+    unsigned long flags;
+    unsigned int idx;
+    struct vipx_task *task;
+
+    vipx_enter();
+    taskmgr = &itf->taskmgr;
+
+    task_count = taskmgr->req_cnt + taskmgr->pro_cnt + taskmgr->com_cnt;
+    if (task_count) {
+        vipx_warn("count of task manager is not zero (%u/%u/%u)\n",
+                taskmgr->req_cnt, taskmgr->pro_cnt,
+                taskmgr->com_cnt);
+        /* TODO remove debug log */
+        vipx_task_print_all(&itf->taskmgr);
+
+        taskmgr_e_barrier_irqs(taskmgr, 0, flags);
+
+        /* index 0 is skipped - presumably reserved; confirm against
+         * vipx_task_init()
+         */
+        for (idx = 1; idx < taskmgr->tot_cnt; ++idx) {
+            task = &taskmgr->task[idx];
+            if (task->state == VIPX_TASK_STATE_FREE)
+                continue;
+
+            vipx_task_trans_any_to_fre(taskmgr, task);
+            task_count--;
+        }
+
+        taskmgr_x_barrier_irqr(taskmgr, 0, flags);
+    }
+}
+
+/* Drain leftover tasks and clear the BOOTUP/START state bits. Always 0. */
+int vipx_interface_stop(struct vipx_interface *itf)
+{
+    vipx_enter();
+    __vipx_interface_cleanup(itf);
+    clear_bit(VIPX_ITF_STATE_BOOTUP, &itf->state);
+    clear_bit(VIPX_ITF_STATE_START, &itf->state);
+
+    vipx_leave();
+    return 0;
+}
+
+/*
+ * Bind the mailbox control block to the interface, reset all per-graph
+ * request/reply slots and counters, and mark the interface OPEN (which
+ * also redirects the shared IRQ handlers to this driver). Always 0.
+ */
+int vipx_interface_open(struct vipx_interface *itf, void *mbox)
+{
+    int idx;
+
+    vipx_enter();
+    itf->mctrl = mbox;
+    vipx_mailbox_init(itf->mctrl);
+
+    itf->process = NULL;
+    for (idx = 0; idx < VIPX_MAX_GRAPH; ++idx) {
+        itf->request[idx] = NULL;
+        itf->reply[idx].valid = 0;
+    }
+
+    itf->done_cnt = 0;
+    itf->state = 0;
+    set_bit(VIPX_ITF_STATE_OPEN, &itf->state);
+
+    vipx_leave();
+    return 0;
+}
+
+/* Clear the OPEN bit and drop the mailbox reference. Always 0. */
+int vipx_interface_close(struct vipx_interface *itf)
+{
+    vipx_enter();
+    clear_bit(VIPX_ITF_STATE_OPEN, &itf->state);
+    itf->mctrl = NULL;
+
+    vipx_leave();
+    return 0;
+}
+
+/*
+ * Workqueue handler that drains the reply list filled by the ISR. Each
+ * work item identifies a task by id; depending on the task's message the
+ * reply is either latched into the per-graph reply slot or the task is
+ * completed and handed back to the graph manager. Work items are always
+ * returned to the free pool.
+ */
+static void vipx_interface_work_reply_func(struct work_struct *data)
+{
+    struct vipx_interface *itf;
+    struct vipx_taskmgr *itaskmgr;
+    struct vipx_work_list *work_list;
+    struct vipx_work *work;
+    struct vipx_task *itask;
+    unsigned int gidx;
+    unsigned long flags;
+
+    vipx_enter();
+    itf = container_of(data, struct vipx_interface, work_queue);
+    itaskmgr = &itf->taskmgr;
+    work_list = &itf->work_list;
+
+    while (true) {
+        work = __vipx_interface_work_list_dequeue_reply(work_list);
+        if (!work)
+            break;
+
+        if (work->id >= VIPX_MAX_TASK) {
+            vipx_err("work id is invalid (%d)\n", work->id);
+            goto p_end;
+        }
+
+        itask = &itaskmgr->task[work->id];
+
+        if (itask->state != VIPX_TASK_STATE_PROCESS) {
+            vipx_err("task(%u/%u/%lu) state(%u) is invalid\n",
+                    itask->message, itask->index,
+                    itask->param1, itask->state);
+            vipx_err("work(%u/%u/%u)\n", work->id,
+                    work->param0, work->param1);
+            vipx_task_print_all(&itf->taskmgr);
+            goto p_end;
+        }
+
+        switch (itask->message) {
+        case VIPX_TASK_INIT:
+        case VIPX_TASK_DEINIT:
+        case VIPX_TASK_CREATE:
+        case VIPX_TASK_DESTROY:
+        case VIPX_TASK_ALLOCATE:
+        case VIPX_TASK_REQUEST:
+            /* latch the reply into the graph slot and signal waiters */
+            gidx = itask->param1;
+            itf->reply[gidx] = *work;
+            __vipx_interface_set_reply(itf, gidx);
+            break;
+        case VIPX_TASK_PROCESS:
+            taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+            vipx_task_trans_pro_to_com(itaskmgr, itask);
+            taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+            itask->param2 = 0;
+            itask->param3 = 0;
+            vipx_graphmgr_queue(itf->cookie, itask);
+            itf->done_cnt++;
+            break;
+        default:
+            /* NOTE(review): switch is on itask->message but the log
+             * prints work->message - confirm which was intended.
+             */
+            vipx_err("unresolved task message(%d) have arrived\n",
+                    work->message);
+            break;
+        }
+p_end:
+        __vipx_interface_work_list_queue_free(work_list, work);
+    };
+    vipx_leave();
+}
+
+/*
+ * One-time probe: look up the two platform IRQs (SS1/SS2), register their
+ * handlers, initialize the slab allocator, task manager, wait queue,
+ * process barrier and reply work list. On failure, resources acquired so
+ * far are released in reverse order via the chained error labels.
+ * Returns 0 on success or a negative error code.
+ */
+int vipx_interface_probe(struct vipx_system *sys)
+{
+    int ret;
+    struct platform_device *pdev;
+    struct vipx_interface *itf;
+    struct device *dev;
+    struct vipx_taskmgr *taskmgr;
+
+    vipx_enter();
+    pdev = to_platform_device(sys->dev);
+    itf = &sys->interface;
+    itf->system = sys;
+    dev = &pdev->dev;
+
+    ret = platform_get_irq(pdev, 0);
+    if (ret < 0) {
+        vipx_err("platform_get_irq(0) is fail (%d)\n", ret);
+        goto p_err_irq0;
+    }
+
+    itf->irq[REG_SS1] = ret;
+
+    ret = platform_get_irq(pdev, 1);
+    if (ret < 0) {
+        vipx_err("platform_get_irq(1) is fail (%d)\n", ret);
+        goto p_err_irq1;
+    }
+
+    itf->irq[REG_SS2] = ret;
+
+    ret = devm_request_irq(dev, itf->irq[REG_SS1], vipx_interface_isr0,
+            0, dev_name(dev), itf);
+    if (ret) {
+        vipx_err("devm_request_irq(0) is fail (%d)\n", ret);
+        goto p_err_req_irq0;
+    }
+
+    ret = devm_request_irq(dev, itf->irq[REG_SS2], vipx_interface_isr1,
+            0, dev_name(dev), itf);
+    if (ret) {
+        vipx_err("devm_request_irq(1) is fail (%d)\n", ret);
+        goto p_err_req_irq1;
+    }
+
+    itf->regs = sys->reg_ss[REG_SS1];
+
+    ret = vipx_slab_init(&itf->slab);
+    if (ret)
+        goto p_err_slab;
+
+    taskmgr = &itf->taskmgr;
+    spin_lock_init(&taskmgr->slock);
+    ret = vipx_task_init(taskmgr, VIPX_MAX_GRAPH, itf);
+    if (ret)
+        goto p_err_taskmgr;
+
+    itf->cookie = &sys->graphmgr;
+    itf->state = 0;
+
+    init_waitqueue_head(&itf->reply_wq);
+    spin_lock_init(&itf->process_barrier);
+
+    INIT_WORK(&itf->work_queue, vipx_interface_work_reply_func);
+    __vipx_interface_work_list_init(&itf->work_list, VIPX_WORK_MAX_COUNT);
+
+    vipx_leave();
+    return 0;
+p_err_taskmgr:
+    vipx_slab_deinit(&itf->slab);
+p_err_slab:
+    devm_free_irq(dev, itf->irq[REG_SS2], itf);
+p_err_req_irq1:
+    devm_free_irq(dev, itf->irq[REG_SS1], itf);
+p_err_req_irq0:
+p_err_irq1:
+p_err_irq0:
+    return ret;
+}
+
+/* Tear down probe() resources in reverse acquisition order. */
+void vipx_interface_remove(struct vipx_interface *itf)
+{
+    vipx_task_deinit(&itf->taskmgr);
+    vipx_slab_deinit(&itf->slab);
+    devm_free_irq(itf->system->dev, itf->irq[REG_SS2], itf);
+    devm_free_irq(itf->system->dev, itf->irq[REG_SS1], itf);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_INTERFACE_H__
+#define __VIPX_INTERFACE_H__
+
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+
+#include "vipx-config.h"
+#include "platform/vipx-ctrl.h"
+#include "vipx-slab.h"
+#include "vipx-taskmgr.h"
+
+/* size of the ISR->workqueue reply pool and payload, and the wait budget
+ * for a mailbox round-trip
+ */
+#define VIPX_WORK_MAX_COUNT (20)
+#define VIPX_WORK_MAX_DATA (24)
+#define VIPX_COMMAND_TIMEOUT (3 * HZ)
+
+struct vipx_system;
+
+enum vipx_interrupt_direction {
+    IRQ0_TO_DEVICE,
+    IRQ1_TO_DEVICE,
+    IRQ_FROM_DEVICE,
+};
+
+/* blocking vs non-blocking reply delivery */
+enum vipx_work_message {
+    VIPX_WORK_MTYPE_BLK,
+    VIPX_WORK_MTYPE_NBLK,
+};
+
+/* bits in vipx_interface.state */
+enum vipx_interface_state {
+    VIPX_ITF_STATE_OPEN,
+    VIPX_ITF_STATE_BOOTUP,
+    VIPX_ITF_STATE_START
+};
+
+/* one deferred reply, shuttled from the ISR to the reply workqueue */
+struct vipx_work {
+    struct list_head list;
+    unsigned int valid;
+    unsigned int id;        /* task id (index into taskmgr.task[]) */
+    enum vipx_work_message message;
+    unsigned int param0;
+    unsigned int param1;
+    unsigned int param2;
+    unsigned int param3;
+    unsigned char data[VIPX_WORK_MAX_DATA];
+};
+
+/* fixed pool of work items plus the free/reply queues built over it */
+struct vipx_work_list {
+    struct vipx_work work[VIPX_WORK_MAX_COUNT];
+    spinlock_t slock;
+    struct list_head free_head;
+    unsigned int free_cnt;
+    struct list_head reply_head;
+    unsigned int reply_cnt;
+};
+
+struct vipx_interface {
+    int irq[REG_MAX];
+    void __iomem *regs;
+    struct vipx_slab_allocator slab;
+    struct vipx_taskmgr taskmgr;
+    void *cookie;           /* opaque graph-manager handle */
+    unsigned long state;    /* vipx_interface_state bits */
+
+    wait_queue_head_t reply_wq;
+    spinlock_t process_barrier;  /* guards the single in-flight 'process' */
+    struct vipx_work_list work_list;
+    struct work_struct work_queue;
+
+    struct vipx_task *request[VIPX_MAX_GRAPH];
+    struct vipx_task *process;   /* task currently awaiting a reply */
+    struct vipx_work reply[VIPX_MAX_GRAPH];
+    unsigned int done_cnt;
+
+    struct vipx_mailbox_ctrl *mctrl;
+    struct vipx_system *system;
+};
+
+int vipx_hw_wait_bootup(struct vipx_interface *itf);
+
+int vipx_hw_config(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_process(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_create(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_destroy(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_init(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_deinit(struct vipx_interface *itf, struct vipx_task *itask);
+
+int vipx_hw_load_graph(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_unload_graph(struct vipx_interface *itf, struct vipx_task *itask);
+int vipx_hw_execute_graph(struct vipx_interface *itf, struct vipx_task *itask);
+
+int vipx_interface_start(struct vipx_interface *itf);
+int vipx_interface_stop(struct vipx_interface *itf);
+int vipx_interface_open(struct vipx_interface *itf, void *mbox);
+int vipx_interface_close(struct vipx_interface *itf);
+
+int vipx_interface_probe(struct vipx_system *sys);
+void vipx_interface_remove(struct vipx_interface *itf);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/types.h>
-#include <linux/slab.h>
-
+#include "vipx-log.h"
+#include "vipx-debug.h"
#include "vipx-io.h"
-void *mem2iocpy(void *dst,void *src, u32 size)
+
+/* Byte-wise copy from normal memory into an MMIO region via IOW8().
+ * Returns dst, memcpy-style. (Hunk below keeps the original +/- diff
+ * lines untouched; only this comment is added.)
+ */
+void *vipx_io_copy_mem2io(void *dst, void *src, size_t size)
 {
- u8 *src8;
- u8 *dst8;
+ unsigned char *src8;
+ unsigned char *dst8;
+
+ src8 = (unsigned char *)(src);
+ dst8 = (unsigned char *)(dst);
- src8 = (u8 *)(src);
- dst8 = (u8 *)(dst);
 /* first copy byte by byte till the source first alignment
 * this step is necessary to ensure we do not even try to access
 * data which is before the source buffer, hence it is not ours.
 */
 /* complete the left overs */
- while (size--)
- {
+ while (size--) {
 IOW8(*dst8, *src8);
 dst8++;
 src8++;
 }
+
 return dst;
 }
-void *io2memcpy(void *dst,void *src, u32 size)
+void *vipx_io_copy_io2mem(void *dst, void *src, size_t size)
{
- u8 *src8;
- u8 *dst8;
+ unsigned char *src8;
+ unsigned char *dst8;
- src8 = (u8 *)(src);
- dst8 = (u8 *)(dst);
+ src8 = (unsigned char *)(src);
+ dst8 = (unsigned char *)(dst);
- while (size--)
- {
+ while (size--) {
*dst8 = IOR8(*src8);
dst8++;
src8++;
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_IO_H__
+#define __VIPX_IO_H__
+
+#include <linux/io.h>
+
+/* MMIO accessors: 'port' is an lvalue in the mapped region, not an
+ * address - the macros take &port internally.
+ */
+#define IOR8(port) readb((const void *)&port)
+#define IOR16(port) readw((const void *)&port)
+#define IOR32(port) readl((const void *)&port)
+#define IOR64(port) readq((const void *)&port)
+
+/* write variants optionally log every store when DEBUG_LOG_IO_WRITE is set */
+#ifdef DEBUG_LOG_IO_WRITE
+#define IOW8(port, val) \
+    do { \
+        vipx_dbg("ADDR: %p, VAL: 0x%02x\r\n", &port, val); \
+        writeb(val, &port); \
+    } while (0)
+#define IOW16(port, val) \
+    do { \
+        vipx_dbg("ADDR: %p, VAL: 0x%04x\r\n", &port, val); \
+        writew(val, &port); \
+    } while (0)
+#define IOW32(port, val) \
+    do { \
+        vipx_dbg("ADDR: %p, VAL: 0x%08x\r\n", &port, val); \
+        writel(val, &port); \
+    } while (0)
+#define IOW64(port, val) \
+    do { \
+        vipx_dbg("ADDR: %p, VAL: 0x%016llx\r\n", &port, val); \
+        writeq(val, &port); \
+    } while (0)
+#else
+#define IOW8(port, val) writeb(val, &port)
+#define IOW16(port, val) writew(val, &port)
+#define IOW32(port, val) writel(val, &port)
+#define IOW64(port, val) writeq(val, &port)
+#endif
+
+void *vipx_io_copy_mem2io(void *dst, void *src, size_t size);
+void *vipx_io_copy_io2mem(void *dst, void *src, size_t size);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "vipx-log.h"
+#include "vipx-core.h"
+#include "vipx-context.h"
+#include "vipx-ioctl.h"
+
+/*
+ * Copy a vs4l_graph from user space and reject any request that carries
+ * a size or addr (this driver accepts only empty graph descriptors).
+ * Returns 0 or a negative error code.
+ */
+static int __vipx_ioctl_get_graph(struct vs4l_graph *karg,
+        struct vs4l_graph __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*karg));
+    if (ret) {
+        vipx_err("Copy failed [S_GRAPH] (%d)\n", ret);
+        goto p_err;
+    }
+
+    if (karg->size || karg->addr) {
+        ret = -EINVAL;
+        vipx_err("invalid parameters (%u/%#lx) [S_GRAPH]\n",
+                karg->size, karg->addr);
+        goto p_err;
+    } else {
+        /* addr is already 0 here; kept as defensive normalization */
+        karg->addr = 0;
+    }
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* No-op: S_GRAPH allocates nothing, so there is nothing to release. */
+static void __vipx_ioctl_put_graph(struct vs4l_graph *karg,
+        struct vs4l_graph __user *uarg)
+{
+    vipx_enter();
+    vipx_leave();
+}
+
+/*
+ * Copy a vs4l_format_list header and its format array from user space.
+ * On success karg->formats points at a kernel copy that the caller must
+ * release via __vipx_ioctl_put_format(). Returns 0 or a negative errno.
+ */
+static int __vipx_ioctl_get_format(struct vs4l_format_list *karg,
+        struct vs4l_format_list __user *uarg)
+{
+    int ret;
+    size_t size;
+    struct vs4l_format __user *uformat;
+    struct vs4l_format *kformat;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*karg));
+    if (ret) {
+        vipx_err("Copy failed [S_FORMAT] (%d)\n", ret);
+        goto p_err;
+    }
+
+    uformat = karg->formats;
+
+    size = karg->count * sizeof(*kformat);
+    /* SECURITY: count is user-controlled; kcalloc() checks the
+     * count * size multiplication for overflow (kzalloc(size) did not).
+     */
+    kformat = kcalloc(karg->count, sizeof(*kformat), GFP_KERNEL);
+    if (!kformat) {
+        ret = -ENOMEM;
+        vipx_err("Failed to alloc kformat (%zu)\n", size);
+        goto p_err;
+    }
+
+    ret = copy_from_user(kformat, uformat, size);
+    if (ret) {
+        vipx_err("Copy failed [S_FORMAT] (%d)\n", ret);
+        goto p_err_free;
+    }
+
+    karg->formats = kformat;
+
+    vipx_leave();
+    return 0;
+p_err_free:
+    kfree(kformat);
+p_err:
+    return ret;
+}
+
+/* Release the kernel format array allocated by __vipx_ioctl_get_format(). */
+static void __vipx_ioctl_put_format(struct vs4l_format_list *karg,
+        struct vs4l_format_list __user *uarg)
+{
+    vipx_enter();
+    kfree(karg->formats);
+    vipx_leave();
+}
+
+/*
+ * Copy a vs4l_param_list header and its parameter array from user space
+ * and validate that every entry has zero size/addr. On success
+ * karg->params points at a kernel copy the caller must release via
+ * __vipx_ioctl_put_param(). Returns 0 or a negative errno.
+ */
+static int __vipx_ioctl_get_param(struct vs4l_param_list *karg,
+        struct vs4l_param_list __user *uarg)
+{
+    int ret;
+    unsigned int idx;
+    size_t size;
+    struct vs4l_param __user *uparam;
+    struct vs4l_param *kparam;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*karg));
+    if (ret) {
+        vipx_err("Copy failed [S_PARAM] (%d)\n", ret);
+        goto p_err;
+    }
+
+    uparam = karg->params;
+
+    size = karg->count * sizeof(*kparam);
+    /* SECURITY: count is user-controlled; kcalloc() checks the
+     * count * size multiplication for overflow (kzalloc(size) did not).
+     */
+    kparam = kcalloc(karg->count, sizeof(*kparam), GFP_KERNEL);
+    if (!kparam) {
+        ret = -ENOMEM;
+        vipx_err("Failed to alloc kparam (%zu)\n", size);
+        goto p_err;
+    }
+
+    ret = copy_from_user(kparam, uparam, size);
+    if (ret) {
+        vipx_err("Copy failed [S_PARAM] (%d)\n", ret);
+        goto p_err_free;
+    }
+
+    for (idx = 0; idx < karg->count; ++idx) {
+        if (kparam[idx].size || kparam[idx].addr) {
+            /* BUGFIX: previously jumped to p_err_free with ret
+             * still 0, so invalid input was reported as success
+             * while kparam had been freed - and put_param would
+             * later kfree() the raw user-space pointer left in
+             * karg->params.
+             */
+            ret = -EINVAL;
+            vipx_err("invalid parameters ([%d]%u/%#lx) [S_PARAM]\n",
+                    idx, kparam[idx].size,
+                    kparam[idx].addr);
+            goto p_err_free;
+        }
+    }
+
+    karg->params = kparam;
+    vipx_leave();
+    return 0;
+p_err_free:
+    kfree(kparam);
+p_err:
+    return ret;
+}
+
+/* Release the kernel param array allocated by __vipx_ioctl_get_param(). */
+static void __vipx_ioctl_put_param(struct vs4l_param_list *karg,
+        struct vs4l_param_list __user *uarg)
+{
+    vipx_enter();
+    kfree(karg->params);
+    vipx_leave();
+}
+
+/* Copy a vs4l_ctrl from user space; no further validation is done here. */
+static int __vipx_ioctl_get_ctrl(struct vs4l_ctrl *karg,
+        struct vs4l_ctrl __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*karg));
+    if (ret) {
+        vipx_err("Copy failed [S_CTRL] (%d)\n", ret);
+        goto p_err;
+    }
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* No-op: S_CTRL allocates nothing, so there is nothing to release. */
+static void __vipx_ioctl_put_ctrl(struct vs4l_ctrl *karg,
+        struct vs4l_ctrl __user *uarg)
+{
+    vipx_enter();
+    vipx_leave();
+}
+
+/*
+ * Copy a vs4l_container_list, its container array, and each container's
+ * buffer array from user space. On success karg->containers points at a
+ * fully kernel-resident copy the caller must release via
+ * __vipx_ioctl_put_container(). Returns 0 or a negative errno.
+ */
+static int __vipx_ioctl_get_container(struct vs4l_container_list *karg,
+        struct vs4l_container_list __user *uarg)
+{
+    int ret;
+    size_t size;
+    unsigned int idx;
+    struct vs4l_container *ucon;
+    struct vs4l_container *kcon;
+    struct vs4l_buffer *ubuf;
+    struct vs4l_buffer *kbuf;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*karg));
+    if (ret) {
+        vipx_err("Copy failed [CONTAINER] (%d)\n", ret);
+        goto p_err;
+    }
+
+    ucon = karg->containers;
+
+    size = karg->count * sizeof(*kcon);
+    /* SECURITY: count is user-controlled; kcalloc() checks the
+     * count * size multiplication for overflow.
+     */
+    kcon = kcalloc(karg->count, sizeof(*kcon), GFP_KERNEL);
+    if (!kcon) {
+        ret = -ENOMEM;
+        vipx_err("Failed to alloc kcon (%zu)\n", size);
+        goto p_err;
+    }
+
+    karg->containers = kcon;
+
+    ret = copy_from_user(kcon, ucon, size);
+    if (ret) {
+        vipx_err("Copy failed [CONTAINER] (%d)\n", ret);
+        idx = 0;
+        goto p_err_free;
+    }
+
+    for (idx = 0; idx < karg->count; ++idx) {
+        ubuf = kcon[idx].buffers;
+
+        size = kcon[idx].count * sizeof(*kbuf);
+        kbuf = kcalloc(kcon[idx].count, sizeof(*kbuf), GFP_KERNEL);
+        if (!kbuf) {
+            ret = -ENOMEM;
+            vipx_err("Failed to alloc kbuf (%zu)\n", size);
+            goto p_err_free;
+        }
+
+        ret = copy_from_user(kbuf, ubuf, size);
+        if (ret) {
+            vipx_err("Copy failed [CONTAINER] (%d)\n", ret);
+            kfree(kbuf);
+            goto p_err_free;
+        }
+
+        /* BUGFIX: only install the kernel pointer after a successful
+         * copy. The old code assigned it before copying and then, on
+         * any failure, blindly kfree()d kcon[i].buffers for ALL
+         * entries - including ones still holding the raw user-space
+         * pointers copied in from user memory (arbitrary kfree).
+         */
+        kcon[idx].buffers = kbuf;
+    }
+
+    vipx_leave();
+    return 0;
+p_err_free:
+    /* unwind exactly the entries whose buffers were allocated (0..idx-1) */
+    while (idx-- > 0)
+        kfree(kcon[idx].buffers);
+
+    kfree(kcon);
+p_err:
+    return ret;
+}
+
+/*
+ * Copy the (possibly updated) container list back to user space, then
+ * free every per-container buffer array and the container array itself.
+ * NOTE(review): karg->containers holds a kernel pointer at this point,
+ * and it is copied out to user space verbatim - confirm this kernel
+ * address leak is acceptable / expected by userspace.
+ */
+static void __vipx_ioctl_put_container(struct vs4l_container_list *karg,
+        struct vs4l_container_list __user *uarg)
+{
+    int ret;
+    unsigned int idx;
+
+    vipx_enter();
+    ret = copy_to_user(uarg, karg, sizeof(*karg));
+    if (ret)
+        vipx_err("Copy failed to user [CONTAINER]\n");
+
+    for (idx = 0; idx < karg->count; ++idx)
+        kfree(karg->containers[idx].buffers);
+
+    kfree(karg->containers);
+    vipx_leave();
+}
+
+/*
+ * Copy a load-kernel-binary request from user space and clear the
+ * output-only timestamp/reserved fields before use.
+ */
+static int __vipx_ioctl_get_load_kernel_binary(
+        struct vipx_ioc_load_kernel_binary *karg,
+        struct vipx_ioc_load_kernel_binary __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*uarg));
+    if (ret) {
+        vipx_err("Copy failed [Load kernel binary] (%d)\n", ret);
+        goto p_err;
+    }
+
+    memset(karg->timestamp, 0, sizeof(karg->timestamp));
+    memset(karg->reserved, 0, sizeof(karg->reserved));
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* Copy the completed load-kernel-binary request back to user space. */
+static void __vipx_ioctl_put_load_kernel_binary(
+        struct vipx_ioc_load_kernel_binary *karg,
+        struct vipx_ioc_load_kernel_binary __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_to_user(uarg, karg, sizeof(*karg));
+    if (ret)
+        vipx_err("Copy failed to user [Load kernel binary]\n");
+
+    vipx_leave();
+}
+
+/*
+ * Copy a load-graph-info request from user space and clear the
+ * output-only timestamp/reserved fields before use.
+ */
+static int __vipx_ioctl_get_load_graph_info(
+        struct vipx_ioc_load_graph_info *karg,
+        struct vipx_ioc_load_graph_info __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*uarg));
+    if (ret) {
+        vipx_err("Copy failed [Load graph info] (%d)\n", ret);
+        goto p_err;
+    }
+
+    memset(karg->timestamp, 0, sizeof(karg->timestamp));
+    memset(karg->reserved, 0, sizeof(karg->reserved));
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* Copy the completed load-graph-info request back to user space. */
+static void __vipx_ioctl_put_load_graph_info(
+        struct vipx_ioc_load_graph_info *karg,
+        struct vipx_ioc_load_graph_info __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_to_user(uarg, karg, sizeof(*karg));
+    if (ret)
+        vipx_err("Copy failed to user [Load graph info]\n");
+
+    vipx_leave();
+}
+
+/*
+ * Copy an unload-graph-info request from user space and clear the
+ * output-only timestamp/reserved fields before use.
+ */
+static int __vipx_ioctl_get_unload_graph_info(
+        struct vipx_ioc_unload_graph_info *karg,
+        struct vipx_ioc_unload_graph_info __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*uarg));
+    if (ret) {
+        vipx_err("Copy failed [Unload graph info] (%d)\n", ret);
+        goto p_err;
+    }
+
+    memset(karg->timestamp, 0, sizeof(karg->timestamp));
+    memset(karg->reserved, 0, sizeof(karg->reserved));
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* Copy the completed unload-graph-info request back to user space. */
+static void __vipx_ioctl_put_unload_graph_info(
+        struct vipx_ioc_unload_graph_info *karg,
+        struct vipx_ioc_unload_graph_info __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_to_user(uarg, karg, sizeof(*karg));
+    if (ret)
+        vipx_err("Copy failed to user [Unload graph info]\n");
+
+    vipx_leave();
+}
+
+/*
+ * Copy an execute-submodel request from user space and clear the
+ * output-only timestamp/reserved fields before use.
+ */
+static int __vipx_ioctl_get_execute_submodel(
+        struct vipx_ioc_execute_submodel *karg,
+        struct vipx_ioc_execute_submodel __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_from_user(karg, uarg, sizeof(*uarg));
+    if (ret) {
+        vipx_err("Copy failed [Execute submodel] (%d)\n", ret);
+        goto p_err;
+    }
+
+    memset(karg->timestamp, 0, sizeof(karg->timestamp));
+    memset(karg->reserved, 0, sizeof(karg->reserved));
+
+    vipx_leave();
+    return 0;
+p_err:
+    return ret;
+}
+
+/* Copy the completed execute-submodel request back to user space. */
+static void __vipx_ioctl_put_execute_submodel(
+        struct vipx_ioc_execute_submodel *karg,
+        struct vipx_ioc_execute_submodel __user *uarg)
+{
+    int ret;
+
+    vipx_enter();
+    ret = copy_to_user(uarg, karg, sizeof(*karg));
+    if (ret)
+        vipx_err("Copy failed to user [Execute submodel]\n");
+
+    vipx_leave();
+}
+
+/*
+ * Main ioctl dispatcher. Each command follows the same three-step
+ * pattern: a __get helper copies/validates the argument into the on-stack
+ * union, the context op does the work, and the matching __put helper
+ * copies results back and/or frees what __get allocated. The __put is
+ * invoked even when the op fails so allocations are always released.
+ * Returns the op's result or a negative error code.
+ */
+long vipx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+    int ret;
+    struct vipx_context *vctx;
+    const struct vipx_ioctl_ops *ops;
+    union vipx_ioc_arg karg;
+    void __user *uarg;
+
+    vipx_enter();
+    vctx = file->private_data;
+    ops = vctx->core->ioc_ops;
+    uarg = (void __user *)arg;
+
+    switch (cmd) {
+    case VS4L_VERTEXIOC_S_GRAPH:
+        ret = __vipx_ioctl_get_graph(&karg.graph, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->set_graph(vctx, &karg.graph);
+        __vipx_ioctl_put_graph(&karg.graph, uarg);
+        break;
+    case VS4L_VERTEXIOC_S_FORMAT:
+        ret = __vipx_ioctl_get_format(&karg.flist, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->set_format(vctx, &karg.flist);
+        __vipx_ioctl_put_format(&karg.flist, uarg);
+        break;
+    case VS4L_VERTEXIOC_S_PARAM:
+        ret = __vipx_ioctl_get_param(&karg.plist, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->set_param(vctx, &karg.plist);
+        __vipx_ioctl_put_param(&karg.plist, uarg);
+        break;
+    case VS4L_VERTEXIOC_S_CTRL:
+        ret = __vipx_ioctl_get_ctrl(&karg.ctrl, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->set_ctrl(vctx, &karg.ctrl);
+        __vipx_ioctl_put_ctrl(&karg.ctrl, uarg);
+        break;
+    case VS4L_VERTEXIOC_STREAM_ON:
+        ret = ops->streamon(vctx);
+        break;
+    case VS4L_VERTEXIOC_STREAM_OFF:
+        ret = ops->streamoff(vctx);
+        break;
+    case VS4L_VERTEXIOC_QBUF:
+        ret = __vipx_ioctl_get_container(&karg.clist, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->qbuf(vctx, &karg.clist);
+        __vipx_ioctl_put_container(&karg.clist, uarg);
+        break;
+    case VS4L_VERTEXIOC_DQBUF:
+        ret = __vipx_ioctl_get_container(&karg.clist, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->dqbuf(vctx, &karg.clist);
+        __vipx_ioctl_put_container(&karg.clist, uarg);
+        break;
+    case VIPX_IOC_LOAD_KERNEL_BINARY:
+        ret = __vipx_ioctl_get_load_kernel_binary(&karg.kernel_bin,
+                uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
+        __vipx_ioctl_put_load_kernel_binary(&karg.kernel_bin, uarg);
+        break;
+    case VIPX_IOC_LOAD_GRAPH_INFO:
+        ret = __vipx_ioctl_get_load_graph_info(&karg.load_ginfo,
+                uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->load_graph_info(vctx, &karg.load_ginfo);
+        __vipx_ioctl_put_load_graph_info(&karg.load_ginfo, uarg);
+        break;
+    case VIPX_IOC_UNLOAD_GRAPH_INFO:
+        ret = __vipx_ioctl_get_unload_graph_info(&karg.unload_ginfo,
+                uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->unload_graph_info(vctx, &karg.unload_ginfo);
+        __vipx_ioctl_put_unload_graph_info(&karg.unload_ginfo, uarg);
+        break;
+    case VIPX_IOC_EXECUTE_SUBMODEL:
+        ret = __vipx_ioctl_get_execute_submodel(&karg.exec, uarg);
+        if (ret)
+            goto p_err;
+
+        ret = ops->execute_submodel(vctx, &karg.exec);
+        __vipx_ioctl_put_execute_submodel(&karg.exec, uarg);
+        break;
+
+    default:
+        ret = -EINVAL;
+        vipx_err("ioc command(%x) is not supported\n", cmd);
+        goto p_err;
+    }
+
+    vipx_leave();
+p_err:
+    return ret;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_IOCTL_H__
+#define __VIPX_IOCTL_H__
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "vs4l.h"
+#include "vipx-common-type.h"
+
+/* Vendor control IDs used with VS4L_VERTEXIOC_S_CTRL */
+#define VIPX_CTRL_VIPX_BASE (0x00010000)
+#define VIPX_CTRL_DUMP (VIPX_CTRL_VIPX_BASE + 1)
+#define VIPX_CTRL_MODE (VIPX_CTRL_VIPX_BASE + 2)
+#define VIPX_CTRL_TEST (VIPX_CTRL_VIPX_BASE + 3)
+
+struct vipx_context;
+
+/*
+ * Argument blocks for the VIPx-specific ioctls below.  Each carries a
+ * 'ret' field plus timestamp/reserved fields that the driver clears and
+ * copies back to userspace (see the get/put helpers in vipx-ioctl.c).
+ */
+struct vipx_ioc_load_kernel_binary {
+ unsigned int size;
+ unsigned int global_id;
+ int kernel_fd;
+ unsigned int kernel_size;
+ int ret;
+ struct timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_load_graph_info {
+ unsigned int size;
+ struct vipx_common_graph_info graph_info;
+ int ret;
+ struct timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_unload_graph_info {
+ unsigned int size;
+ unsigned int graph_id;
+ int ret;
+ struct timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vipx_ioc_execute_submodel {
+ unsigned int size;
+ struct vipx_common_execute_info execute_info;
+ int ret;
+ struct timespec timestamp[4];
+ int reserved[2];
+};
+
+/*
+ * NOTE(review): ioctl type 'V' is also used by V4L2 (videodev2.h) and
+ * nrs 0..3 are low - confirm these nodes can never be confused with a
+ * V4L2 device before relying on the numbers.
+ */
+#define VIPX_IOC_LOAD_KERNEL_BINARY \
+ _IOWR('V', 0, struct vipx_ioc_load_kernel_binary)
+#define VIPX_IOC_LOAD_GRAPH_INFO \
+ _IOWR('V', 1, struct vipx_ioc_load_graph_info)
+#define VIPX_IOC_UNLOAD_GRAPH_INFO \
+ _IOWR('V', 2, struct vipx_ioc_unload_graph_info)
+#define VIPX_IOC_EXECUTE_SUBMODEL \
+ _IOWR('V', 3, struct vipx_ioc_execute_submodel)
+
+/* One on-stack argument buffer big enough for any supported command. */
+union vipx_ioc_arg {
+ struct vs4l_graph graph;
+ struct vs4l_format_list flist;
+ struct vs4l_param_list plist;
+ struct vs4l_ctrl ctrl;
+ struct vs4l_container_list clist;
+
+ struct vipx_ioc_load_kernel_binary kernel_bin;
+ struct vipx_ioc_load_graph_info load_ginfo;
+ struct vipx_ioc_unload_graph_info unload_ginfo;
+ struct vipx_ioc_execute_submodel exec;
+};
+
+/* Per-core dispatch table used by vipx_ioctl() / vipx_compat_ioctl(). */
+struct vipx_ioctl_ops {
+ /* TODO: vs4l(temp) */
+ int (*set_graph)(struct vipx_context *vctx,
+ struct vs4l_graph *graph);
+ int (*set_format)(struct vipx_context *vctx,
+ struct vs4l_format_list *flist);
+ int (*set_param)(struct vipx_context *vctx,
+ struct vs4l_param_list *plist);
+ int (*set_ctrl)(struct vipx_context *vctx,
+ struct vs4l_ctrl *ctrl);
+ int (*qbuf)(struct vipx_context *vctx,
+ struct vs4l_container_list *clist);
+ int (*dqbuf)(struct vipx_context *vctx,
+ struct vs4l_container_list *clist);
+ int (*streamon)(struct vipx_context *vctx);
+ int (*streamoff)(struct vipx_context *vctx);
+ /* dal */
+ int (*load_kernel_binary)(struct vipx_context *vctx,
+ struct vipx_ioc_load_kernel_binary *args);
+ int (*load_graph_info)(struct vipx_context *vctx,
+ struct vipx_ioc_load_graph_info *args);
+ int (*unload_graph_info)(struct vipx_context *vctx,
+ struct vipx_ioc_unload_graph_info *args);
+ int (*execute_submodel)(struct vipx_context *vctx,
+ struct vipx_ioc_execute_submodel *args);
+};
+
+long vipx_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#if defined(CONFIG_COMPAT)
+long vipx_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static inline long vipx_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg) { return 0; };
+#endif
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-common-type.h"
+#include "vipx-kernel-binary.h"
+
+/*
+ * Attach the kernel binaries registered on this context to a graph
+ * model: every entry of vctx->binary_list whose model id matches the
+ * model id of 'gmodel' is linked into gmodel->kbin_list, unless an
+ * entry with the same fd and size is already linked (duplicate).
+ *
+ * NOTE(review): binary_list is walked without vctx->binary_slock here
+ * (add/remove do take it) - confirm callers serialize against
+ * vipx_kernel_binary_add/remove.
+ */
+int vipx_kernel_binary_set_gmodel(struct vipx_context *vctx,
+ struct vipx_graph_model *gmodel)
+{
+ struct vipx_kernel_binary *kbin, *temp, *gbin, *temp2;
+ unsigned int kid, gid;
+ int kfd, gfd;
+ size_t ksize, gsize;
+ bool is_duplicate;
+
+ vipx_enter();
+ gid = GET_COMMON_GRAPH_MODEL_ID(gmodel->id);
+ list_for_each_entry_safe(kbin, temp, &vctx->binary_list, clist) {
+ kid = GET_COMMON_GRAPH_MODEL_ID(kbin->global_id);
+ if (gid == kid) {
+ kfd = kbin->buffer.m.fd;
+ ksize = kbin->buffer.size;
+ is_duplicate = false;
+ list_for_each_entry_safe(gbin, temp2,
+ &gmodel->kbin_list, glist) {
+ gfd = gbin->buffer.m.fd;
+ gsize = gbin->buffer.size;
+ if ((kfd == gfd) && (ksize == gsize)) {
+ is_duplicate = true;
+ break;
+ }
+ }
+ if (!is_duplicate) {
+ //TODO check if list cleanup is needed
+ list_add_tail(&kbin->glist, &gmodel->kbin_list);
+ gmodel->kbin_count++;
+ }
+ }
+ }
+ vipx_leave();
+ return 0;
+}
+
+/*
+ * Register a userspace kernel binary (dma-buf fd) with the context:
+ * allocate a tracking entry, map the buffer for the device via the
+ * memory ops, and append it to vctx->binary_list under binary_slock.
+ * Returns 0 on success or a negative errno.
+ */
+int vipx_kernel_binary_add(struct vipx_context *vctx, unsigned int id,
+ int fd, unsigned int size)
+{
+ int ret;
+ struct vipx_kernel_binary *kbin;
+ struct vipx_memory *mem;
+ unsigned long flags;
+
+ vipx_enter();
+ kbin = kzalloc(sizeof(*kbin), GFP_KERNEL);
+ if (!kbin) {
+ ret = -ENOMEM;
+ vipx_err("Failed to alloc kernel binary\n");
+ goto p_err;
+ }
+ kbin->global_id = id;
+ kbin->buffer.m.fd = fd;
+ kbin->buffer.size = size;
+
+ mem = &vctx->core->system->memory;
+ ret = mem->mops->map_dmabuf(mem, &kbin->buffer);
+ if (ret)
+ goto p_err_map;
+
+ spin_lock_irqsave(&vctx->binary_slock, flags);
+ vctx->binary_count++;
+ list_add_tail(&kbin->clist, &vctx->binary_list);
+ spin_unlock_irqrestore(&vctx->binary_slock, flags);
+
+ /*
+ * NOTE(review): kbin is already visible on binary_list when vctx is
+ * assigned - confirm no concurrent reader dereferences kbin->vctx.
+ */
+ kbin->vctx = vctx;
+ vipx_leave();
+ return 0;
+p_err_map:
+ kfree(kbin);
+p_err:
+ return ret;
+}
+
+/*
+ * Unlink one kernel binary from its owning context (under
+ * binary_slock), release the device mapping and free the entry.
+ * The caller must hold the only remaining reference to 'kbin'.
+ */
+void vipx_kernel_binary_remove(struct vipx_kernel_binary *kbin)
+{
+ struct vipx_context *vctx;
+ unsigned long flags;
+ struct vipx_memory *mem;
+
+ vipx_enter();
+ vctx = kbin->vctx;
+
+ spin_lock_irqsave(&vctx->binary_slock, flags);
+ list_del(&kbin->clist);
+ vctx->binary_count--;
+ spin_unlock_irqrestore(&vctx->binary_slock, flags);
+
+ mem = &vctx->core->system->memory;
+ mem->mops->unmap_dmabuf(mem, &kbin->buffer);
+
+ kfree(kbin);
+
+ vipx_leave();
+}
+
+/*
+ * Release every kernel binary registered on the context.
+ *
+ * NOTE(review): the list walk itself is not under binary_slock (only
+ * the per-entry unlink inside vipx_kernel_binary_remove is), so this
+ * is safe only when no other path can touch the list, e.g. at context
+ * teardown - confirm callers.
+ */
+void vipx_kernel_binary_all_remove(struct vipx_context *vctx)
+{
+ struct vipx_kernel_binary *kbin, *temp;
+
+ vipx_enter();
+ list_for_each_entry_safe(kbin, temp, &vctx->binary_list, clist) {
+ vipx_kernel_binary_remove(kbin);
+ }
+ vipx_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_KERNEL_BINARY_H__
+#define __VIPX_KERNEL_BINARY_H__
+
+#include "vipx-context.h"
+#include "vipx-memory.h"
+#include "vipx-graph.h"
+
+/* A userspace-supplied kernel binary (dma-buf) mapped for the device. */
+struct vipx_kernel_binary {
+ unsigned int global_id;
+ struct vipx_buffer buffer;
+ /* node in vipx_context.binary_list (guarded by binary_slock) */
+ struct list_head clist;
+ /* node in vipx_graph_model.kbin_list */
+ struct list_head glist;
+
+ /* owning context; set by vipx_kernel_binary_add() */
+ struct vipx_context *vctx;
+};
+
+int vipx_kernel_binary_set_gmodel(struct vipx_context *vctx,
+ struct vipx_graph_model *gmodel);
+int vipx_kernel_binary_add(struct vipx_context *vctx, unsigned int id,
+ int fd, unsigned int size);
+void vipx_kernel_binary_remove(struct vipx_kernel_binary *kbin);
+void vipx_kernel_binary_all_remove(struct vipx_context *vctx);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_LOG_H__
+#define __VIPX_LOG_H__
+
+#include <linux/kernel.h>
+
+#include "vipx-config.h"
+
+/* Driver-wide log helpers; err/warn/dbg include the caller's line number. */
+#define vipx_err(fmt, args...) \
+ pr_err("[VIPx][ERR](%d):" fmt, __LINE__, ##args)
+
+#define vipx_warn(fmt, args...) \
+ pr_warn("[VIPx][WRN](%d):" fmt, __LINE__, ##args)
+
+#define vipx_info(fmt, args...) \
+ pr_info("[VIPx]:" fmt, ##args)
+
+#define vipx_dbg(fmt, args...) \
+ pr_info("[VIPx][DBG](%d):" fmt, __LINE__, ##args)
+
+/*
+ * Function-entry/exit tracing; compiles to nothing unless
+ * DEBUG_LOG_CALL_TREE is defined (see vipx-config.h).
+ */
+#if defined(DEBUG_LOG_CALL_TREE)
+#define vipx_enter() vipx_info("[%s] enter\n", __func__)
+#define vipx_leave() vipx_info("[%s] leave\n", __func__)
+#define vipx_check() vipx_info("[%s] check\n", __func__)
+#else
+#define vipx_enter()
+#define vipx_leave()
+#define vipx_check()
+#endif
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+
+#include "vipx-log.h"
+#include "vipx-io.h"
+#include "vipx-mailbox.h"
+
+/*
+ * Number of free message slots in a mailbox.
+ *
+ * The read/write indices run over [0, 2 * mbox_size) so that a full
+ * mailbox (used == mbox_size, widx == ridx but wmsg_idx != rmsg_idx)
+ * can be told apart from an empty one.
+ *
+ * NOTE(review): although the return type is ssize_t and the caller
+ * tests for a negative result, the arithmetic here is unsigned int, so
+ * a negative value can never actually be produced - the "invalid
+ * index" path in vipx_mailbox_check_full() looks unreachable. Confirm
+ * intent.
+ */
+static ssize_t __vipx_mailbox_get_remain_size(struct vipx_mailbox_head *head)
+{
+ unsigned int rmsg_idx, wmsg_idx, mbox_size;
+ unsigned int ridx, widx;
+ unsigned int used;
+
+ vipx_check();
+ rmsg_idx = head->rmsg_idx;
+ wmsg_idx = head->wmsg_idx;
+ mbox_size = head->mbox_size;
+
+ ridx = (rmsg_idx >= mbox_size) ? rmsg_idx - mbox_size : rmsg_idx;
+ widx = (wmsg_idx >= mbox_size) ? wmsg_idx - mbox_size : wmsg_idx;
+
+ if (widx > ridx)
+ used = widx - ridx;
+ else if (widx < ridx)
+ used = (mbox_size - ridx) + widx;
+ else if (wmsg_idx != rmsg_idx)
+ used = mbox_size;
+ else
+ used = 0;
+
+ return mbox_size - used;
+}
+
+/*
+ * Ensure there is room to post a host-to-device message in the normal
+ * or urgent mailbox.  With 'wait' set this busy-waits (udelay(10) x up
+ * to 1000 tries, ~10 ms total) for the device to drain the queue.
+ * Returns 0 when a slot is free, -EBUSY when (still) full, -EINVAL for
+ * a bad mailbox type, -EFAULT for corrupted indices (see the note on
+ * __vipx_mailbox_get_remain_size about that branch).
+ */
+int vipx_mailbox_check_full(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ int wait)
+{
+ int ret;
+ struct vipx_mailbox_head *head;
+ int try_count = 1000;
+ ssize_t remain_size;
+
+ vipx_enter();
+ if (type == NORMAL_MSG) {
+ head = &mctrl->h2d_normal_head;
+ } else if (type == URGENT_MSG) {
+ head = &mctrl->h2d_urgent_head;
+ } else {
+ ret = -EINVAL;
+ vipx_err("invalid mbox type(%u)\n", type);
+ goto p_err;
+ }
+
+ while (try_count) {
+ remain_size = __vipx_mailbox_get_remain_size(head);
+ if (remain_size > 0) {
+ break;
+ } else if (remain_size < 0) {
+ ret = -EFAULT;
+ vipx_err("index of mbox(%u) is invalid (%u/%u/%u/%u)\n",
+ type, head->wmsg_idx, head->rmsg_idx,
+ head->mbox_size, head->elem_size);
+ goto p_err;
+ } else if (wait) {
+ vipx_warn("mbox(%u) is full (%u/%u/%u/%u/%d)\n",
+ type, head->wmsg_idx, head->rmsg_idx,
+ head->mbox_size, head->elem_size,
+ try_count);
+ udelay(10);
+ try_count--;
+ } else {
+ break;
+ }
+ }
+
+ if (!remain_size) {
+ ret = -EBUSY;
+ vipx_err("mbox(%u) is full (%u/%u/%u/%u)\n",
+ type, head->wmsg_idx, head->rmsg_idx,
+ head->mbox_size, head->elem_size);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/*
+ * Copy one host-to-device message into the next write slot of the
+ * selected mailbox and advance the write index (indices wrap at
+ * 2 * mbox_size; see __vipx_mailbox_get_remain_size()).  The caller is
+ * expected to have confirmed free space via vipx_mailbox_check_full().
+ */
+int vipx_mailbox_write(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ void *msg, size_t size)
+{
+ int ret;
+ struct vipx_mailbox_head *head;
+ unsigned int wmsg_idx, mbox_size;
+ unsigned int widx;
+ struct vipx_h2d_message *h2d_msg;
+ void *wptr;
+ struct vipx_h2d_message *debug;
+
+ vipx_enter();
+ if (size > sizeof(struct vipx_h2d_message)) {
+ ret = -EINVAL;
+ vipx_err("message size(%zu/%zu) for mbox(%u) is invalid\n",
+ size, sizeof(struct vipx_h2d_message), type);
+ goto p_err;
+ }
+
+ if (type == NORMAL_MSG) {
+ head = &mctrl->h2d_normal_head;
+ h2d_msg = mctrl->h2d_normal_data;
+ } else if (type == URGENT_MSG) {
+ head = &mctrl->h2d_urgent_head;
+ h2d_msg = mctrl->h2d_urgent_data;
+ } else {
+ ret = -EINVAL;
+ vipx_err("invalid mbox type(%u)\n", type);
+ goto p_err;
+ }
+
+ wmsg_idx = head->wmsg_idx;
+ mbox_size = head->mbox_size;
+
+ widx = (wmsg_idx >= mbox_size) ? wmsg_idx - mbox_size : wmsg_idx;
+
+ wptr = &h2d_msg[widx];
+ vipx_io_copy_mem2io(wptr, msg, size);
+
+ widx = wmsg_idx + 1;
+
+ debug = msg;
+ vipx_dbg("[MBOX(%d)-W] mbox[v%u/r:%u/w:%u/s:%u/e:%u]\n",
+ type, mctrl->mbox_version,
+ head->rmsg_idx, head->wmsg_idx,
+ mbox_size, head->elem_size);
+ vipx_dbg("[MBOX(%d)-W] message[v%u/t:%u/s:%u/tid:%u/mid:%u]\n",
+ type, debug->head.msg_version, debug->head.msg_type,
+ debug->head.msg_size, debug->head.trans_id,
+ debug->head.msg_id);
+#ifdef DEBUG_LOG_MAILBOX_DUMP
+ /* 'ret' is reused as a dump loop counter; the function returns 0 below */
+ for (ret = 0; ret < debug->head.msg_size >> 2; ++ret) {
+ vipx_dbg("[MBOX(%d)-W][%4d] %#10x\n",
+ type, ret, ((int *)&debug->body)[ret]);
+ }
+#endif
+
+ /* publish: advance write index with wrap at 2 * mbox_size */
+ head->wmsg_idx = (widx >= (mbox_size << 1)) ?
+ widx - (mbox_size << 1) : widx;
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/* True when no unread message is pending (raw indices compare equal). */
+static inline int __vipx_mailbox_is_empty(struct vipx_mailbox_head *head)
+{
+ vipx_check();
+ return head->wmsg_idx == head->rmsg_idx;
+}
+
+/*
+ * Check whether a device-to-host message is pending.  With 'wait' set
+ * this polls (udelay(10), up to 1000 tries) until the mailbox becomes
+ * non-empty.  Returns 0 when a message is available, -EFAULT when the
+ * mailbox is (still) empty, -EINVAL for a bad mailbox type.
+ */
+int vipx_mailbox_check_reply(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ int wait)
+{
+ int ret;
+ struct vipx_mailbox_head *head;
+ int try_count = 1000;
+
+ vipx_enter();
+ if (type == NORMAL_MSG) {
+ head = &mctrl->d2h_normal_head;
+ } else if (type == URGENT_MSG) {
+ head = &mctrl->d2h_urgent_head;
+ } else {
+ ret = -EINVAL;
+ vipx_err("invalid mbox type (%u)\n", type);
+ goto p_err;
+ }
+
+ while (try_count) {
+ ret = __vipx_mailbox_is_empty(head);
+ if (ret) {
+ if (!wait)
+ break;
+
+ udelay(10);
+ try_count--;
+ } else {
+ break;
+ }
+ }
+
+ if (ret) {
+ ret = -EFAULT;
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/*
+ * Copy the oldest device-to-host message out of the selected mailbox
+ * into 'msg' (always a full struct vipx_d2h_message) and advance the
+ * read index (wrap at 2 * mbox_size).  The caller must first confirm a
+ * message is pending via vipx_mailbox_check_reply().
+ */
+int vipx_mailbox_read(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ void *msg)
+{
+ int ret;
+ struct vipx_mailbox_head *head;
+ unsigned int rmsg_idx, mbox_size;
+ unsigned int ridx;
+ struct vipx_d2h_message *d2h_msg;
+ void *rptr;
+ struct vipx_d2h_message *debug;
+
+ vipx_enter();
+ if (type == NORMAL_MSG) {
+ head = &mctrl->d2h_normal_head;
+ d2h_msg = mctrl->d2h_normal_data;
+ } else if (type == URGENT_MSG) {
+ head = &mctrl->d2h_urgent_head;
+ d2h_msg = mctrl->d2h_urgent_data;
+ } else {
+ ret = -EINVAL;
+ vipx_err("invalid mbox type(%u)\n", type);
+ goto p_err;
+ }
+
+ rmsg_idx = head->rmsg_idx;
+ mbox_size = head->mbox_size;
+
+ ridx = (rmsg_idx >= mbox_size) ? rmsg_idx - mbox_size : rmsg_idx;
+
+ rptr = &d2h_msg[ridx];
+ vipx_io_copy_io2mem(msg, rptr, sizeof(struct vipx_d2h_message));
+
+ ridx = rmsg_idx + 1;
+
+ debug = msg;
+ vipx_dbg("[MBOX(%d)-R] mbox[v%u/r:%u/w:%u/s:%u/e:%u]\n",
+ type, mctrl->mbox_version,
+ head->rmsg_idx, head->wmsg_idx,
+ mbox_size, head->elem_size);
+ vipx_dbg("[MBOX(%d)-R] message[v%u/t:%u/s:%u/tid:%u/mid:%u]\n",
+ type, debug->head.msg_version, debug->head.msg_type,
+ debug->head.msg_size, debug->head.trans_id,
+ debug->head.msg_id);
+#ifdef DEBUG_LOG_MAILBOX_DUMP
+ /* 'ret' is reused as a dump loop counter; the function returns 0 below */
+ for (ret = 0; ret < debug->head.msg_size >> 2; ++ret) {
+ vipx_dbg("[MBOX(%d)-R][%4d] %#10x\n",
+ type, ret, ((int *)&debug->body)[ret]);
+ }
+#endif
+
+ /* consume: advance read index with wrap at 2 * mbox_size */
+ head->rmsg_idx = (ridx >= (mbox_size << 1)) ?
+ ridx - (mbox_size << 1) : ridx;
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/* Reset one mailbox head to empty and record its geometry. */
+static void __vipx_mailbox_init(struct vipx_mailbox_head *head,
+ unsigned short mbox_size, unsigned short elem_size)
+{
+ vipx_enter();
+ head->rmsg_idx = 0;
+ head->wmsg_idx = 0;
+
+ head->mbox_size = mbox_size;
+ head->elem_size = elem_size;
+ vipx_leave();
+}
+
+/*
+ * Initialize all four mailbox queues (h2d/d2h x normal/urgent) and
+ * record the protocol versions.  Always returns 0.
+ */
+int vipx_mailbox_init(struct vipx_mailbox_ctrl *mctrl)
+{
+ vipx_enter();
+ mctrl->mbox_version = MAILBOX_VERSION;
+ mctrl->msg_version = MESSAGE_VERSION;
+
+ __vipx_mailbox_init(&mctrl->h2d_normal_head, MAX_NORMAL_MSG_COUNT,
+ sizeof(mctrl->h2d_normal_data[0]));
+ __vipx_mailbox_init(&mctrl->h2d_urgent_head, MAX_URGENT_MSG_COUNT,
+ sizeof(mctrl->h2d_urgent_data[0]));
+ __vipx_mailbox_init(&mctrl->d2h_normal_head, MAX_NORMAL_MSG_COUNT,
+ sizeof(mctrl->d2h_normal_data[0]));
+ __vipx_mailbox_init(&mctrl->d2h_urgent_head, MAX_URGENT_MSG_COUNT,
+ sizeof(mctrl->d2h_urgent_data[0]));
+
+ vipx_leave();
+ return 0;
+}
+
+/* Nothing to tear down yet; kept for symmetry with vipx_mailbox_init(). */
+int vipx_mailbox_deinit(struct vipx_mailbox_ctrl *mctrl)
+{
+ vipx_enter();
+ vipx_leave();
+ return 0;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_MAILBOX_H__
+#define __VIPX_MAILBOX_H__
+
+#include "vipx-message.h"
+
+#define MAILBOX_VERSION (1)
+#define MAX_NORMAL_MSG_COUNT (8)
+#define MAX_URGENT_MSG_COUNT (4)
+
+enum vipx_mailbox_msg_type {
+ NORMAL_MSG,
+ URGENT_MSG,
+};
+
+enum vipx_mailbox_wait {
+ MAILBOX_NOWAIT,
+ MAILBOX_WAIT,
+};
+
+/*
+ * Mailbox geometry and ring indices.  rmsg_idx/wmsg_idx run over
+ * [0, 2 * mbox_size) so "full" and "empty" are distinguishable (see
+ * __vipx_mailbox_get_remain_size() in vipx-mailbox.c).
+ * NOTE(review): this layout appears to be shared with the device
+ * firmware - do not reorder or resize fields without confirming.
+ */
+struct vipx_mailbox_head {
+ unsigned short rmsg_idx;
+ unsigned short wmsg_idx;
+ unsigned short mbox_size;
+ unsigned short elem_size;
+};
+
+/* Four queues: host-to-device and device-to-host, normal and urgent. */
+struct vipx_mailbox_ctrl {
+ unsigned int mbox_version;
+ unsigned int msg_version;
+
+ struct vipx_mailbox_head h2d_normal_head;
+ struct vipx_h2d_message h2d_normal_data[MAX_NORMAL_MSG_COUNT];
+
+ struct vipx_mailbox_head h2d_urgent_head;
+ struct vipx_h2d_message h2d_urgent_data[MAX_URGENT_MSG_COUNT];
+
+ struct vipx_mailbox_head d2h_normal_head;
+ struct vipx_d2h_message d2h_normal_data[MAX_NORMAL_MSG_COUNT];
+
+ struct vipx_mailbox_head d2h_urgent_head;
+ struct vipx_d2h_message d2h_urgent_data[MAX_URGENT_MSG_COUNT];
+};
+
+int vipx_mailbox_check_full(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ int wait);
+int vipx_mailbox_write(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ void *msg, size_t size);
+
+int vipx_mailbox_check_reply(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ int wait);
+int vipx_mailbox_read(struct vipx_mailbox_ctrl *mctrl, unsigned int type,
+ void *msg);
+
+int vipx_mailbox_init(struct vipx_mailbox_ctrl *mctrl);
+int vipx_mailbox_deinit(struct vipx_mailbox_ctrl *mctrl);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <video/videonode.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-#include <linux/firmware.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/videodev2.h>
-#include <linux/videodev2_exynos_camera.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/dma-buf.h>
#include <linux/ion_exynos.h>
-#include <asm/cacheflush.h>
-
-
-#include "vipx-config.h"
-#include "vipx-memory.h"
-
#include <linux/exynos_iovmm.h>
+#include <linux/of_reserved_mem.h>
+#include "vipx-log.h"
+#include "vipx-mailbox.h"
+#include "vipx-system.h"
+#include "vipx-memory.h"
-
-extern void *vipx_fw_code_rmem_base;
-
-#if defined(CONFIG_VIDEOBUF2_DMA_SG)
-/* vipx vb2 buffer operations */
-static inline ulong vipx_vb2_ion_plane_kvaddr(
- struct vipx_vb2_buf *vbuf, u32 plane)
-
-{
- return (ulong)vb2_plane_vaddr(&vbuf->vb.vb2_buf, plane);
-}
-
-static inline ulong vipx_vb2_ion_plane_cookie(
- struct vipx_vb2_buf *vbuf, u32 plane)
-{
- return (ulong)vb2_plane_cookie(&vbuf->vb.vb2_buf, plane);
-}
-
-static dma_addr_t vipx_vb2_ion_plane_dvaddr(
- struct vipx_vb2_buf *vbuf, u32 plane)
-
-{
- return (ulong)vb2_dma_sg_plane_dma_addr(&vbuf->vb.vb2_buf, plane);
-}
-#if 0
-static void vipx_vb2_ion_plane_prepare(struct vipx_vb2_buf *vbuf,
- u32 plane, bool exact)
-{
- struct vb2_buffer *vb = &vbuf->vb.vb2_buf;
- enum dma_data_direction dir;
- unsigned long size;
- u32 spare;
-
- /* skip meta plane */
- spare = vb->num_planes - 1;
- if (plane == spare)
- return;
-
- dir = V4L2_TYPE_IS_OUTPUT(vb->type) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- size = exact ?
- vb2_get_plane_payload(vb, plane) : vb2_plane_size(vb, plane);
-
- vb2_ion_sync_for_device((void *)vb2_plane_cookie(vb, plane), 0,
- size, dir);
-}
-
-static void vipx_vb2_ion_plane_finish(struct vipx_vb2_buf *vbuf,
- u32 plane, bool exact)
-{
- struct vb2_buffer *vb = &vbuf->vb.vb2_buf;
- enum dma_data_direction dir;
- unsigned long size;
- u32 spare;
-
- /* skip meta plane */
- spare = vb->num_planes - 1;
- if (plane == spare)
- return;
-
- dir = V4L2_TYPE_IS_OUTPUT(vb->type) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- size = exact ?
- vb2_get_plane_payload(vb, plane) : vb2_plane_size(vb, plane);
-
- vb2_ion_sync_for_cpu((void *)vb2_plane_cookie(vb, plane), 0,
- size, dir);
-}
-
-static void vipx_vb2_ion_buf_prepare(struct vipx_vb2_buf *vbuf, bool exact)
-{
- struct vb2_buffer *vb = &vbuf->vb.vb2_buf;
- enum dma_data_direction dir;
- unsigned long size;
- u32 plane;
- u32 spare;
-
- dir = V4L2_TYPE_IS_OUTPUT(vb->type) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* skip meta plane */
- spare = vb->num_planes - 1;
-
- for (plane = 0; plane < spare; plane++) {
- size = exact ?
- vb2_get_plane_payload(vb, plane) : vb2_plane_size(vb, plane);
-
- vb2_ion_sync_for_device((void *)vb2_plane_cookie(vb, plane), 0,
- size, dir);
- }
-}
-
-static void vipx_vb2_ion_buf_finish(struct vipx_vb2_buf *vbuf, bool exact)
-{
- struct vb2_buffer *vb = &vbuf->vb.vb2_buf;
- enum dma_data_direction dir;
- unsigned long size;
- u32 plane;
- u32 spare;
-
- dir = V4L2_TYPE_IS_OUTPUT(vb->type) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- /* skip meta plane */
- spare = vb->num_planes - 1;
-
- for (plane = 0; plane < spare; plane++) {
- size = exact ?
- vb2_get_plane_payload(vb, plane) : vb2_plane_size(vb, plane);
-
- vb2_ion_sync_for_cpu((void *)vb2_plane_cookie(vb, plane), 0,
- size, dir);
- }
-
- clear_bit(VIPX_VBUF_CACHE_INVALIDATE, &vbuf->cache_state);
-}
-#endif
-
-const struct vipx_vb2_buf_ops vipx_vb2_buf_ops_ion = {
- .plane_kvaddr = vipx_vb2_ion_plane_kvaddr,
- .plane_cookie = vipx_vb2_ion_plane_cookie,
- .plane_dvaddr = vipx_vb2_ion_plane_dvaddr,
-// .plane_prepare = vipx_vb2_ion_plane_prepare,
-// .plane_finish = vipx_vb2_ion_plane_finish,
-// .buf_prepare = vipx_vb2_ion_buf_prepare,
-// .buf_finish = vipx_vb2_ion_buf_finish,
-};
-
-/* vipx private buffer operations */
-static void vipx_ion_free(struct vipx_priv_buf *pbuf)
-{
- struct vipx_ion_ctx *alloc_ctx;
-
- alloc_ctx = pbuf->ctx;
- mutex_lock(&alloc_ctx->lock);
- if (pbuf->iova)
- ion_iovmm_unmap(pbuf->attachment, pbuf->iova);
- if (pbuf->kva)
- dma_buf_vunmap(pbuf->dma_buf, pbuf->kva);
- mutex_unlock(&alloc_ctx->lock);
-
- dma_buf_unmap_attachment(pbuf->attachment, pbuf->sgt,
- DMA_BIDIRECTIONAL);
- dma_buf_detach(pbuf->dma_buf, pbuf->attachment);
- dma_buf_put(pbuf->dma_buf);
-
- vfree(pbuf);
-}
-
-static void *vipx_ion_kvaddr(struct vipx_priv_buf *pbuf)
-{
- if (!pbuf)
- return 0;
-
- if (!pbuf->kva)
- pbuf->kva = dma_buf_vmap(pbuf->dma_buf);
-
- return pbuf->kva;
-
-}
-
-static dma_addr_t vipx_ion_dvaddr(struct vipx_priv_buf *pbuf)
-{
- dma_addr_t dva = 0;
-
- if (!pbuf)
- return -EINVAL;
-
- if (pbuf->iova == 0)
- return -EINVAL;
-
- dva = pbuf->iova;
-
- return (ulong)dva;
-}
-
-static phys_addr_t vipx_ion_phaddr(struct vipx_priv_buf *pbuf)
-{
- /* PHCONTIG option is not supported in ION */
- return 0;
-}
-
-static void vipx_ion_sync_for_device(struct vipx_priv_buf *pbuf,
+/*
+ * Cache-maintain a kernel-mapped private buffer before device access.
+ * Only does work when the buffer has a kernel mapping (pmem->kvaddr).
+ * TODO(review): the old BUG_ON bounds checks were relaxed to WARN_ON;
+ * confirm continuing after a failed check is acceptable here.
+ */
+void vipx_memory_sync_for_device(struct vipx_priv_mem *pmem,
 	off_t offset, size_t size, enum dma_data_direction dir)
 {
-// struct vipx_ion_ctx *alloc_ctx = pbuf->ctx;
-
- if (pbuf->kva) {
- BUG_ON((offset < 0) || (offset > pbuf->size));
- BUG_ON((offset + size) < size);
- BUG_ON((size > pbuf->size) || ((offset + size) > pbuf->size));
+ vipx_enter();
+ if (pmem->kvaddr) {
+ WARN_ON((offset < 0) || (offset > pmem->size));
+ WARN_ON((offset + size) < size);
+ WARN_ON((size > pmem->size) || ((offset + size) > pmem->size));
- __dma_map_area(pbuf->kva + offset, size, dir);
+ __dma_map_area(pmem->kvaddr + offset, size, dir);
 	}
+ vipx_leave();
 }
-static void vipx_ion_sync_for_cpu(struct vipx_priv_buf *pbuf,
+/*
+ * Cache-maintain a kernel-mapped private buffer after device access,
+ * before the CPU reads it.  Mirror of vipx_memory_sync_for_device().
+ */
+void vipx_memory_sync_for_cpu(struct vipx_priv_mem *pmem,
 	off_t offset, size_t size, enum dma_data_direction dir)
 {
-// struct vipx_ion_ctx *alloc_ctx = pbuf->ctx;
-
- if (pbuf->kva) {
- BUG_ON((offset < 0) || (offset > pbuf->size));
- BUG_ON((offset + size) < size);
- BUG_ON((size > pbuf->size) || ((offset + size) > pbuf->size));
+ vipx_enter();
+ if (pmem->kvaddr) {
+ WARN_ON((offset < 0) || (offset > pmem->size));
+ WARN_ON((offset + size) < size);
+ WARN_ON((size > pmem->size) || ((offset + size) > pmem->size));
- __dma_unmap_area(pbuf->kva + offset, size, dir);
+ __dma_unmap_area(pmem->kvaddr + offset, size, dir);
 	}
+ vipx_leave();
 }
-const struct vipx_priv_buf_ops vipx_priv_buf_ops_ion = {
- .free = vipx_ion_free,
- .kvaddr = vipx_ion_kvaddr,
- .dvaddr = vipx_ion_dvaddr,
- .phaddr = vipx_ion_phaddr,
- .sync_for_device = vipx_ion_sync_for_device,
- .sync_for_cpu = vipx_ion_sync_for_cpu,
-};
-
-/* vipx memory operations */
-static void *vipx_ion_init(struct device *dev)
-{
- struct vipx_ion_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
-
- ctx->dev = dev;
- ctx->alignment = SZ_4K;
- ctx->flags = ION_FLAG_CACHED;
- mutex_init(&ctx->lock);
-
- return ctx;
-
-}
-
-static void vipx_ion_deinit(void *ctx)
-{
- struct vipx_ion_ctx *alloc_ctx = ctx;
-
- mutex_destroy(&alloc_ctx->lock);
- kfree(alloc_ctx);
-}
-
-static struct vipx_priv_buf *vipx_ion_alloc(void *ctx,
- size_t size, size_t align)
+/*
+ * Map a userspace dma-buf (buf->m.fd) for the VIPx device: take a
+ * reference, attach, map the sg-table, map an IOVA and a kernel
+ * virtual address.  On success fills buf->dbuf/attachment/sgt/dvaddr/
+ * kvaddr; on failure everything acquired so far is released.
+ * NOTE(review): fd 0 is a valid file descriptor but is rejected by the
+ * '<= 0' check - confirm that is intentional.
+ */
+static int vipx_memory_map_dmabuf(struct vipx_memory *mem,
+ struct vipx_buffer *buf)
 {
- struct vipx_ion_ctx *alloc_ctx = ctx;
- struct vipx_priv_buf *buf;
- const char *heapname = "ion_system_heap";
- int ret = 0;
-
- buf = vzalloc(sizeof(*buf));
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- size = PAGE_ALIGN(size);
-
- buf->dma_buf = ion_alloc_dmabuf(heapname, size, ION_FLAG_NON_CACHED);
- //buf->dma_buf = ion_alloc_dmabuf(heapname, size, alloc_ctx->flags);
- if (IS_ERR(buf->dma_buf)) {
- ret = -ENOMEM;
- goto err_alloc;
+ int ret;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ dma_addr_t dvaddr;
+ void *kvaddr;
+
+ vipx_enter();
+ if (buf->m.fd <= 0) {
+ ret = -EINVAL;
+ vipx_err("fd(%d) is invalid\n", buf->m.fd);
+ goto p_err;
 }
- buf->attachment = dma_buf_attach(buf->dma_buf, alloc_ctx->dev);
- if (IS_ERR(buf->attachment)) {
- ret = PTR_ERR(buf->attachment);
- goto err_attach;
+ dbuf = dma_buf_get(buf->m.fd);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ vipx_err("dma_buf is invalid (%d/%d)\n", buf->m.fd, ret);
+ goto p_err;
 }
-
- buf->sgt = dma_buf_map_attachment(
- buf->attachment, DMA_BIDIRECTIONAL);
- if (IS_ERR(buf->sgt)) {
- ret = PTR_ERR(buf->sgt);
- goto err_map_dmabuf;
+ buf->dbuf = dbuf;
+
+ buf->aligned_size = PAGE_ALIGN(buf->size);
+ if (buf->aligned_size > dbuf->size) {
+ ret = -EINVAL;
+ vipx_err("size is invalid (%zu/%zu/%zu)\n",
+ buf->size, buf->aligned_size, dbuf->size);
+ goto p_err_size;
 }
-
- buf->ctx = alloc_ctx;
- buf->size = size;
- buf->direction = DMA_BIDIRECTIONAL;
- buf->ops = &vipx_priv_buf_ops_ion;
-
- mutex_lock(&alloc_ctx->lock);
- buf->iova = ion_iovmm_map(buf->attachment, 0,
- buf->size, buf->direction, 0);
- if (IS_ERR_VALUE(buf->iova)) {
- ret = (int)buf->iova;
- mutex_unlock(&alloc_ctx->lock);
- goto err_ion_map_io;
+ attachment = dma_buf_attach(dbuf, mem->dev);
+ if (IS_ERR(attachment)) {
+ ret = PTR_ERR(attachment);
+ vipx_err("failed to attach dma-buf (%d)\n", ret);
+ goto p_err_attach;
 }
- mutex_unlock(&alloc_ctx->lock);
+ buf->attachment = attachment;
- return buf;
-
-err_ion_map_io:
- dma_buf_unmap_attachment(buf->attachment, buf->sgt,
- DMA_BIDIRECTIONAL);
-err_map_dmabuf:
- dma_buf_detach(buf->dma_buf, buf->attachment);
-err_attach:
- dma_buf_put(buf->dma_buf);
-err_alloc:
- vfree(buf);
-
-
- pr_err("%s: Error occured while allocating\n", __func__);
- return ERR_PTR(ret);
-}
-
-static int vipx_ion_resume(void *ctx)
-{
- struct vipx_ion_ctx *alloc_ctx = ctx;
- int ret = 0;
-
- if (!alloc_ctx)
- return -ENOENT;
-
- mutex_lock(&alloc_ctx->lock);
- if (alloc_ctx->iommu_active_cnt == 0)
- ret = iovmm_activate(alloc_ctx->dev);
- if (!ret)
- alloc_ctx->iommu_active_cnt++;
- mutex_unlock(&alloc_ctx->lock);
+ sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ vipx_err("failed to map attachment (%d)\n", ret);
+ goto p_err_map_attach;
+ }
+ buf->sgt = sgt;
+
+ dvaddr = ion_iovmm_map(attachment, 0, buf->aligned_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (IS_ERR_VALUE(dvaddr)) {
+ ret = (int)dvaddr;
+ vipx_err("failed to map iova (%d)\n", ret);
+ goto p_err_iova;
+ }
+ buf->dvaddr = dvaddr;
+
+ /*
+ * TODO: check sync.  NOTE(review): dma_buf_vmap() returns NULL (not
+ * an ERR_PTR) on failure in some kernel versions - confirm IS_ERR()
+ * is the right check here, or a NULL return slips through.
+ */
+ kvaddr = dma_buf_vmap(dbuf);
+ if (IS_ERR(kvaddr)) {
+ ret = PTR_ERR(kvaddr);
+ vipx_err("failed to map kvaddr (%d)\n", ret);
+ goto p_err_kva;
+ }
+ buf->kvaddr = kvaddr;
+ vipx_leave();
+ return 0;
+p_err_kva:
+ ion_iovmm_unmap(attachment, dvaddr);
+p_err_iova:
+ dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+p_err_map_attach:
+ dma_buf_detach(dbuf, attachment);
+p_err_attach:
+p_err_size:
+ dma_buf_put(dbuf);
+p_err:
 	return ret;
 }
-static void vipx_ion_suspend(void *ctx)
+/*
+ * Undo vipx_memory_map_dmabuf() in reverse order: kernel mapping,
+ * IOVA, sg-table, attachment, then drop the dma-buf reference.
+ * Always returns 0.
+ */
+static int vipx_memory_unmap_dmabuf(struct vipx_memory *mem,
+ struct vipx_buffer *buf)
 {
- struct vipx_ion_ctx *alloc_ctx = ctx;
-
- if (!alloc_ctx)
- return;
-
- mutex_lock(&alloc_ctx->lock);
- BUG_ON(alloc_ctx->iommu_active_cnt == 0);
-
- if (--alloc_ctx->iommu_active_cnt == 0)
- iovmm_deactivate(alloc_ctx->dev);
- mutex_unlock(&alloc_ctx->lock);
+ vipx_enter();
+ dma_buf_vunmap(buf->dbuf, buf->kvaddr);
+ ion_iovmm_unmap(buf->attachment, buf->dvaddr);
+ dma_buf_unmap_attachment(buf->attachment, buf->sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(buf->dbuf, buf->attachment);
+ dma_buf_put(buf->dbuf);
+ vipx_leave();
+ return 0;
 }
-const struct vipx_mem_ops vipx_mem_ops_ion = {
- .init = vipx_ion_init,
- .cleanup = vipx_ion_deinit,
- .resume = vipx_ion_resume,
- .suspend = vipx_ion_suspend,
- .alloc = vipx_ion_alloc,
-};
-#endif
-
-/* vipx private buffer operations */
-static void vipx_km_free(struct vipx_priv_buf *pbuf)
+/* Userptr mapping is not supported; stub always returns -EINVAL. */
+static int vipx_memory_map_userptr(struct vipx_memory *mem,
+ struct vipx_buffer *buf)
 {
- kfree(pbuf->kvaddr);
- kfree(pbuf);
+ vipx_enter();
+ vipx_leave();
+ return -EINVAL;
 }
-static void *vipx_km_kvaddr(struct vipx_priv_buf *pbuf)
+/* Userptr unmapping is not supported; stub always returns -EINVAL. */
+static int vipx_memory_unmap_userptr(struct vipx_memory *mem,
+ struct vipx_buffer *buf)
 {
- if (!pbuf)
- return 0;
-
- return pbuf->kvaddr;
+ vipx_enter();
+ vipx_leave();
+ return -EINVAL;
 }
-static phys_addr_t vipx_km_phaddr(struct vipx_priv_buf *pbuf)
-{
- phys_addr_t pa = 0;
-
- if (!pbuf)
- return 0;
-
- pa = virt_to_phys(pbuf->kvaddr);
-
- return pa;
-}
-const struct vipx_priv_buf_ops vipx_priv_buf_ops_km = {
- .free = vipx_km_free,
- .kvaddr = vipx_km_kvaddr,
- .phaddr = vipx_km_phaddr,
+/* Memory ops table consumed via mem->mops (see vipx-kernel-binary.c). */
+const struct vipx_memory_ops vipx_memory_ops = {
+ .map_dmabuf = vipx_memory_map_dmabuf,
+ .unmap_dmabuf = vipx_memory_unmap_dmabuf,
+ .map_userptr = vipx_memory_map_userptr,
+ .unmap_userptr = vipx_memory_unmap_userptr
 };
-static struct vipx_priv_buf *vipx_kmalloc(size_t size, size_t align)
+/*
+ * Allocate one driver-private buffer from the ION system heap and map it:
+ * dma-buf -> attachment -> sg_table -> device virtual address, plus an
+ * optional kernel mapping when pmem->kmap is set. On success all handles
+ * are stored in pmem; on failure everything acquired so far is released.
+ */
+static int __vipx_memory_alloc(struct vipx_memory *mem,
+		struct vipx_priv_mem *pmem)
{
-	struct vipx_priv_buf *buf = NULL;
-	int ret = 0;
-
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
-
-	buf->kvaddr = kzalloc(size, GFP_KERNEL);
-	if (!buf->kvaddr) {
-		ret = -ENOMEM;
-		goto err_priv_alloc;
+	int ret;
+	const char *heap_name = "ion_system_heap";
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	dma_addr_t dvaddr;
+	void *kvaddr;
+
+	vipx_enter();
+	dbuf = ion_alloc_dmabuf(heap_name, pmem->size, pmem->flags);
+	if (IS_ERR(dbuf)) {
+		ret = PTR_ERR(dbuf);
+		vipx_err("Failed to allocate dma_buf (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_alloc;
	}
+	pmem->dbuf = dbuf;
+
+	attachment = dma_buf_attach(dbuf, mem->dev);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		vipx_err("Failed to attach dma_buf (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_attach;
+	}
+	pmem->attachment = attachment;
+
+	sgt = dma_buf_map_attachment(attachment, pmem->direction);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		vipx_err("Failed to map attachment (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_map_attachment;
+	}
+	pmem->sgt = sgt;
-	buf->size = size;
-	buf->align = align;
-	buf->ops = &vipx_priv_buf_ops_km;
-
-	return buf;
-
-err_priv_alloc:
-	kfree(buf);
+	dvaddr = ion_iovmm_map(attachment, 0, pmem->size, pmem->direction, 0);
+	if (IS_ERR_VALUE(dvaddr)) {
+		ret = (int)dvaddr;
+		vipx_err("Failed to map dvaddr (%d) [%s]\n", ret, pmem->name);
+		goto p_err_map_dva;
+	}
+	pmem->dvaddr = dvaddr;
+
+	if (pmem->kmap) {
+		kvaddr = dma_buf_vmap(dbuf);
+		/* dma_buf_vmap() returns NULL on failure (it does not
+		 * return ERR_PTR values), so IS_ERR() alone would let a
+		 * failed mapping through as success; check both.
+		 */
+		if (IS_ERR_OR_NULL(kvaddr)) {
+			ret = kvaddr ? PTR_ERR(kvaddr) : -ENOMEM;
+			vipx_err("Failed to map kvaddr (%d) [%s]\n",
+					ret, pmem->name);
+			goto p_err_kmap;
+		}
+		pmem->kvaddr = kvaddr;
+	}
-	return ERR_PTR(ret);
+	vipx_leave();
+	return 0;
+p_err_kmap:
+	ion_iovmm_unmap(attachment, dvaddr);
+p_err_map_dva:
+	dma_buf_unmap_attachment(attachment, sgt, pmem->direction);
+p_err_map_attachment:
+	dma_buf_detach(dbuf, attachment);
+p_err_attach:
+	dma_buf_put(dbuf);
+p_err_alloc:
+	return ret;
}
-int vipx_memory_probe(struct vipx_memory *mem, struct device *dev)
+/*
+ * Release a buffer allocated by __vipx_memory_alloc, in reverse order of
+ * acquisition. The kernel mapping is only removed when one was created
+ * (pmem->kmap).
+ */
+static void __vipx_memory_free(struct vipx_memory *mem,
+		struct vipx_priv_mem *pmem)
{
-	u32 ret = 0;
-
-#if defined(CONFIG_VIDEOBUF2_DMA_SG)
-	mem->vipx_mem_ops = &vipx_mem_ops_ion;
-	mem->vb2_mem_ops = &vb2_dma_sg_memops;
-	mem->vipx_vb2_buf_ops = &vipx_vb2_buf_ops_ion;
-	mem->kmalloc = &vipx_kmalloc;
-#endif
-
-	mem->dev = dev;
-	mem->iommu_domain = get_domain_from_dev(dev);
-
-	mem->default_ctx = CALL_PTR_MEMOP(mem, init, dev);
-	if (IS_ERR_OR_NULL(mem->default_ctx)) {
-		if (IS_ERR(mem->default_ctx))
-			ret = PTR_ERR(mem->default_ctx);
-		else
-			ret = -EINVAL;
-		goto p_err;
-	}
-
-p_err:
-	probe_info("%s():%d\n", __func__, ret);
-	return 0;
+	vipx_enter();
+	if (pmem->kmap)
+		dma_buf_vunmap(pmem->dbuf, pmem->kvaddr);
+
+	ion_iovmm_unmap(pmem->attachment, pmem->dvaddr);
+	dma_buf_unmap_attachment(pmem->attachment, pmem->sgt, pmem->direction);
+	dma_buf_detach(pmem->dbuf, pmem->attachment);
+	/* Fix: dma_buf_put() was called twice here, dropping the dma-buf
+	 * refcount once too often — the only reference taken in
+	 * __vipx_memory_alloc is the one from ion_alloc_dmabuf(), so a
+	 * second put frees the buffer prematurely (use-after-free risk).
+	 */
+	dma_buf_put(pmem->dbuf);
+	vipx_leave();
}
-dma_addr_t vipx_allocate_heap(struct vipx_memory *mem, u32 size)
+/*
+ * Allocate the per-graph heap buffer and return its device virtual
+ * address. On failure the negative errno is returned cast into the
+ * dma_addr_t, matching the IS_ERR_VALUE() convention used for
+ * ion_iovmm_map() results — callers must check with IS_ERR_VALUE().
+ */
+dma_addr_t vipx_memory_allocate_heap(struct vipx_memory *mem,
+		int id, size_t size)
{
-	int ret = 0;
-	struct vipx_minfo *minfo;
+	int ret;
+	struct vipx_priv_mem *heap;
-	BUG_ON(!mem);
+	vipx_enter();
+	heap = &mem->heap;
-	minfo = &mem->info;
+	/* Fix: id is a signed int — use %d, not %u */
+	snprintf(heap->name, VIPX_PRIV_MEM_NAME_LEN, "vipx_heap_%d", id);
+	heap->size = PAGE_ALIGN(size);
+	heap->flags = 0;
+	heap->direction = DMA_BIDIRECTIONAL;
+	heap->kmap = false;
-	if (size != 0) {
-		minfo->pb_heap = vipx_ion_alloc(mem->default_ctx, size, SZ_4K);
-		if (IS_ERR_OR_NULL(minfo->pb_heap)) {
-			vipx_err("failed to allocate buffer for VIPX_CM7_HEAP_SIZE");
-			ret = -ENOMEM;
-		}
-#ifdef DUMP_DEBUG_LOG_REGION
-		minfo->kvaddr_heap = vipx_ion_kvaddr(minfo->pb_heap);
-#endif
-		minfo->dvaddr_heap = vipx_ion_dvaddr(minfo->pb_heap);
-		vipx_info("heap kva(0x%p), dva(0x%x), size(%ld)\n",
-			minfo->kvaddr_heap, (u32)minfo->dvaddr_heap, minfo->pb_heap->size);
-	}
-	return minfo->dvaddr_heap;
+	ret = __vipx_memory_alloc(mem, heap);
+	if (ret)
+		goto p_err;
+	vipx_leave();
+	return heap->dvaddr;
+p_err:
+	return ret;
}
-void vipx_free_heap(struct vipx_memory *mem, struct vipx_binary *binary, u32 heap_size)
+/* Release the heap buffer allocated by vipx_memory_allocate_heap */
+void vipx_memory_free_heap(struct vipx_memory *mem)
{
-	struct vipx_minfo *minfo;
-#ifdef DUMP_DEBUG_LOG_REGION
-	int ret;
-#endif
-	BUG_ON(!mem);
-
-	minfo = &mem->info;
-#ifdef DUMP_DEBUG_LOG_REGION
-
-	vipx_info("heap kva %p\n", minfo->kvaddr_heap);
-
-	if (minfo->pb_heap) {
-		if (!IS_ERR_OR_NULL(minfo->kvaddr_heap)) {
-			ret = vipx_binary_write(binary, VIPX_FW_PATH1, "vipx_heap.bin", minfo->kvaddr_heap, heap_size);
-			if (ret)
-				vipx_err("vipx_binary_write is fail(%d)\n", ret);
-		}
-	}
-#endif
-
-	vipx_info("Free heap\n");
-	if (minfo->pb_heap) {
-		vipx_ion_free(minfo->pb_heap);
-		minfo->pb_heap = 0;
-		minfo->kvaddr_heap = 0;
-		minfo->dvaddr_heap = 0;
-	}
+	vipx_enter();
+	__vipx_memory_free(mem, &mem->heap);
+	vipx_leave();
}
int vipx_memory_open(struct vipx_memory *mem)
{
-	int ret = 0;
-	struct vipx_minfo *minfo;
-
-	BUG_ON(!mem);
-
-	minfo = &mem->info;
-
-	/* TODO */
-	if (VIPX_CM7_DRAM_BIN_SIZE != 0) {
-#if 1
-		minfo->paddr_fw = VIPX_IOVA_DRAM_FIRMWARE;
-		minfo->kvaddr_fw = vipx_fw_code_rmem_base;
-		minfo->dvaddr_fw = VIPX_IOVA_DRAM_FIRMWARE;
-		iommu_map(mem->iommu_domain, VIPX_IOVA_DRAM_FIRMWARE, minfo->paddr_fw,
-			VIPX_CM7_DRAM_BIN_SIZE, 0);
-		//memset(minfo->kvaddr_fw, 0x0, VIPX_CM7_DRAM_BIN_SIZE);
-#else
-		minfo->kvaddr_fw = dmam_alloc_coherent(mem->dev, VIPX_CM7_DRAM_BIN_SIZE,
-			&minfo->paddr_fw, GFP_KERNEL);
-		if (IS_ERR_OR_NULL(minfo->kvaddr_fw)) {
-			vipx_err("Failed to allocate coherent memory: %ld\n",
-				PTR_ERR(minfo->kvaddr_fw));
-			ret = PTR_ERR(minfo->kvaddr_fw);
-			goto p_err;
-		}
-		vipx_info("%s(%pa) is mapped on %p with size of %d\n",
-			"vipx_firmware", &minfo->paddr_fw, minfo->kvaddr_fw,
-			VIPX_CM7_DRAM_BIN_SIZE);
-
-		minfo->dvaddr_fw = VIPX_IOVA_DRAM_FIRMWARE;
-
-		memset(minfo->kvaddr_fw, 0x0, VIPX_CM7_DRAM_BIN_SIZE);
-		iommu_map(mem->iommu_domain, VIPX_IOVA_DRAM_FIRMWARE, minfo->paddr_fw,
-			VIPX_CM7_DRAM_BIN_SIZE, 0);
-#endif
-	}
-
-	if (VIPX_MBOX_SIZE != 0) {
-		minfo->kvaddr_mbox = dmam_alloc_coherent(mem->dev, VIPX_MBOX_SIZE,
-			&minfo->paddr_mbox, GFP_KERNEL);
-		if (IS_ERR_OR_NULL(minfo->kvaddr_mbox)) {
-			vipx_err("Failed to allocate coherent memory: %ld\n",
-				PTR_ERR(minfo->kvaddr_mbox));
-			ret = PTR_ERR(minfo->kvaddr_mbox);
-			goto p_err;
-		}
-		vipx_info("%s(%pa) is mapped on %p with size of %d\n",
-			"vipx_mbox", &minfo->paddr_mbox, minfo->kvaddr_mbox,
-			VIPX_MBOX_SIZE);
-
-		minfo->dvaddr_mbox = VIPX_IOVA_DRAM_MBOX;
-
-		//memset(minfo->kvaddr_mbox, 0x0, VIPX_MBOX_SIZE);
-		iommu_map(mem->iommu_domain, VIPX_IOVA_DRAM_MBOX, minfo->paddr_mbox,
-			VIPX_MBOX_SIZE, 0);
-	}
-
-	if (VIPX_DEBUG_SIZE != 0) {
-		minfo->pb_debug = vipx_ion_alloc(mem->default_ctx, VIPX_DEBUG_SIZE, SZ_4K);
-		if (IS_ERR_OR_NULL(minfo->pb_debug)) {
-			vipx_err("failed to allocate buffer for VIPX_DEBUG_SIZE");
-			ret = -ENOMEM;
-		}
-		minfo->kvaddr_debug = vipx_ion_kvaddr(minfo->pb_debug);
-		minfo->dvaddr_debug = vipx_ion_dvaddr(minfo->pb_debug);
-		vipx_info("debug kva(0x%p), dva(0x%x), size(%ld)\n", minfo->kvaddr_debug, (u32)minfo->dvaddr_debug, minfo->pb_debug->size);
+	int ret;
+	struct vipx_priv_mem *fw;
+	struct vipx_priv_mem *mbox;
+	struct vipx_priv_mem *heap;
+	struct vipx_priv_mem *debug;
+
+	vipx_enter();
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	heap = &mem->heap;
+	debug = &mem->debug;
+
+	/* The firmware region lives in reserved memory (set up in probe);
+	 * only the IOMMU mapping to its fixed device address is created here.
+	 */
+	ret = iommu_map(mem->domain, fw->dvaddr, fw->paddr, fw->size, 0);
+	if (ret) {
+		vipx_err("iommu mapping is failed (0x%x/0x%x/%zu)(%d)\n",
+				(int)fw->dvaddr, (int)fw->paddr, fw->size, ret);
+		goto p_err_map;
	}
-p_err:
+	/* TODO check */
+	//fw->kvaddr = dmam_alloc_coherent(mem->dev, fw->size, &fw->paddr,
+	//		GFP_KERNEL);
+	//if (!fw->kvaddr) {
+	//	ret = -ENOMEM;
+	//	vipx_err("Failed to allocate coherent memory for CM7 DRAM\n");
+	//	goto p_err_cm7;
+	//}
+	//
+	//iommu_map(mem->domain, fw->dvaddr, fw->paddr, fw->size, 0);
+
+	/* mbox/heap/debug are ION allocations; unwind in reverse on failure */
+	ret = __vipx_memory_alloc(mem, mbox);
+	if (ret)
+		goto p_err_mbox;
+
+	ret = __vipx_memory_alloc(mem, heap);
+	if (ret)
+		goto p_err_heap;
+
+	ret = __vipx_memory_alloc(mem, debug);
+	if (ret)
+		goto p_err_debug;
+
+	vipx_leave();
+	return 0;
+p_err_debug:
+	__vipx_memory_free(mem, heap);
+p_err_heap:
+	__vipx_memory_free(mem, mbox);
+p_err_mbox:
+	iommu_unmap(mem->domain, fw->dvaddr, fw->size);
+p_err_map:
	return ret;
}
int vipx_memory_close(struct vipx_memory *mem)
{
-	int ret = 0;
-	struct vipx_minfo *minfo;
-
-	BUG_ON(!mem);
-
-	minfo = &mem->info;
-
-	iommu_unmap(mem->iommu_domain, VIPX_IOVA_DRAM_FIRMWARE, VIPX_CM7_DRAM_BIN_SIZE);
-
-#if 0
-	if (!IS_ERR_OR_NULL(minfo->kvaddr_fw)) {
-		dmam_free_coherent(mem->dev, VIPX_CM7_DRAM_BIN_SIZE, minfo->kvaddr_fw,
-			minfo->paddr_fw);
-	}
-#endif
+	struct vipx_priv_mem *fw;
+	struct vipx_priv_mem *mbox;
+	struct vipx_priv_mem *heap;
+	struct vipx_priv_mem *debug;
+
+	vipx_enter();
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	heap = &mem->heap;
+	debug = &mem->debug;
+
+	/* Release buffers in reverse order of vipx_memory_open */
+	__vipx_memory_free(mem, debug);
+	__vipx_memory_free(mem, heap);
+	__vipx_memory_free(mem, mbox);
+
+	iommu_unmap(mem->domain, fw->dvaddr, fw->size);
+	/* TODO check */
+	//dmam_free_coherent(mem->dev, fw->size, fw->kvaddr, fw->paddr);
+
+	vipx_leave();
+	return 0;
+}
- iommu_unmap(mem->iommu_domain, VIPX_IOVA_DRAM_MBOX, VIPX_MBOX_SIZE);
+struct reserved_mem *vipx_cm7_rmem;
- if (!IS_ERR_OR_NULL(minfo->kvaddr_mbox)) {
- dmam_free_coherent(mem->dev, VIPX_MBOX_SIZE, minfo->kvaddr_mbox,
- minfo->paddr_mbox);
- }
+/* Early-boot hook: record the CM7 firmware reserved-memory region so
+ * vipx_memory_probe can use its base/size later.
+ */
+static int __init vipx_cm7_rmem_setup(struct reserved_mem *rmem)
+{
+	vipx_enter();
+	vipx_info("base=%pa, size=%pa\n", &rmem->base, &rmem->size);
+	vipx_cm7_rmem = rmem;
+	vipx_leave();
+	return 0;
+}
- if (minfo->pb_debug) {
- vipx_ion_free(minfo->pb_debug);
- minfo->pb_debug = 0;
- minfo->kvaddr_debug = 0;
- minfo->dvaddr_debug = 0;
- }
+RESERVEDMEM_OF_DECLARE(vipx_cm7_rmem, "exynos,vipx_fw_code_rmem",
+ vipx_cm7_rmem_setup);
- if (minfo->pb_heap) {
- vipx_ion_free(minfo->pb_heap);
- minfo->pb_heap = 0;
- minfo->kvaddr_heap = 0;
- minfo->dvaddr_heap = 0;
- }
+/* One-time setup of the memory subsystem: DMA mask, IOMMU domain, memory
+ * ops, and the static description (name/size/flags) of each private
+ * buffer. No allocation happens here — see vipx_memory_open.
+ */
+int vipx_memory_probe(struct vipx_system *sys)
+{
+	struct device *dev;
+	struct vipx_memory *mem;
+	struct vipx_priv_mem *fw;
+	struct vipx_priv_mem *mbox;
+	struct vipx_priv_mem *heap;
+	struct vipx_priv_mem *debug;
+
+	vipx_enter();
+	dev = sys->dev;
+	/* NOTE(review): dma_set_mask() return value is ignored — confirm
+	 * a 36-bit mask can never fail on this platform.
+	 */
+	dma_set_mask(dev, DMA_BIT_MASK(36));
+
+	mem = &sys->memory;
+	mem->dev = dev;
+	mem->domain = get_domain_from_dev(dev);
+	mem->mops = &vipx_memory_ops;
+
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	heap = &mem->heap;
+	debug = &mem->debug;
+
+	/* Firmware region comes from the reserved memory recorded in
+	 * vipx_cm7_rmem_setup; its device address is fixed.
+	 */
+	snprintf(fw->name, VIPX_PRIV_MEM_NAME_LEN, "vipx_cm7_dram_bin");
+	fw->size = vipx_cm7_rmem->size;
+	fw->kvaddr = phys_to_virt(vipx_cm7_rmem->base);
+	fw->paddr = vipx_cm7_rmem->base;
+	fw->dvaddr = VIPX_CM7_DRAM_BIN_DVADDR;
+
+	snprintf(mbox->name, VIPX_PRIV_MEM_NAME_LEN, "vipx_mbox");
+	mbox->size = PAGE_ALIGN(sizeof(struct vipx_mailbox_ctrl));
+	mbox->flags = 0;
+	mbox->direction = DMA_BIDIRECTIONAL;
+	mbox->kmap = true;
+
+	snprintf(heap->name, VIPX_PRIV_MEM_NAME_LEN, "vipx_heap");
+	heap->size = PAGE_ALIGN(VIPX_HEAP_SIZE);
+	heap->flags = 0;
+	heap->direction = DMA_BIDIRECTIONAL;
+
+	snprintf(debug->name, VIPX_PRIV_MEM_NAME_LEN, "vipx_debug");
+	debug->size = PAGE_ALIGN(VIPX_DEBUG_SIZE);
+	debug->flags = 0;
+	debug->direction = DMA_BIDIRECTIONAL;
+	/* TODO remove */
+	debug->kmap = true;
+
+	vipx_leave();
+	return 0;
+}
- return ret;
+/* Counterpart of vipx_memory_probe; nothing to undo since probe only
+ * fills in static descriptions.
+ */
+void vipx_memory_remove(struct vipx_memory *mem)
+{
+	vipx_enter();
+	vipx_leave();
}
--- /dev/null
+/*
+ * Samsung Exynos5 SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_MEMORY_H__
+#define __VIPX_MEMORY_H__
+
+#include <linux/dma-buf.h>
+
+#include "vs4l.h"
+#include "vipx-common-type.h"
+
+/* TODO temp for test */
+#define VIPX_CM7_DRAM_BIN_DVADDR (0xB8000000)
+#define VIPX_HEAP_SIZE (SZ_1M)
+#define VIPX_DEBUG_SIZE (SZ_16M)
+
+#define VIPX_PRIV_MEM_NAME_LEN (30)
+
+struct vipx_system;
+struct vipx_memory;
+
+struct vipx_buffer {
+ unsigned char addr_type;
+ unsigned char mem_attr;
+ unsigned char mem_type;
+ unsigned char reserved;
+ size_t size;
+ size_t aligned_size;
+ union {
+ unsigned long userptr;
+ int fd;
+ } m;
+
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ dma_addr_t dvaddr;
+ void *kvaddr;
+
+ struct vs4l_roi roi;
+};
+
+struct vipx_memory_ops {
+ int (*map_dmabuf)(struct vipx_memory *mem, struct vipx_buffer *buf);
+ int (*unmap_dmabuf)(struct vipx_memory *mem, struct vipx_buffer *buf);
+ int (*map_userptr)(struct vipx_memory *mem, struct vipx_buffer *buf);
+ int (*unmap_userptr)(struct vipx_memory *mem, struct vipx_buffer *buf);
+};
+
+struct vipx_priv_mem {
+ char name[VIPX_PRIV_MEM_NAME_LEN];
+ size_t size;
+ long flags;
+ enum dma_data_direction direction;
+ bool kmap;
+
+ void *kvaddr;
+ dma_addr_t paddr;
+ dma_addr_t dvaddr;
+
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+};
+
+struct vipx_memory {
+ struct device *dev;
+ struct iommu_domain *domain;
+ const struct vipx_memory_ops *mops;
+
+ struct vipx_priv_mem fw;
+ struct vipx_priv_mem mbox;
+ struct vipx_priv_mem heap;
+ struct vipx_priv_mem debug;
+};
+
+dma_addr_t vipx_memory_allocate_heap(struct vipx_memory *mem,
+ int id, size_t size);
+void vipx_memory_free_heap(struct vipx_memory *mem);
+
+int vipx_memory_open(struct vipx_memory *mem);
+int vipx_memory_close(struct vipx_memory *mem);
+int vipx_memory_probe(struct vipx_system *sys);
+void vipx_memory_remove(struct vipx_memory *mem);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_MESSAGE_H__
+#define __VIPX_MESSAGE_H__
+
+#include "vipx-common-type.h"
+
+#define MESSAGE_VERSION (1)
+
+#define MAX_NUM_KERNEL (4)
+#define MAX_NUM_TEMP_BUF (32)
+#define MAX_SIZE_USER_PARA (1024)
+
+enum vipx_h2d_message_id {
+ H2D_INIT_REQ,
+ H2D_LOAD_GRAPH_REQ,
+ H2D_EXECUTE_REQ,
+ H2D_UNLOAD_GRAPH_REQ,
+ H2D_DEINIT_REQ,
+ H2D_MESSAGE_NUM
+};
+
+enum vipx_d2h_message_id {
+ D2H_INIT_RSP,
+ D2H_LOAD_GRAPH_RSP,
+ D2H_EXECUTE_RSP,
+ D2H_UNLOAD_GRAPH_RSP,
+ D2H_DEINIT_RSP,
+ D2H_BOOTUP_NTF,
+ D2H_MESSAGE_NUM
+};
+
+struct vipx_common_device_addr {
+ unsigned int addr;
+ unsigned int size;
+};
+
+struct vipx_h2d_init_req {
+ struct vipx_common_device_addr heap;
+ struct vipx_common_device_addr boot_kernal;
+};
+
+struct vipx_h2d_load_graph_req {
+ struct vipx_common_graph_info graph_info;
+ unsigned char user_para[MAX_SIZE_USER_PARA];
+};
+
+struct vipx_h2d_execute_req {
+ unsigned int num_kernel_bin;
+ unsigned int reserved;
+ struct vipx_common_device_addr kernel_bin[MAX_NUM_KERNEL];
+ struct vipx_common_execute_info execute_info;
+ unsigned char user_para[MAX_SIZE_USER_PARA];
+};
+
+struct vipx_h2d_unload_graph_req {
+ unsigned int gid;
+ unsigned int reserved;
+};
+
+struct vipx_h2d_deinit_req {
+ unsigned int dummy;
+ unsigned int reserved;
+};
+
+struct vipx_d2h_init_rsp {
+ int result;
+ unsigned int reserved;
+};
+
+struct vipx_d2h_load_graph_rsp {
+ int result;
+ unsigned int gid;
+};
+
+struct vipx_d2h_execute_rsp {
+ int result;
+ unsigned int gid;
+ unsigned int macro_sgid;
+ unsigned int reserved;
+};
+
+struct vipx_d2h_unload_graph_rsp {
+ int result;
+ unsigned int gid;
+};
+
+struct vipx_d2h_deinit_rsp {
+ int result;
+ unsigned int reserved;
+};
+
+struct vipx_d2h_bootup_ntf {
+ int result;
+ unsigned int reserved;
+};
+
+struct vipx_common_message_head {
+ unsigned int mbox_version;
+ unsigned int msg_version;
+ unsigned int msg_type;
+ unsigned int msg_size;
+ unsigned int trans_id;
+ unsigned int msg_id;
+};
+
+union vipx_h2d_message_body {
+ struct vipx_h2d_init_req init_req;
+ struct vipx_h2d_load_graph_req load_graph_req;
+ struct vipx_h2d_execute_req execute_req;
+ struct vipx_h2d_unload_graph_req unload_graph_req;
+ struct vipx_h2d_deinit_req deinit_req;
+};
+
+struct vipx_h2d_message {
+ struct vipx_common_message_head head;
+ union vipx_h2d_message_body body;
+};
+
+union vipx_d2h_message_body {
+ struct vipx_d2h_bootup_ntf bootup_ntf;
+ struct vipx_d2h_init_rsp init_rsp;
+ struct vipx_d2h_load_graph_rsp load_graph_rsp;
+ struct vipx_d2h_execute_rsp execute_rsp;
+ struct vipx_d2h_unload_graph_rsp unload_graph_rsp;
+ struct vipx_d2h_deinit_rsp deinit_rsp;
+};
+
+struct vipx_d2h_message {
+ struct vipx_common_message_head head;
+ union vipx_d2h_message_body body;
+};
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/ion_exynos.h>
+
+#include "vipx-log.h"
+#include "vipx-device.h"
+#include "vipx-core.h"
+#include "vipx-context.h"
#include "vipx-queue.h"
-#include "vipx-config.h"
-#include "vipx-taskmgr.h"
-#include "vipx-graph.h"
-extern const struct vb_ops vb_ops;
-extern const struct vipx_queue_ops vipx_queue_ops;
+static struct vipx_format_type vipx_fmts[] = {
+ {
+ .name = "RGB",
+ .colorspace = VS4L_DF_IMAGE_RGB,
+ .planes = 1,
+ .bitsperpixel = { 24 }
+ }, {
+ .name = "ARGB",
+ .colorspace = VS4L_DF_IMAGE_RGBX,
+ .planes = 1,
+ .bitsperpixel = { 32 }
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .colorspace = VS4L_DF_IMAGE_NV12,
+ .planes = 2,
+ .bitsperpixel = { 8, 8 }
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .colorspace = VS4L_DF_IMAGE_NV21,
+ .planes = 2,
+ .bitsperpixel = { 8, 8 }
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cr/Cb",
+ .colorspace = VS4L_DF_IMAGE_YV12,
+ .planes = 3,
+ .bitsperpixel = { 8, 4, 4 }
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .colorspace = VS4L_DF_IMAGE_I420,
+ .planes = 3,
+ .bitsperpixel = { 8, 4, 4 }
+ }, {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .colorspace = VS4L_DF_IMAGE_I422,
+ .planes = 3,
+ .bitsperpixel = { 8, 4, 4 }
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .colorspace = VS4L_DF_IMAGE_YUYV,
+ .planes = 1,
+ .bitsperpixel = { 16 }
+ }, {
+ .name = "YUV 4:4:4 packed, YCbCr",
+ .colorspace = VS4L_DF_IMAGE_YUV4,
+ .planes = 1,
+ .bitsperpixel = { 24 }
+ }, {
+ .name = "VX unsigned 8 bit",
+ .colorspace = VS4L_DF_IMAGE_U8,
+ .planes = 1,
+ .bitsperpixel = { 8 }
+ }, {
+ .name = "VX unsigned 16 bit",
+ .colorspace = VS4L_DF_IMAGE_U16,
+ .planes = 1,
+ .bitsperpixel = { 16 }
+ }, {
+ .name = "VX unsigned 32 bit",
+ .colorspace = VS4L_DF_IMAGE_U32,
+ .planes = 1,
+ .bitsperpixel = { 32 }
+ }, {
+ .name = "VX signed 16 bit",
+ .colorspace = VS4L_DF_IMAGE_S16,
+ .planes = 1,
+ .bitsperpixel = { 16 }
+ }, {
+ .name = "VX signed 32 bit",
+ .colorspace = VS4L_DF_IMAGE_S32,
+ .planes = 1,
+ .bitsperpixel = { 32 }
+ }
+};
-int vipx_queue_init(struct vipx_queue *queue,
- struct vipx_memory *memory,
- struct mutex *lock)
+/* Look up a VS4L colorspace code in the static vipx_fmts table.
+ * Returns the matching entry or ERR_PTR(-EINVAL) when unknown.
+ */
+static struct vipx_format_type *__vipx_queue_find_format(unsigned int format)
{
-	int ret = 0;
-	struct vb_queue *inq, *otq;
+	int ret;
+	unsigned int idx;
+	struct vipx_format_type *fmt = NULL;
+
+	vipx_enter();
+	for (idx = 0; idx < ARRAY_SIZE(vipx_fmts); ++idx) {
+		if (vipx_fmts[idx].colorspace == format) {
+			fmt = &vipx_fmts[idx];
+			break;
+		}
+	}
+
+	if (!fmt) {
+		ret = -EINVAL;
+		vipx_err("Vision format is invalid (%u)\n", format);
+		goto p_err;
+	}
-	queue->qops = &vipx_queue_ops;
-	inq = &queue->inqueue;
-	otq = &queue->otqueue;
-	inq->private_data = queue;
-	otq->private_data = queue;
+	vipx_leave();
+	return fmt;
+p_err:
+	return ERR_PTR(ret);
+}
- ret = vb_queue_init(inq, memory->dev, memory->vb2_mem_ops, &vb_ops, lock, VS4L_DIRECTION_IN);
- if (ret) {
- vipx_err("vb_queue_init is fail(%d)\n", ret);
+/* Compute per-plane byte sizes for a negotiated buffer format. */
+static int __vipx_queue_plane_size(struct vipx_buffer_format *format)
+{
+	int ret;
+	unsigned int plane;
+	struct vipx_format_type *fmt;
+	unsigned int width, height;
+
+	vipx_enter();
+	fmt = format->fmt;
+	if (fmt->planes > VIPX_MAX_PLANES) {
+		ret = -EINVAL;
+		vipx_err("planes(%u) is too big (%u)\n",
+				fmt->planes, VIPX_MAX_PLANES);
		goto p_err;
	}
-	ret = vb_queue_init(otq, memory->dev, memory->vb2_mem_ops, &vb_ops, lock, VS4L_DIRECTION_OT);
-	if (ret) {
-		vipx_err("vb_queue_init is fail(%d)\n", ret);
+	/* NOTE(review): bitsperpixel is applied to BOTH width and height, so
+	 * size scales with bpp squared. That is correct for the 4-bpp
+	 * subsampled chroma planes (w/2 * h/2) but appears to overcount for
+	 * packed formats (e.g. 24 bpp RGB gives 9*w*h). Confirm the intended
+	 * bitsperpixel encoding against the firmware contract before use.
+	 */
+	for (plane = 0; plane < fmt->planes; ++plane) {
+		width = format->width *
+			fmt->bitsperpixel[plane] / BITS_PER_BYTE;
+		height = format->height *
+			fmt->bitsperpixel[plane] / BITS_PER_BYTE;
+		format->size[plane] = width * height;
+	}
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Block (with the queue lock dropped) until at least one bundle appears
+ * on done_list or streaming stops. Returns 0 on success, -EINVAL when
+ * not streaming, -ETIMEDOUT after wait_time ms, or the negative errno
+ * from an interrupted wait.
+ */
+static int __vipx_queue_wait_for_done(struct vipx_queue *queue)
+{
+	int ret;
+	/* TODO: check wait time */
+	unsigned long wait_time = 10000;
+
+	vipx_enter();
+	if (!queue->streaming) {
+		ret = -EINVAL;
+		vipx_err("queue is not streaming status\n");
+		goto p_err;
+	}
+
+	if (!list_empty(&queue->done_list))
+		return 0;
+
+	mutex_unlock(queue->lock);
+
+	ret = wait_event_interruptible_timeout(queue->done_wq,
+			!list_empty(&queue->done_list) || !queue->streaming,
+			msecs_to_jiffies(wait_time));
+	if (!ret) {
+		ret = -ETIMEDOUT;
+		vipx_err("Wait time(%lu ms) is over\n", wait_time);
+	} else if (ret > 0) {
+		ret = 0;
+	} else {
+		vipx_err("Waiting has ended abnormaly (%d)\n", ret);
+	}
+
+	mutex_lock(queue->lock);
+
+	vipx_leave();
+	/* Fix: previously returned 0 unconditionally, swallowing the
+	 * -ETIMEDOUT/-ERESTARTSYS computed above and letting callers
+	 * dereference a non-existent done bundle.
+	 */
+	return ret;
p_err:
	return ret;
}
-int vipx_queue_s_format(struct vipx_queue *queue, struct vs4l_format_list *f)
+/* Pop the oldest completed bundle off done_list, waiting for one if
+ * necessary. Returns the bundle or ERR_PTR on wait failure.
+ */
+static struct vipx_bundle *__vipx_queue_get_done_bundle(
+		struct vipx_queue *queue,
+		struct vs4l_container_list *clist)
{
-	int ret = 0;
-	struct vb_queue *q, *inq, *otq;
+	int ret;
+	struct vipx_bundle *bundle;
+	unsigned long flags;
+
+	vipx_enter();
+	/* Wait for at least one buffer to become available on the done_list */
+	ret = __vipx_queue_wait_for_done(queue);
+	if (ret)
+		goto p_err;
-	BUG_ON(!queue);
-	BUG_ON(!f);
+	/*
+	 * Driver's lock has been held since we last verified that done_list
+	 * is not empty, so no need for another list_empty(done_list) check.
+	 */
+	spin_lock_irqsave(&queue->done_lock, flags);
-	inq = &queue->inqueue;
-	otq = &queue->otqueue;
+	bundle = list_first_entry(&queue->done_list, struct vipx_bundle,
+			done_entry);
-	if (f->direction == VS4L_DIRECTION_IN)
-		q = inq;
-	else
-		q = otq;
+	list_del(&bundle->done_entry);
+	atomic_dec(&queue->done_count);
+
+	spin_unlock_irqrestore(&queue->done_lock, flags);
+
+	vipx_leave();
+	return bundle;
+p_err:
+	return ERR_PTR(ret);
+}
+
+/* Copy completion state (flags, timestamp, index, id) from a finished
+ * bundle back into the userspace-facing vs4l_container_list.
+ */
+static void __vipx_queue_fill_vs4l_buffer(struct vipx_bundle *bundle,
+		struct vs4l_container_list *kclist)
+{
+	struct vipx_container_list *vclist;
+
+	vipx_enter();
+	vclist = &bundle->clist;
+	/* Start from a clean slate: clear every reported flag first */
+	kclist->flags &= ~(1 << VS4L_CL_FLAG_TIMESTAMP);
+	kclist->flags &= ~(1 << VS4L_CL_FLAG_PREPARE);
+	kclist->flags &= ~(1 << VS4L_CL_FLAG_INVALID);
+	kclist->flags &= ~(1 << VS4L_CL_FLAG_DONE);
+
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &vclist->flags)) {
+		if (sizeof(kclist->timestamp) == sizeof(vclist->timestamp)) {
+			kclist->flags |= (1 << VS4L_CL_FLAG_TIMESTAMP);
+			memcpy(kclist->timestamp, vclist->timestamp,
+					sizeof(vclist->timestamp));
+		} else {
+			vipx_warn("timestamp of clist is different (%zu/%zu)\n",
+					sizeof(kclist->timestamp),
+					sizeof(vclist->timestamp));
+		}
+	}
+
+	if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_PREPARE);
+
+	if (test_bit(VS4L_CL_FLAG_INVALID, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_INVALID);
+
+	if (test_bit(VS4L_CL_FLAG_DONE, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_DONE);
+
+	kclist->index = vclist->index;
+	kclist->id = vclist->id;
+	vipx_leave();
+}
+
+/* Thin wrapper: record the buffer size and delegate the dma-buf mapping
+ * to the memory subsystem's map_dmabuf op.
+ */
+static int __vipx_queue_map_dmabuf(struct vipx_queue *queue,
+		struct vipx_buffer *buf, size_t size)
+{
+	int ret;
+	struct vipx_core *core;
+	struct vipx_memory *mem;
+
+	vipx_enter();
+	core = queue->qlist->vctx->core;
+	mem = &core->system->memory;
+
+	buf->size = size;
+	ret = mem->mops->map_dmabuf(mem, buf);
+	if (ret)
+		goto p_err;
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Thin wrapper: delegate dma-buf unmapping to the memory subsystem */
+static void __vipx_queue_unmap_dmabuf(struct vipx_queue *queue,
+		struct vipx_buffer *buf)
+{
+	struct vipx_core *core;
+	struct vipx_memory *mem;
+
+	vipx_enter();
+	core = queue->qlist->vctx->core;
+	mem = &core->system->memory;
+	mem->mops->unmap_dmabuf(mem, buf);
+	vipx_leave();
+}
+
+/*
+ * Map every dma-buf referenced by the bundle's containers and mark the
+ * bundle PREPARE. Idempotent: returns immediately if already prepared.
+ * NOTE(review): buffers mapped before a mid-loop failure are not unwound
+ * here; since PREPARE is never set, confirm the caller's error path does
+ * not rely on bundle_unprepare to release them.
+ */
+static int __vipx_queue_bundle_prepare(struct vipx_queue *queue,
+		struct vipx_bundle *bundle)
+{
+	int ret;
+	struct vipx_container *con;
+	struct vipx_buffer *buf;
+	struct vipx_buffer_format *fmt;
+	unsigned int c_cnt, count, b_cnt;
+
+	vipx_enter();
+	if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
+		return 0;
+
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		switch (con[c_cnt].type) {
+		case VS4L_BUFFER_LIST:
+		case VS4L_BUFFER_ROI:
+		case VS4L_BUFFER_PYRAMID:
+			count = con[c_cnt].count;
+			break;
+		default:
+			ret = -EINVAL;
+			vipx_err("container type is invalid (%u)\n",
+					con[c_cnt].type);
+			goto p_err;
+		}
+
+		fmt = con[c_cnt].format;
+		switch (con[c_cnt].memory) {
+		case VS4L_MEMORY_DMABUF:
+			buf = con[c_cnt].buffers;
+			for (b_cnt = 0; b_cnt < count; ++b_cnt) {
+				ret = __vipx_queue_map_dmabuf(queue,
+						&buf[b_cnt],
+						fmt->size[b_cnt]);
+				if (ret)
+					goto p_err;
+			}
+			break;
+		default:
+			/* Fix: this path jumped to p_err with 'ret' still
+			 * uninitialized, returning stack garbage.
+			 */
+			ret = -EINVAL;
+			vipx_err("container memory type is invalid (%u)\n",
+					con[c_cnt].memory);
+			goto p_err;
+		}
+	}
+	set_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
-	ret = CALL_QOPS(queue, format, f);
-	if (ret) {
-		vipx_err("CALL_QOPS(format) is fail(%d)\n", ret);
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Unmap every dma-buf in a prepared bundle and clear the PREPARE flag.
+ * A bundle that was never prepared is a no-op success.
+ */
+static int __vipx_queue_bundle_unprepare(struct vipx_queue *queue,
+		struct vipx_bundle *bundle)
+{
+	/* Fix: 'ret' was uninitialized and two paths (the !PREPARE early-out
+	 * below and the invalid memory-type case) reached p_err without
+	 * assigning it, returning stack garbage. Initialize to 0 so an
+	 * already-unprepared bundle reports success, and set -EINVAL
+	 * explicitly on the invalid memory-type path.
+	 */
+	int ret = 0;
+	struct vipx_container *con;
+	struct vipx_buffer *buf;
+	unsigned int c_cnt, count, b_cnt;
+
+	vipx_enter();
+	if (!test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
		goto p_err;
+
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		switch (con[c_cnt].type) {
+		case VS4L_BUFFER_LIST:
+		case VS4L_BUFFER_ROI:
+		case VS4L_BUFFER_PYRAMID:
+			count = con[c_cnt].count;
+			break;
+		default:
+			ret = -EINVAL;
+			vipx_err("container type is invalid (%u)\n",
+					con[c_cnt].type);
+			goto p_err;
+		}
+
+		switch (con[c_cnt].memory) {
+		case VS4L_MEMORY_DMABUF:
+			buf = con[c_cnt].buffers;
+			for (b_cnt = 0; b_cnt < count; ++b_cnt)
+				__vipx_queue_unmap_dmabuf(queue, &buf[b_cnt]);
+			break;
+		default:
+			ret = -EINVAL;
+			vipx_err("container memory type is invalid (%u)\n",
+					con[c_cnt].memory);
+			goto p_err;
+		}
	}
-	ret = vb_queue_s_format(q, f);
-	if (ret) {
-		vipx_err("vb_queue_s_format is fail(%d)\n", ret);
+	clear_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
+
+	vipx_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Validate that a userspace container list matches the bundle that was
+ * allocated for this index (direction, counts, targets, fds), and refresh
+ * the mutable fields (id, flags, per-buffer ROI) from userspace.
+ * Returns 0 on match, -EINVAL on any mismatch.
+ */
+static int __vipx_queue_bundle_check(struct vipx_bundle *bundle,
+		struct vs4l_container_list *kclist)
+{
+	int ret;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer *kbuf;
+	struct vipx_container_list *vclist;
+	struct vipx_container *vcon;
+	struct vipx_buffer *vbuf;
+	unsigned int c_cnt, b_cnt;
+
+	vipx_enter();
+	vclist = &bundle->clist;
+
+	if (vclist->direction != kclist->direction) {
+		ret = -EINVAL;
+		vipx_err("direction of clist is different (%u/%u)\n",
+				vclist->direction, kclist->direction);
		goto p_err;
	}
+	if (vclist->index != kclist->index) {
+		ret = -EINVAL;
+		vipx_err("index of clist is different (%u/%u)\n",
+				vclist->index, kclist->index);
+		goto p_err;
+	}
+
+	if (vclist->count != kclist->count) {
+		ret = -EINVAL;
+		vipx_err("container count of clist is differnet (%u/%u)\n",
+				vclist->count, kclist->count);
+		goto p_err;
+	}
+
+	vclist->id = kclist->id;
+	vclist->flags = kclist->flags;
+
+	vcon = vclist->containers;
+	kcon = kclist->containers;
+	for (c_cnt = 0; c_cnt < vclist->count; ++c_cnt) {
+		if (vcon[c_cnt].target != kcon[c_cnt].target) {
+			ret = -EINVAL;
+			vipx_err("target of container is different (%u/%u)\n",
+					vcon[c_cnt].target, kcon[c_cnt].target);
+			goto p_err;
+		}
+
+		if (vcon[c_cnt].count != kcon[c_cnt].count) {
+			ret = -EINVAL;
+			vipx_err("count of container is different (%u/%u)\n",
+					vcon[c_cnt].count, kcon[c_cnt].count);
+			goto p_err;
+		}
+
+		vbuf = vcon[c_cnt].buffers;
+		kbuf = kcon[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < vcon[c_cnt].count; ++b_cnt) {
+			vbuf[b_cnt].roi = kbuf[b_cnt].roi;
+			/* fd must be stable: the mapping made at prepare
+			 * time belongs to the original dma-buf
+			 */
+			if (vbuf[b_cnt].m.fd != kbuf[b_cnt].m.fd) {
+				ret = -EINVAL;
+				vipx_err("fd of buffer is different (%d/%d)\n",
+						vbuf[b_cnt].m.fd,
+						kbuf[b_cnt].m.fd);
+				goto p_err;
+			}
+		}
+	}
+
+	vipx_leave();
+	return 0;
p_err:
	return ret;
}
-int vipx_queue_start(struct vipx_queue *queue)
+/*
+ * Allocate a bundle (bundle + containers + buffers in ONE kzalloc) for a
+ * userspace container list and register it in queue->bufs at the list's
+ * index. Containers are bound to the formats negotiated via s_format.
+ */
+static int __vipx_queue_bundle_alloc(struct vipx_queue *queue,
+		struct vs4l_container_list *kclist)
{
-	int ret = 0;
-	struct vb_queue *inq, *otq;
+	int ret;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer *kbuf;
+	struct vipx_bundle *bundle;
+	struct vipx_container_list *vclist;
+	struct vipx_container *vcon;
+	struct vipx_buffer *vbuf;
+	size_t alloc_size;
+	char *ptr;
+	unsigned int c_cnt, f_cnt, b_cnt;
+
+	vipx_enter();
+	/* All memory allocation and pointer value assignment */
+	kcon = kclist->containers;
+	alloc_size = sizeof(*bundle);
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt)
+		alloc_size += sizeof(*vcon) + sizeof(*vbuf) * kcon[c_cnt].count;
+
+	bundle = kzalloc(alloc_size, GFP_KERNEL);
+	if (!bundle) {
+		ret = -ENOMEM;
+		vipx_err("Failed to alloc bundle (%zu)\n", alloc_size);
+		goto p_err_alloc;
+	}
-	inq = &queue->inqueue;
-	otq = &queue->otqueue;
+	vclist = &bundle->clist;
+	ptr = (char *)bundle + sizeof(*bundle);
+	vclist->containers = (struct vipx_container *)ptr;
+	ptr += sizeof(*vcon) * kclist->count;
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt) {
+		vclist->containers[c_cnt].buffers = (struct vipx_buffer *)ptr;
+		ptr += sizeof(*vbuf) * kcon[c_cnt].count;
+	}
+
+	vclist->direction = kclist->direction;
+	vclist->id = kclist->id;
+	vclist->index = kclist->index;
+	vclist->flags = kclist->flags;
+	vclist->count = kclist->count;
+
+	if (sizeof(vclist->user_params) != sizeof(kclist->user_params)) {
+		ret = -EINVAL;
+		vipx_err("user params size is invalid (%zu/%zu)\n",
+				sizeof(vclist->user_params),
+				sizeof(kclist->user_params));
+		goto p_err_param;
+	}
+	memcpy(vclist->user_params, kclist->user_params,
+			sizeof(vclist->user_params));
+
+	vcon = vclist->containers;
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt) {
+		vcon[c_cnt].type = kcon[c_cnt].type;
+		vcon[c_cnt].target = kcon[c_cnt].target;
+		vcon[c_cnt].memory = kcon[c_cnt].memory;
+		if (sizeof(vcon[c_cnt].reserved) !=
+				sizeof(kcon[c_cnt].reserved)) {
+			ret = -EINVAL;
+			vipx_err("container reserve is invalid (%zu/%zu)\n",
+					sizeof(vcon[c_cnt].reserved),
+					sizeof(kcon[c_cnt].reserved));
+			goto p_err_param;
+		}
+		/* Fix: was memcpy(vcon->reserved, ...) — that copies into
+		 * element 0 on EVERY iteration, so containers 1..N-1 kept
+		 * zeroed reserved fields and container 0 was overwritten.
+		 */
+		memcpy(vcon[c_cnt].reserved, kcon[c_cnt].reserved,
+				sizeof(vcon[c_cnt].reserved));
+		vcon[c_cnt].count = kcon[c_cnt].count;
+
+		/* Bind the container to its negotiated format by target id;
+		 * format stays NULL (from kzalloc) if none matches.
+		 */
+		for (f_cnt = 0; f_cnt < queue->flist.count; ++f_cnt) {
+			if (vcon[c_cnt].target ==
+					queue->flist.formats[f_cnt].target) {
+				vcon[c_cnt].format =
+					&queue->flist.formats[f_cnt];
+				break;
+			}
+		}
+
+		if (!vcon[c_cnt].format) {
+			ret = -EINVAL;
+			vipx_err("Format(%u) is not set\n", vcon[c_cnt].target);
+			goto p_err_param;
+		}
+
+		vbuf = vcon[c_cnt].buffers;
+		kbuf = kcon[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < kcon[c_cnt].count; ++b_cnt) {
+			vbuf[b_cnt].roi = kbuf[b_cnt].roi;
+			vbuf[b_cnt].m.fd = kbuf[b_cnt].m.fd;
+		}
+	}
+
+	queue->bufs[kclist->index] = bundle;
+	queue->num_buffers++;
-	ret = vb_queue_start(inq);
-	if (ret) {
-		vipx_err("vb_queue_init is fail(%d)\n", ret);
+	vipx_leave();
+	return 0;
+p_err_param:
+	kfree(bundle);
+p_err_alloc:
+	return ret;
+}
+
+static void __vipx_queue_bundle_free(struct vipx_queue *queue,
+ struct vipx_bundle *bundle)
+{
+ vipx_enter();
+ queue->num_buffers--;
+ queue->bufs[bundle->clist.index] = NULL;
+ kfree(bundle);
+ vipx_leave();
+}
+
+static int __vipx_queue_s_format(struct vipx_queue *queue,
+ struct vs4l_format_list *flist)
+{
+ int ret;
+ unsigned int idx;
+ struct vipx_buffer_format *vformat;
+ struct vs4l_format *kformat;
+ struct vipx_format_type *fmt;
+
+ vipx_enter();
+ vformat = kcalloc(flist->count, sizeof(*vformat), GFP_KERNEL);
+ if (!vformat) {
+ ret = -ENOMEM;
+ vipx_err("Failed to allocate vformats\n");
+ goto p_err_alloc;
+ }
+ queue->flist.count = flist->count;
+ queue->flist.formats = vformat;
+ kformat = flist->formats;
+
+ for (idx = 0; idx < flist->count; ++idx) {
+ fmt = __vipx_queue_find_format(kformat[idx].format);
+ if (IS_ERR(fmt)) {
+ ret = PTR_ERR(fmt);
+ goto p_err_find;
+ }
+
+ vformat[idx].fmt = fmt;
+ vformat[idx].colorspace = kformat[idx].format;
+ vformat[idx].target = kformat[idx].target;
+ vformat[idx].plane = kformat[idx].plane;
+ vformat[idx].width = kformat[idx].width;
+ vformat[idx].height = kformat[idx].height;
+
+ ret = __vipx_queue_plane_size(&vformat[idx]);
+ if (ret)
+ goto p_err_plane;
+ }
+
+ vipx_leave();
+ return 0;
+p_err_plane:
+p_err_find:
+ kfree(vformat);
+p_err_alloc:
+ return ret;
+}
+
+static int __vipx_queue_qbuf(struct vipx_queue *queue,
+ struct vs4l_container_list *clist)
+{
+ int ret;
+ struct vipx_bundle *bundle;
+ struct vipx_container *con;
+ struct vipx_buffer *buf;
+ int dma_dir;
+ unsigned int c_cnt, b_cnt;
+
+ vipx_enter();
+ if (clist->index >= VIPX_MAX_BUFFER) {
+ ret = -EINVAL;
+ vipx_err("clist index is out of range (%u/%u)\n",
+ clist->index, VIPX_MAX_BUFFER);
goto p_err;
}
- ret = vb_queue_start(otq);
- if (ret) {
- vipx_err("vb_queue_init is fail(%d)\n", ret);
+ if (queue->bufs[clist->index]) {
+ bundle = queue->bufs[clist->index];
+ ret = __vipx_queue_bundle_check(bundle, clist);
+ if (ret)
+ goto p_err;
+ } else {
+ ret = __vipx_queue_bundle_alloc(queue, clist);
+ if (ret)
+ goto p_err;
+
+ bundle = queue->bufs[clist->index];
+ }
+
+ if (bundle->state != VIPX_BUNDLE_STATE_DEQUEUED) {
+ ret = -EINVAL;
+ vipx_err("bundle is already in use (%u)\n", bundle->state);
goto p_err;
}
- ret = CALL_QOPS(queue, start);
- if (ret) {
- vipx_err("CALL_QOPS(start) is fail(%d)\n", ret);
+ ret = __vipx_queue_bundle_prepare(queue, bundle);
+ if (ret)
goto p_err;
+
+ if (queue->direction == VS4L_DIRECTION_OT)
+ dma_dir = DMA_FROM_DEVICE;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ /* TODO: check sync */
+ con = bundle->clist.containers;
+ for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+ buf = con[c_cnt].buffers;
+ for (b_cnt = 0; b_cnt < con[c_cnt].count; ++b_cnt)
+ __dma_map_area(buf[b_cnt].kvaddr, buf[b_cnt].size,
+ dma_dir);
}
+ /*
+ * Add to the queued buffers list, a buffer will stay on it until
+ * dequeued in dqbuf.
+ */
+ list_add_tail(&bundle->queued_entry, &queue->queued_list);
+ bundle->state = VIPX_BUNDLE_STATE_QUEUED;
+ atomic_inc(&queue->queued_count);
+
+ vipx_leave();
+ return 0;
p_err:
return ret;
}
-int vipx_queue_stop(struct vipx_queue *queue)
+static void __vipx_queue_process(struct vipx_queue *queue,
+ struct vipx_bundle *bundle)
{
- int ret = 0;
- struct vb_queue *inq, *otq;
+ vipx_enter();
+ bundle->state = VIPX_BUNDLE_STATE_PROCESS;
+ list_add_tail(&bundle->process_entry, &queue->process_list);
+ atomic_inc(&queue->process_count);
+ vipx_leave();
+}
+
+static int __vipx_queue_dqbuf(struct vipx_queue *queue,
+ struct vs4l_container_list *clist)
+{
+ int ret;
+ struct vipx_bundle *bundle;
+
+ vipx_enter();
+ if (queue->direction != clist->direction) {
+ ret = -EINVAL;
+ vipx_err("direction of queue is different (%u/%u)\n",
+ queue->direction, clist->direction);
+ goto p_err;
+ }
+
+ bundle = __vipx_queue_get_done_bundle(queue, clist);
+ if (IS_ERR(bundle)) {
+ ret = PTR_ERR(bundle);
+ goto p_err;
+ }
+
+ if (bundle->state != VIPX_BUNDLE_STATE_DONE) {
+ ret = -EINVAL;
+ vipx_err("bundle state is not done (%u)\n", bundle->state);
+ goto p_err;
+ }
+
+ /* Fill buffer information for the userspace */
+ __vipx_queue_fill_vs4l_buffer(bundle, clist);
+
+ bundle->state = VIPX_BUNDLE_STATE_DEQUEUED;
+ list_del(&bundle->queued_entry);
+ atomic_dec(&queue->queued_count);
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static int __vipx_queue_start(struct vipx_queue *queue)
+{
+ vipx_enter();
+ queue->streaming = 1;
+ vipx_leave();
+ return 0;
+}
+
+static int __vipx_queue_stop(struct vipx_queue *queue)
+{
+ int ret;
+ struct vipx_bundle **bundle;
+ unsigned int idx;
- inq = &queue->inqueue;
- otq = &queue->otqueue;
+ vipx_enter();
+ queue->streaming = 0;
+ wake_up_all(&queue->done_wq);
- ret = CALL_QOPS(queue, stop);
- if (ret) {
- vipx_err("CALL_QOPS(stop) is fail(%d)\n", ret);
+ if (atomic_read(&queue->queued_count)) {
+ ret = -EINVAL;
+ vipx_err("queued list is not empty (%d)\n",
+ atomic_read(&queue->queued_count));
goto p_err;
}
- ret = vb_queue_stop(inq);
- if (ret) {
- vipx_err("vb_queue_init is fail(%d)\n", ret);
+ if (atomic_read(&queue->process_count)) {
+ ret = -EINVAL;
+ vipx_err("process list is not empty (%d)\n",
+ atomic_read(&queue->process_count));
goto p_err;
}
- ret = vb_queue_stop(otq);
- if (ret) {
- vipx_err("vb_queue_init is fail(%d)\n", ret);
+ if (atomic_read(&queue->done_count)) {
+ ret = -EINVAL;
+ vipx_err("done list is not empty (%d)\n",
+ atomic_read(&queue->done_count));
+ goto p_err;
+ }
+
+ bundle = queue->bufs;
+ for (idx = 0; queue->num_buffers && idx < VIPX_MAX_BUFFER; ++idx) {
+ if (!bundle[idx])
+ continue;
+
+ ret = __vipx_queue_bundle_unprepare(queue, bundle[idx]);
+ if (ret)
+ goto p_err;
+
+ __vipx_queue_bundle_free(queue, bundle[idx]);
+ }
+
+ if (queue->num_buffers) {
+ ret = -EINVAL;
+ vipx_err("bundle leak issued (%u)\n", queue->num_buffers);
goto p_err;
}
+ vipx_leave();
+ return 0;
p_err:
return ret;
}
-int vipx_queue_poll(struct vipx_queue *queue, struct file *file, poll_table *poll)
+static int vipx_queue_poll(struct vipx_queue_list *qlist, struct file *file,
+ struct poll_table_struct *poll)
{
int ret = 0;
- struct vb_queue *inq, *otq;
unsigned long events;
+ struct vipx_queue *inq, *outq;
- BUG_ON(!queue);
- BUG_ON(!file);
- BUG_ON(!poll);
-
+ vipx_enter();
events = poll_requested_events(poll);
- inq = &queue->inqueue;
- otq = &queue->otqueue;
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
if (events & POLLIN) {
if (list_empty(&inq->done_list))
}
if (events & POLLOUT) {
- if (list_empty(&otq->done_list))
- poll_wait(file, &otq->done_wq, poll);
+ if (list_empty(&outq->done_list))
+ poll_wait(file, &outq->done_wq, poll);
- if (list_empty(&otq->done_list))
+ if (list_empty(&outq->done_list))
ret |= POLLOUT | POLLWRNORM;
}
+ vipx_leave();
return ret;
}
-int vipx_queue_qbuf(struct vipx_queue *queue, struct vs4l_container_list *c)
+static int vipx_queue_set_graph(struct vipx_queue_list *qlist,
+ struct vs4l_graph *ginfo)
{
- int ret = 0;
- struct vb_queue *q, *inq, *otq;
- struct vb_bundle *invb, *otvb;
+ int ret;
- inq = &queue->inqueue;
- otq = &queue->otqueue;
+ vipx_enter();
+ ret = qlist->gops->set_graph(qlist->vctx->graph, ginfo);
+ if (ret)
+ goto p_err_gops;
- if (c->direction == VS4L_DIRECTION_IN)
- q = inq;
+ vipx_leave();
+ return 0;
+p_err_gops:
+ return ret;
+}
+
+static int vipx_queue_set_format(struct vipx_queue_list *qlist,
+ struct vs4l_format_list *flist)
+{
+ int ret;
+ struct vipx_queue *queue;
+
+ vipx_enter();
+ if (flist->direction == VS4L_DIRECTION_IN)
+ queue = &qlist->in_queue;
else
- q = otq;
+ queue = &qlist->out_queue;
- ret = vb_queue_qbuf(q, c);
- if (ret) {
- vipx_err("vb_queue_qbuf is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = qlist->gops->set_format(qlist->vctx->graph, flist);
+ if (ret)
+ goto p_err_gops;
- if (list_empty(&inq->queued_list))
- goto p_err;
+ ret = __vipx_queue_s_format(queue, flist);
+ if (ret)
+ goto p_err_s_format;
- if (list_empty(&otq->queued_list))
- goto p_err;
+ vipx_leave();
+ return 0;
+p_err_s_format:
+p_err_gops:
+ return ret;
+}
- invb = list_first_entry(&inq->queued_list, struct vb_bundle, queued_entry);
- otvb = list_first_entry(&otq->queued_list, struct vb_bundle, queued_entry);
+static int vipx_queue_set_param(struct vipx_queue_list *qlist,
+ struct vs4l_param_list *plist)
+{
+ int ret;
- vb_queue_process(inq, invb);
- vb_queue_process(otq, otvb);
+ vipx_enter();
+ ret = qlist->gops->set_param(qlist->vctx->graph, plist);
+ if (ret)
+ goto p_err_gops;
- ret = CALL_QOPS(queue, queue, &invb->clist, &otvb->clist);
- if (ret) {
- vipx_err("CALL_QOPS(queue) is fail(%d)\n", ret);
+ vipx_leave();
+ return 0;
+p_err_gops:
+ return ret;
+}
+
+static int vipx_queue_set_ctrl(struct vipx_queue_list *qlist,
+ struct vs4l_ctrl *ctrl)
+{
+ int ret;
+
+ vipx_enter();
+ switch (ctrl->ctrl) {
+ default:
+ ret = -EINVAL;
+ vipx_err("vs4l control is invalid(%d)\n", ctrl->ctrl);
goto p_err;
}
+ vipx_leave();
+ return 0;
p_err:
return ret;
}
-int vipx_queue_dqbuf(struct vipx_queue *queue, struct vs4l_container_list *c, bool nonblocking)
+static int vipx_queue_qbuf(struct vipx_queue_list *qlist,
+ struct vs4l_container_list *clist)
{
- int ret = 0;
- struct vb_queue *q;
- struct vb_bundle *bundle;
+ int ret;
+ struct vipx_queue *inq, *outq, *queue;
+ struct vipx_bundle *invb, *otvb;
- BUG_ON(!queue);
+ vipx_enter();
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
- if (c->direction == VS4L_DIRECTION_IN)
- q = &queue->inqueue;
+ if (clist->direction == VS4L_DIRECTION_IN)
+ queue = inq;
else
- q = &queue->otqueue;
+ queue = outq;
- ret = vb_queue_dqbuf(q, c, nonblocking);
- if (ret) {
- vipx_err("vb_queue_dqbuf is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = __vipx_queue_qbuf(queue, clist);
+ if (ret)
+ goto p_err_qbuf;
+
+ if (list_empty(&inq->queued_list) || list_empty(&outq->queued_list))
+ return 0;
+
+ invb = list_first_entry(&inq->queued_list,
+ struct vipx_bundle, queued_entry);
+ otvb = list_first_entry(&outq->queued_list,
+ struct vipx_bundle, queued_entry);
+
+ __vipx_queue_process(inq, invb);
+ __vipx_queue_process(outq, otvb);
+
+ ret = qlist->gops->queue(qlist->vctx->graph,
+ &invb->clist, &otvb->clist);
+ if (ret)
+ goto p_err_gops;
+
+ vipx_leave();
+ return 0;
+p_err_gops:
+p_err_qbuf:
+ return ret;
+}
- if (c->index >= VIPX_MAX_BUFFER) {
- vipx_err("container index(%d) is invalid\n", c->index);
+static int vipx_queue_dqbuf(struct vipx_queue_list *qlist,
+ struct vs4l_container_list *clist)
+{
+ int ret;
+ struct vipx_queue *queue;
+ struct vipx_bundle *bundle;
+
+ vipx_enter();
+ if (clist->direction == VS4L_DIRECTION_IN)
+ queue = &qlist->in_queue;
+ else
+ queue = &qlist->out_queue;
+
+ ret = __vipx_queue_dqbuf(queue, clist);
+ if (ret)
+ goto p_err_dqbuf;
+
+ if (clist->index >= VIPX_MAX_BUFFER) {
ret = -EINVAL;
- goto p_err;
+ vipx_err("clist index is out of range (%u/%u)\n",
+ clist->index, VIPX_MAX_BUFFER);
+ goto p_err_index;
}
- bundle = q->bufs[c->index];
+ bundle = queue->bufs[clist->index];
if (!bundle) {
- vipx_err("bundle(%d) is NULL\n", c->index);
ret = -EINVAL;
- goto p_err;
+ vipx_err("bundle(%u) is NULL\n", clist->index);
+ goto p_err_bundle;
}
- if (bundle->clist.index != c->index) {
- vipx_err("index is NOT matched(%d != %d)\n", bundle->clist.index, c->index);
+ if (bundle->clist.index != clist->index) {
ret = -EINVAL;
- goto p_err;
+ vipx_err("index of clist is different (%u/%u)\n",
+ bundle->clist.index, clist->index);
+ goto p_err_bundle_index;
}
- ret = CALL_QOPS(queue, deque, &bundle->clist);
- if (ret) {
- vipx_err("CALL_QOPS(deque) is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = qlist->gops->deque(qlist->vctx->graph, &bundle->clist);
+ if (ret)
+ goto p_err_gops;
+
+ vipx_leave();
+ return 0;
+p_err_gops:
+p_err_bundle_index:
+p_err_bundle:
+p_err_index:
+p_err_dqbuf:
+ return ret;
+}
-p_err:
+static int vipx_queue_streamon(struct vipx_queue_list *qlist)
+{
+ int ret;
+ struct vipx_queue *inq, *outq;
+
+ vipx_enter();
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
+
+ __vipx_queue_start(inq);
+ __vipx_queue_start(outq);
+
+ ret = qlist->gops->start(qlist->vctx->graph);
+ if (ret)
+ goto p_err_gops;
+
+ vipx_leave();
+ return 0;
+p_err_gops:
return ret;
}
-void vipx_queue_done(struct vipx_queue *queue,
- struct vb_container_list *incl,
- struct vb_container_list *otcl,
- unsigned long flags)
+static int vipx_queue_streamoff(struct vipx_queue_list *qlist)
{
- struct vb_queue *inq, *otq;
- struct vb_bundle *invb, *otvb;
+ int ret;
+ struct vipx_queue *inq, *outq;
+
+ vipx_enter();
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
- BUG_ON(!queue);
- BUG_ON(!incl);
- BUG_ON(!otcl);
+ ret = qlist->gops->stop(qlist->vctx->graph);
+ if (ret)
+ goto p_err_gops;
- inq = &queue->inqueue;
- otq = &queue->otqueue;
+ __vipx_queue_stop(inq);
+ __vipx_queue_stop(outq);
- if (list_empty(&inq->process_list)) {
- vipx_err("inqueue is empty\n");
- BUG();
+ vipx_leave();
+ return 0;
+p_err_gops:
+ return ret;
+}
+
+const struct vipx_context_qops vipx_context_qops = {
+ .poll = vipx_queue_poll,
+ .set_graph = vipx_queue_set_graph,
+ .set_format = vipx_queue_set_format,
+ .set_param = vipx_queue_set_param,
+ .set_ctrl = vipx_queue_set_ctrl,
+ .qbuf = vipx_queue_qbuf,
+ .dqbuf = vipx_queue_dqbuf,
+ .streamon = vipx_queue_streamon,
+ .streamoff = vipx_queue_streamoff
+};
+
+static void __vipx_queue_done(struct vipx_queue *queue,
+ struct vipx_bundle *bundle)
+{
+ int dma_dir;
+ struct vipx_container *con;
+ struct vipx_buffer *buf;
+ unsigned int c_cnt, b_cnt;
+ unsigned long flags;
+
+ vipx_enter();
+ if (queue->direction != bundle->clist.direction) {
+ vipx_err("direction of queue is different (%u/%u)\n",
+ queue->direction, bundle->clist.direction);
}
- if (list_empty(&otq->process_list)) {
- vipx_err("otqueue is empty\n");
- BUG();
+ if (queue->direction == VS4L_DIRECTION_OT)
+ dma_dir = DMA_FROM_DEVICE;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ /* TODO: check sync */
+ con = bundle->clist.containers;
+ for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+ buf = con[c_cnt].buffers;
+ for (b_cnt = 0; b_cnt < con[c_cnt].count; ++b_cnt)
+ __dma_map_area(buf[b_cnt].kvaddr, buf[b_cnt].size,
+ dma_dir);
}
- invb = container_of(incl, struct vb_bundle, clist);
- otvb = container_of(otcl, struct vb_bundle, clist);
+ spin_lock_irqsave(&queue->done_lock, flags);
+
+ list_del(&bundle->process_entry);
+ atomic_dec(&queue->process_count);
+
+ bundle->state = VIPX_BUNDLE_STATE_DONE;
+ list_add_tail(&bundle->done_entry, &queue->done_list);
+ atomic_inc(&queue->done_count);
- if (invb->state != VB_BUF_STATE_PROCESS) {
- vipx_err("invb state(%d) is invalid\n", invb->state);
- BUG();
+ spin_unlock_irqrestore(&queue->done_lock, flags);
+ wake_up(&queue->done_wq);
+ vipx_leave();
+}
+
+void vipx_queue_done(struct vipx_queue_list *qlist,
+ struct vipx_container_list *incl,
+ struct vipx_container_list *otcl,
+ unsigned long flags)
+{
+ struct vipx_queue *inq, *outq;
+ struct vipx_bundle *invb, *otvb;
+
+ vipx_enter();
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
+
+ if (!list_empty(&outq->process_list)) {
+ otvb = container_of(otcl, struct vipx_bundle, clist);
+ if (otvb->state == VIPX_BUNDLE_STATE_PROCESS) {
+ otvb->flags |= flags;
+ __vipx_queue_done(outq, otvb);
+ } else {
+ vipx_err("out-bundle state(%d) is not process\n",
+ otvb->state);
+ }
+ } else {
+ vipx_err("out-queue is empty\n");
}
- if (otvb->state != VB_BUF_STATE_PROCESS) {
- vipx_err("otvb state(%d) is invalid\n", otvb->state);
- BUG();
+ if (!list_empty(&inq->process_list)) {
+ invb = container_of(incl, struct vipx_bundle, clist);
+ if (invb->state == VIPX_BUNDLE_STATE_PROCESS) {
+ invb->flags |= flags;
+ __vipx_queue_done(inq, invb);
+ } else {
+ vipx_err("in-bundle state(%u) is not process\n",
+ invb->state);
+ }
+ } else {
+ vipx_err("in-queue is empty\n");
}
- otvb->flags |= flags;
- vb_queue_done(otq, otvb);
+ vipx_leave();
+}
+
+static void __vipx_queue_init(struct vipx_context *vctx,
+ struct vipx_queue *queue, unsigned int direction)
+{
+ vipx_enter();
+ queue->direction = direction;
+ queue->streaming = 0;
+ queue->lock = &vctx->lock;
+
+ INIT_LIST_HEAD(&queue->queued_list);
+ atomic_set(&queue->queued_count, 0);
+ INIT_LIST_HEAD(&queue->process_list);
+ atomic_set(&queue->process_count, 0);
+ INIT_LIST_HEAD(&queue->done_list);
+ atomic_set(&queue->done_count, 0);
+
+ spin_lock_init(&queue->done_lock);
+ init_waitqueue_head(&queue->done_wq);
+
+ queue->flist.count = 0;
+ queue->flist.formats = NULL;
+ queue->num_buffers = 0;
+ queue->qlist = &vctx->queue_list;
+ vipx_leave();
+}
+
+int vipx_queue_init(struct vipx_context *vctx)
+{
+ struct vipx_queue_list *qlist;
+ struct vipx_queue *inq, *outq;
+
+ vipx_enter();
+ vctx->queue_ops = &vipx_context_qops;
+
+ qlist = &vctx->queue_list;
+ qlist->vctx = vctx;
+ qlist->gops = &vipx_queue_gops;
+
+ inq = &qlist->in_queue;
+ outq = &qlist->out_queue;
+
+ __vipx_queue_init(vctx, inq, VS4L_DIRECTION_IN);
+ __vipx_queue_init(vctx, outq, VS4L_DIRECTION_OT);
- invb->flags |= flags;
- vb_queue_done(inq, invb);
+ vipx_leave();
+ return 0;
}
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef VIPX_QUEUE_H_
-#define VIPX_QUEUE_H_
+#ifndef __VIPX_QUEUE_H__
+#define __VIPX_QUEUE_H__
+#include <linux/poll.h>
#include <linux/types.h>
#include <linux/mutex.h>
-#include "vision-buffer.h"
-#include "vipx-taskmgr.h"
+#include "vipx-config.h"
+#include "vs4l.h"
#include "vipx-memory.h"
+#include "vipx-graph.h"
-struct vipx_queue;
+struct vipx_context;
+struct vipx_queue_list;
-struct vipx_queue_ops {
- int (*start)(struct vipx_queue *queue);
- int (*stop)(struct vipx_queue *queue);
- int (*format)(struct vipx_queue *queue, struct vs4l_format_list *f);
- int (*queue)(struct vipx_queue *queue, struct vb_container_list *incl, struct vb_container_list *otcl);
- int (*deque)(struct vipx_queue *queue, struct vb_container_list *clist);
+struct vipx_format_type {
+ char *name;
+ unsigned int colorspace;
+ unsigned int planes;
+ unsigned int bitsperpixel[VIPX_MAX_PLANES];
+};
+
+struct vipx_buffer_format {
+ unsigned int target;
+ struct vipx_format_type *fmt;
+ unsigned int colorspace;
+ unsigned int plane;
+ unsigned int width;
+ unsigned int height;
+ unsigned int size[VIPX_MAX_PLANES];
+};
+
+struct vipx_buffer_format_list {
+ unsigned int count;
+ struct vipx_buffer_format *formats;
+};
+
+struct vipx_container {
+ unsigned int type;
+ unsigned int target;
+ unsigned int memory;
+ unsigned int reserved[4];
+ unsigned int count;
+ struct vipx_buffer *buffers;
+ struct vipx_buffer_format *format;
+};
+
+struct vipx_container_list {
+ unsigned int direction;
+ unsigned int id;
+ unsigned int index;
+ unsigned long flags;
+ struct timeval timestamp[6];
+ unsigned int count;
+ unsigned int user_params[MAX_NUM_OF_USER_PARAMS];
+ struct vipx_container *containers;
+};
+
+enum vipx_bundle_state {
+ VIPX_BUNDLE_STATE_DEQUEUED,
+ VIPX_BUNDLE_STATE_QUEUED,
+ VIPX_BUNDLE_STATE_PROCESS,
+ VIPX_BUNDLE_STATE_DONE,
+};
+
+struct vipx_bundle {
+ unsigned long flags;
+ unsigned int state;
+ struct list_head queued_entry;
+ struct list_head process_entry;
+ struct list_head done_entry;
+
+ struct vipx_container_list clist;
};
struct vipx_queue {
- struct vb_queue inqueue;
- struct vb_queue otqueue;
- const struct vipx_queue_ops *qops;
+ unsigned int direction;
+ unsigned int streaming;
+ struct mutex *lock;
+
+ struct list_head queued_list;
+ atomic_t queued_count;
+ struct list_head process_list;
+ atomic_t process_count;
+ struct list_head done_list;
+ atomic_t done_count;
+
+ spinlock_t done_lock;
+ wait_queue_head_t done_wq;
+
+ struct vipx_buffer_format_list flist;
+ struct vipx_bundle *bufs[VIPX_MAX_BUFFER];
+ unsigned int num_buffers;
+
+ struct vipx_queue_list *qlist;
+};
+
+struct vipx_queue_gops {
+ int (*set_graph)(struct vipx_graph *graph, struct vs4l_graph *ginfo);
+ int (*set_format)(struct vipx_graph *graph,
+ struct vs4l_format_list *flist);
+ int (*set_param)(struct vipx_graph *graph,
+ struct vs4l_param_list *plist);
+ int (*set_ctrl)(struct vipx_graph *graph, struct vs4l_ctrl *ctrl);
+ int (*queue)(struct vipx_graph *graph,
+ struct vipx_container_list *incl,
+ struct vipx_container_list *otcl);
+ int (*deque)(struct vipx_graph *graph,
+ struct vipx_container_list *clist);
+ int (*start)(struct vipx_graph *graph);
+ int (*stop)(struct vipx_graph *graph);
+};
+
+struct vipx_queue_list {
+ struct vipx_queue in_queue;
+ struct vipx_queue out_queue;
+ const struct vipx_queue_gops *gops;
+ struct vipx_context *vctx;
};
-int vipx_queue_init(struct vipx_queue *queue, struct vipx_memory *memory, struct mutex *lock);
-int vipx_queue_s_format(struct vipx_queue *queue, struct vs4l_format_list *f);
-int vipx_queue_start(struct vipx_queue *queue);
-int vipx_queue_stop(struct vipx_queue *queue);
-int vipx_queue_poll(struct vipx_queue *queue, struct file *file, poll_table *poll);
-int vipx_queue_qbuf(struct vipx_queue *queue, struct vs4l_container_list *c);
-int vipx_queue_dqbuf(struct vipx_queue *queue, struct vs4l_container_list *c, bool nonblocking);
-void vipx_queue_done(struct vipx_queue *queue, struct vb_container_list *incl, struct vb_container_list *otcl, unsigned long flags);
+void vipx_queue_done(struct vipx_queue_list *qlist,
+ struct vipx_container_list *inclist,
+ struct vipx_container_list *otclist,
+ unsigned long flags);
-#define CALL_QOPS(q, op, ...) (((q)->qops->op) ? ((q)->qops->op(q, ##__VA_ARGS__)) : 0)
+int vipx_queue_init(struct vipx_context *vctx);
#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-message.h"
+#include "vipx-slab.h"
+
+int vipx_slab_alloc(struct vipx_slab_allocator *allocator, void **target,
+ size_t size)
+{
+ int ret;
+
+ vipx_enter();
+	if (size > allocator->cache_size) {
+		ret = -EINVAL;
+		vipx_err("size is bigger than slab size (%zu, %zu)\n",
+			size, allocator->cache_size);
+ goto p_err;
+ }
+
+ *target = kmem_cache_alloc(allocator->cache, GFP_KERNEL);
+ if (!(*target)) {
+ ret = -ENOMEM;
+ vipx_err("kmem_cache_alloc is fail\n");
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+void vipx_slab_free(struct vipx_slab_allocator *allocator, void *target)
+{
+ vipx_enter();
+ kmem_cache_free(allocator->cache, target);
+ vipx_leave();
+}
+
+int vipx_slab_init(struct vipx_slab_allocator *allocator)
+{
+#ifdef TODO_CHECK
+ int ret;
+ char name[100];
+ size_t size;
+
+ vipx_enter();
+ size = sizeof(struct vipx_message);
+ allocator->cache_size = size;
+
+ snprintf(name, sizeof(name), "vipx-slab-%zu", size);
+
+ allocator->cache = kmem_cache_create(name, size,
+ ARCH_KMALLOC_MINALIGN, SLAB_POISON | SLAB_PANIC, NULL);
+ if (!allocator->cache) {
+ ret = -ENOMEM;
+ vipx_err("kmem_cache_create(%zu) is fail\n", size);
+ goto p_err;
+ }
+
+ vipx_leave();
+ return 0;
+p_err:
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+void vipx_slab_deinit(struct vipx_slab_allocator *allocator)
+{
+ kmem_cache_destroy(allocator->cache);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_SLAB_H__
+#define __VIPX_SLAB_H__
+
+#include <linux/slab.h>
+
+struct vipx_slab_allocator {
+ size_t cache_size;
+ struct kmem_cache *cache;
+};
+
+int vipx_slab_alloc(struct vipx_slab_allocator *allocator, void **target,
+ size_t size);
+void vipx_slab_free(struct vipx_slab_allocator *allocator, void *target);
+
+int vipx_slab_init(struct vipx_slab_allocator *allocator);
+void vipx_slab_deinit(struct vipx_slab_allocator *allocator);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/pm_qos.h>
#include <linux/exynos_iovmm.h>
-#include <asm/cacheflush.h>
-#include <linux/io.h>
-#include "vipx-config.h"
+#include "vipx-log.h"
#include "vipx-device.h"
#include "vipx-system.h"
-#define CAM_L0 690000
-#define CAM_L1 680000
-#define CAM_L2 670000
-#define CAM_L3 660000
-#define CAM_L4 650000
-
-#define MIF_L0 2093000
-#define MIF_L1 2002000
-#define MIF_L2 1794000
-#define MIF_L3 1539000
-#define MIF_L4 1352000
-#define MIF_L5 1014000
-#define MIF_L6 845000
-#define MIF_L7 676000
+#define CAM_L0 (690000)
+#define CAM_L1 (680000)
+#define CAM_L2 (670000)
+#define CAM_L3 (660000)
+#define CAM_L4 (650000)
+
+#define MIF_L0 (2093000)
+#define MIF_L1 (2002000)
+#define MIF_L2 (1794000)
+#define MIF_L3 (1539000)
+#define MIF_L4 (1352000)
+#define MIF_L5 (1014000)
+#define MIF_L6 (845000)
+#define MIF_L7 (676000)
+#if defined(CONFIG_PM_DEVFREQ)
struct pm_qos_request exynos_vipx_qos_cam;
struct pm_qos_request exynos_vipx_qos_mem;
+#endif
-int vipx_system_probe(struct vipx_system *system, struct platform_device *pdev)
+int vipx_system_fw_bootup(struct vipx_system *sys)
{
- int ret = 0;
- struct device *dev;
- struct resource *res;
- void __iomem *iomem;
- int irq;
-
- BUG_ON(!system);
- BUG_ON(!pdev);
-
- dev = &pdev->dev;
- system->cam_qos = 0;
- system->mif_qos = 0;
-
- /* VIPX_CPU_SS1 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- probe_err("platform_get_resource(0) is fail(%p)", res);
- ret = PTR_ERR(res);
- goto p_err;
- }
-
- iomem = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (IS_ERR_OR_NULL(iomem)) {
- probe_err("devm_ioremap_resource(0) is fail(%p)", iomem);
- ret = PTR_ERR(iomem);
- goto p_err;
- }
-
- system->reg_ss[VIPX_REG_CPU_SS1] = iomem;
- system->reg_ss_size[VIPX_REG_CPU_SS1] = resource_size(res);
- probe_info("resource0 : %p\n", iomem);
-
- /* VIPX_CPU_SS2 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- probe_err("platform_get_resource(1) is fail(%p)", res);
- ret = PTR_ERR(res);
- goto p_err;
- }
-
- iomem = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (IS_ERR_OR_NULL(iomem)) {
- probe_err("devm_ioremap_resource(1) is fail(%p)", iomem);
- ret = PTR_ERR(iomem);
- goto p_err;
- }
-
- system->reg_ss[VIPX_REG_CPU_SS2] = iomem;
- system->reg_ss_size[VIPX_REG_CPU_SS2] = resource_size(res);
- probe_info("resource1 : %p\n", iomem);
+ int ret;
+ struct vipx_memory *mem;
+ struct vipx_binary *bin;
- /* VIPX ITCM */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!res) {
- probe_err("platform_get_resource(2) is fail(%p)", res);
- ret = PTR_ERR(res);
- goto p_err;
- }
-
- iomem = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (IS_ERR_OR_NULL(iomem)) {
- probe_err("devm_ioremap_resource(2) is fail(%p)", iomem);
- ret = PTR_ERR(iomem);
- goto p_err;
- }
-
- system->itcm = iomem;
- system->itcm_size = resource_size(res);
- probe_info("resource2 : %p\n", iomem);
-
- /* VIPX DTCM */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (!res) {
- probe_err("platform_get_resource(3) is fail(%p)", res);
- ret = PTR_ERR(res);
- goto p_err;
- }
-
- iomem = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (IS_ERR_OR_NULL(iomem)) {
- probe_err("devm_ioremap_resource(3) is fail(%p)", iomem);
- ret = PTR_ERR(iomem);
+ struct {
+ unsigned int mbox_addr;
+ unsigned int mbox_size;
+ unsigned int heap_addr;
+ unsigned int heap_size;
+ unsigned int debug_addr;
+ unsigned int debug_size;
+ } *shared_mem;
+
+ vipx_enter();
+ /* TODO check cam/bus */
+ //execl("/system/bin/cp", "/system/bin/cp", "/d/pm_qos/cam_throughput",
+ // "/data", (char *)0);
+ //execl("/system/bin/cp", "/system/bin/cp", "/d/pm_qos/bus_throughput",
+ // "/data", (char *)0);
+ mem = &sys->memory;
+ bin = &sys->binary;
+
+ ret = sys->ctrl_ops->reset(sys);
+ if (ret)
goto p_err;
- }
- system->dtcm = iomem;
- system->dtcm_size = resource_size(res);
- probe_info("resource3 : %p\n", iomem);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- probe_err("platform_get_irq(0) is fail(%d)", irq);
- ret = -EINVAL;
+ ret = vipx_binary_read(bin, NULL, VIPX_FW_DRAM_NAME, mem->fw.kvaddr,
+ mem->fw.size);
+ if (ret)
goto p_err;
- }
-
- system->irq0 = irq;
- irq = platform_get_irq(pdev, 1);
- if (irq < 0) {
- probe_err("platform_get_irq(1) is fail(%d)", irq);
- ret = -EINVAL;
+ ret = vipx_binary_read(bin, NULL, VIPX_FW_DTCM_NAME, sys->dtcm,
+ sys->dtcm_size);
+ if (ret)
goto p_err;
- }
- system->irq1 = irq;
+ shared_mem = sys->dtcm + 0x3FD0;
+ shared_mem->mbox_addr = mem->mbox.dvaddr;
+ shared_mem->mbox_size = mem->mbox.size;
+ shared_mem->heap_addr = mem->heap.dvaddr;
+ shared_mem->heap_size = mem->heap.size;
+ shared_mem->debug_addr = mem->debug.dvaddr;
+ shared_mem->debug_size = mem->debug.size;
- ret = vipx_exynos_probe(&system->exynos, dev, system->reg_ss, system->itcm, system->dtcm);
- if (ret) {
- probe_err("vipx_exynos_probe is fail(%d)\n", ret);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_memory_probe(&system->memory, dev);
- if (ret) {
- vipx_err("vipx_memory_probe is fail(%d)\n", ret);
+ ret = vipx_binary_read(bin, NULL, VIPX_FW_ITCM_NAME,
+ sys->itcm, sys->itcm_size);
+ if (ret)
goto p_err;
- }
- ret = vipx_interface_probe(&system->interface, dev,
- system->reg_ss[VIPX_REG_CPU_SS1], system->reg_ss_size[VIPX_REG_CPU_SS1],
- system->irq0, system->irq1);
- if (ret) {
- vipx_err("vipx_interface_probe is fail(%d)\n", ret);
+ ret = sys->ctrl_ops->start(sys);
+ if (ret)
goto p_err;
- }
- ret = vipx_binary_init(&system->binary, dev);
- if (ret) {
- vipx_err("vipx_binary_init is fail(%d)\n", ret);
+ ret = vipx_hw_wait_bootup(&sys->interface);
+ if (ret)
goto p_err;
- }
+ vipx_leave();
+ return 0;
p_err:
- probe_info("%s():%d\n", __func__, ret);
return ret;
}
-int vipx_system_open(struct vipx_system *system)
+int vipx_system_start(struct vipx_system *sys)
{
- int ret = 0;
+ int ret;
- ret = vipx_memory_open(&system->memory);
- if (ret) {
- vipx_err("vipx_memory_open is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = vipx_interface_open(&system->interface,
- (void *)system->memory.info.kvaddr_mbox, VIPX_MBOX_SIZE);
- if (ret) {
- vipx_err("vipx_interface_open is fail(%d)\n", ret);
- vipx_memory_close(&system->memory);
+ vipx_enter();
+ ret = vipx_interface_start(&sys->interface);
+ if (ret)
goto p_err;
- }
+ vipx_leave();
p_err:
- vipx_info("%s():%d\n", __func__, ret);
return ret;
}
-int vipx_system_close(struct vipx_system *system)
+int vipx_system_stop(struct vipx_system *sys)
{
- int ret = 0;
-
-#ifdef DUMP_DEBUG_LOG_REGION
- struct vipx_binary *binary;
- binary = &system->binary;
-
-// flush_all_cpu_caches();
+ int ret;
- vipx_info("debug kva %p, heap kva %p\n", system->memory.info.kvaddr_debug, system->memory.info.kvaddr_heap);
- system->memory.info.pb_debug->ops->sync_for_cpu(system->memory.info.pb_debug, 0, system->memory.info.pb_debug->size, 0);
- if (!IS_ERR_OR_NULL(system->memory.info.kvaddr_debug)) {
- ret = vipx_binary_write(binary, VIPX_FW_PATH1, "vipx_log.bin", system->memory.info.kvaddr_debug, VIPX_DEBUG_SIZE);
- if (ret)
- vipx_err("vipx_binary_write is fail(%d)\n", ret);
- }
-
-#endif
-
- ret = vipx_interface_close(&system->interface);
- if (ret)
- vipx_err("vipx_interface_close is fail(%d)\n", ret);
-
- ret = vipx_memory_close(&system->memory);
+ vipx_enter();
+ ret = vipx_interface_stop(&sys->interface);
if (ret)
- vipx_err("vipx_memory_close is fail(%d)\n", ret);
+ goto p_err;
+ vipx_leave();
+p_err:
return ret;
}
-int vipx_system_resume(struct vipx_system *system, u32 mode)
+int vipx_system_resume(struct vipx_system *sys)
{
- int ret = 0;
- struct vipx_exynos *exynos;
- struct vipx_memory *memory;
-
- BUG_ON(!system);
-
- exynos = &system->exynos;
- memory = &system->memory;
-
- ret = CLK_OP(exynos, clk_cfg);
- if (ret) {
- vipx_err("CLK_OP(clk_cfg) is fail(%d)\n", ret);
- goto p_err;
- }
+ int ret;
+ vipx_enter();
#if defined(CONFIG_PM_DEVFREQ)
pm_qos_add_request(&exynos_vipx_qos_cam, PM_QOS_CAM_THROUGHPUT, CAM_L0);
pm_qos_add_request(&exynos_vipx_qos_mem, PM_QOS_BUS_THROUGHPUT, MIF_L0);
#endif
- ret = CLK_OP(exynos, clk_on);
- if (ret) {
- vipx_err("CLK_OP(clk_on) is fail(%d)\n", ret);
- goto p_err;
- }
+ ret = sys->clk_ops->clk_on(sys);
+ if (ret)
+ goto p_err_clk_on;
-#if defined(CONFIG_VIDEOBUF2_DMA_SG)
- system->memory.vipx_mem_ops->resume(system->memory.default_ctx);
-#endif
+ sys->clk_ops->clk_dump(sys);
- if (mode == VIPX_DEVICE_MODE_TEST)
- goto p_err;
-
- ret = vipx_system_fw_bootup(system);
+ ret = iovmm_activate(sys->dev);
if (ret) {
- vipx_err("vipx_system_fw_bootup is fail(%d)\n", ret);
- goto p_err;
+ vipx_err("Failed to activate iommu (%d)\n", ret);
+ goto p_err_iovmm;
}
-p_err:
+ vipx_leave();
+ return 0;
+p_err_iovmm:
+	sys->clk_ops->clk_off(sys);
+p_err_clk_on:
+#if defined(CONFIG_PM_DEVFREQ)
+	pm_qos_remove_request(&exynos_vipx_qos_mem);
+	pm_qos_remove_request(&exynos_vipx_qos_cam);
+#endif
return ret;
}
-int vipx_system_suspend(struct vipx_system *system)
+int vipx_system_suspend(struct vipx_system *sys)
{
- int ret = 0;
- struct vipx_exynos *exynos;
- struct vipx_memory *memory;
-
- BUG_ON(!system);
+ vipx_enter();
+ iovmm_deactivate(sys->dev);
- exynos = &system->exynos;
- memory = &system->memory;
+ //TODO check this
+ //ret = sys->ctrl_ops->reset(sys);
+ //if (ret)
+ // vipx_err("Failed to reset for power down (%d)\n", ret);
-#if defined(CONFIG_VIDEOBUF2_DMA_SG)
- system->memory.vipx_mem_ops->suspend(system->memory.default_ctx);
-#endif
-
-#if 0
- ret = CTL_OP(exynos, ctl_reset, 0);
- if (ret)
- vipx_err("CTL_OP(ctl_reset) is fail(%d)\n", ret);
-#endif
- ret = CLK_OP(exynos, clk_off);
- if (ret)
- vipx_err("CLK_OP(clk_off) is fail(%d)\n", ret);
+ sys->clk_ops->clk_off(sys);
#if defined(CONFIG_PM_DEVFREQ)
- pm_qos_remove_request(&exynos_vipx_qos_cam);
pm_qos_remove_request(&exynos_vipx_qos_mem);
+ pm_qos_remove_request(&exynos_vipx_qos_cam);
#endif
- return ret;
+ vipx_leave();
+ return 0;
}
-int vipx_system_start(struct vipx_system *system)
+int vipx_system_open(struct vipx_system *sys)
{
- int ret = 0;
+ int ret;
- ret = vipx_interface_start(&system->interface);
+ vipx_enter();
+ ret = vipx_memory_open(&sys->memory);
if (ret)
- vipx_err("vipx_interface_start is fail(%d)\n", ret);
+ goto p_err_memory;
- vipx_info("%s():%d\n", __func__, ret);
+ ret = vipx_interface_open(&sys->interface,
+ sys->memory.mbox.kvaddr);
+ if (ret)
+ goto p_err_interface;
+
+ ret = vipx_graphmgr_open(&sys->graphmgr);
+ if (ret)
+ goto p_err_graphmgr;
+
+ vipx_leave();
+ return 0;
+p_err_graphmgr:
+ vipx_interface_close(&sys->interface);
+p_err_interface:
+ vipx_memory_close(&sys->memory);
+p_err_memory:
return ret;
}
-int vipx_system_stop(struct vipx_system *system)
+int vipx_system_close(struct vipx_system *sys)
{
- int ret = 0;
-
- ret = vipx_interface_stop(&system->interface);
- if (ret)
- vipx_err("vipx_interface_stop is fail(%d)\n", ret);
-
- vipx_info("%s():%d\n", __func__, ret);
- return ret;
+ vipx_enter();
+ vipx_graphmgr_close(&sys->graphmgr);
+ vipx_interface_close(&sys->interface);
+ vipx_memory_close(&sys->memory);
+ vipx_leave();
+ return 0;
}
-int vipx_system_fw_bootup(struct vipx_system *system)
+int vipx_system_probe(struct vipx_device *device)
{
- int ret = 0;
- struct vipx_exynos *exynos;
- struct vipx_memory *memory;
- struct vipx_binary *binary;
+ int ret;
+ struct vipx_system *sys;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct resource *res;
+ void __iomem *iomem;
- struct {
- volatile u32 sign;
- volatile u32 mbox_addr;
- volatile u32 mbox_size;
- volatile u32 debug_addr;
- volatile u32 debug_size;
- } *tail_p;
-
- BUG_ON(!system);
-
-// execl("/system/bin/cp", "/system/bin/cp", "/d/pm_qos/cam_throughput", "/data", (char *)0);
-// execl("/system/bin/cp", "/system/bin/cp", "/d/pm_qos/bus_throughput", "/data", (char *)0);
- exynos = &system->exynos;
- memory = &system->memory;
- binary = &system->binary;
-
- ret = CTL_OP(exynos, ctl_reset, 0);
- if (ret) {
- vipx_err("CTL_OP(ctl_reset) is fail(%d)\n", ret);
- goto p_err;
+ vipx_enter();
+ sys = &device->system;
+ sys->device = device;
+
+ pdev = to_platform_device(device->dev);
+ dev = device->dev;
+ sys->dev = dev;
+
+ /* VIPX_CPU_SS1 */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -EINVAL;
+		vipx_err("platform_get_resource(0) is fail\n");
+ goto p_err_res_ss1;
}
- ret = vipx_binary_read(binary, VIPX_FW_PATH1, VIPX_FW_DRAM_NAME,
- (void *)system->memory.info.kvaddr_fw, VIPX_CM7_DRAM_BIN_SIZE);
- if (ret) {
- ret = vipx_binary_read(binary, VIPX_FW_PATH2, VIPX_FW_DRAM_NAME,
- (void *)system->memory.info.kvaddr_fw, VIPX_CM7_DRAM_BIN_SIZE);
- if (ret) {
- vipx_err("vipx_dram_binary_load is fail(%d)\n", ret);
- goto p_err;
- }
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+		vipx_err("devm_ioremap_resource(0) is fail (%d)\n", ret);
+ goto p_err_remap_ss1;
}
- ret = vipx_binary_read(binary, VIPX_FW_PATH1, VIPX_FW_DTCM_NAME,
- (void *)system->dtcm, system->dtcm_size);
- if (ret) {
- ret = vipx_binary_read(binary, VIPX_FW_PATH2, VIPX_FW_DTCM_NAME,
- (void *)system->dtcm, system->dtcm_size - SZ_1K);
- if (ret) {
- vipx_err("vipx_dtcm_binary_load is fail(%d)\n", ret);
- goto p_err;
- }
+ sys->reg_ss[REG_SS1] = iomem;
+ sys->reg_ss_size[REG_SS1] = resource_size(res);
+
+ /* VIPX_CPU_SS2 */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ ret = -EINVAL;
+		vipx_err("platform_get_resource(1) is fail\n");
+ goto p_err_res_ss2;
}
- tail_p = (void *)(system->dtcm + 0x3FD0);
- tail_p->sign = 0xABCDEFAB;
- tail_p->mbox_addr = system->memory.info.dvaddr_mbox;
- tail_p->mbox_size = VIPX_MBOX_SIZE;
- tail_p->debug_addr = system->memory.info.dvaddr_debug;
- tail_p->debug_size = VIPX_DEBUG_SIZE;
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+		vipx_err("devm_ioremap_resource(1) is fail (%d)\n", ret);
+ goto p_err_remap_ss2;
+ }
- vipx_info("host_shared : %p, mbox_addr : %x, mbox_size : %x, debug_addr : %x, debug_size : %x\n",
- tail_p, tail_p->mbox_addr, tail_p->mbox_size, tail_p->debug_addr, tail_p->debug_size);
+ sys->reg_ss[REG_SS2] = iomem;
+ sys->reg_ss_size[REG_SS2] = resource_size(res);
- ret = vipx_binary_read(binary, VIPX_FW_PATH1, VIPX_FW_ITCM_NAME,
- (void *)system->itcm, system->itcm_size);
- if (ret) {
- ret = vipx_binary_read(binary, VIPX_FW_PATH2, VIPX_FW_ITCM_NAME,
- (void *)system->itcm, system->itcm_size);
- if (ret) {
- vipx_err("vipx_itcm_binary_load is fail(%d)\n", ret);
- goto p_err;
- }
+ /* VIPX ITCM */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ ret = -EINVAL;
+		vipx_err("platform_get_resource(2) is fail\n");
+ goto p_err_res_itcm;
}
-// flush_all_cpu_caches();
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+		vipx_err("devm_ioremap_resource(2) is fail (%d)\n", ret);
+ goto p_err_remap_itcm;
+ }
- ret = CTL_OP(exynos, ctl_start, 0);
- if (ret) {
- vipx_err("CTL_OP(ctl_start) is fail(%d)\n", ret);
- goto p_err;
+ sys->itcm = iomem;
+ sys->itcm_size = resource_size(res);
+
+ /* VIPX DTCM */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ ret = -EINVAL;
+		vipx_err("platform_get_resource(3) is fail\n");
+ goto p_err_res_dtcm;
}
-#if 0 // change code position to vipx_vertex_streamon in vipx-vertex.c before streamon execution
- ret = vipx_hw_wait_bootup(&system->interface);
- if (ret) {
- vipx_err("vipx_hw_wait_bootup is fail(%d)\n", ret);
- goto p_err;
+
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem)) {
+ ret = PTR_ERR(iomem);
+		vipx_err("devm_ioremap_resource(3) is fail (%d)\n", ret);
+ goto p_err_remap_dtcm;
}
-#endif
-p_err:
- vipx_info("%s():%d\n", __func__, ret);
+ sys->dtcm = iomem;
+ sys->dtcm_size = resource_size(res);
+
+ sys->cam_qos = 0;
+ sys->mif_qos = 0;
+
+ sys->clk_ops = &vipx_clk_ops;
+ sys->ctrl_ops = &vipx_ctrl_ops;
+ sys->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(sys->pinctrl)) {
+ ret = PTR_ERR(sys->pinctrl);
+ vipx_err("Failed to get devm pinctrl (%d)\n", ret);
+ goto p_err_pinctrl;
+ }
+
+ ret = sys->clk_ops->clk_init(sys);
+ if (ret)
+ goto p_err_clk;
+
+ ret = vipx_memory_probe(sys);
+ if (ret)
+ goto p_err_memory;
+
+ ret = vipx_interface_probe(sys);
+ if (ret)
+ goto p_err_interface;
+
+ ret = vipx_binary_init(sys);
+ if (ret)
+ goto p_err_binary;
+
+ ret = vipx_graphmgr_probe(sys);
+ if (ret)
+ goto p_err_graphmgr;
+
+ vipx_leave();
+ return 0;
+p_err_graphmgr:
+ vipx_binary_deinit(&sys->binary);
+p_err_binary:
+ vipx_interface_remove(&sys->interface);
+p_err_interface:
+ vipx_memory_remove(&sys->memory);
+p_err_memory:
+ sys->clk_ops->clk_deinit(sys);
+p_err_clk:
+ devm_pinctrl_put(sys->pinctrl);
+p_err_pinctrl:
+ devm_iounmap(dev, sys->dtcm);
+p_err_remap_dtcm:
+p_err_res_dtcm:
+ devm_iounmap(dev, sys->itcm);
+p_err_remap_itcm:
+p_err_res_itcm:
+ devm_iounmap(dev, sys->reg_ss[REG_SS2]);
+p_err_remap_ss2:
+p_err_res_ss2:
+ devm_iounmap(dev, sys->reg_ss[REG_SS1]);
+p_err_remap_ss1:
+p_err_res_ss1:
return ret;
}
+
+void vipx_system_remove(struct vipx_system *sys)
+{
+ vipx_enter();
+ vipx_graphmgr_remove(&sys->graphmgr);
+ vipx_binary_deinit(&sys->binary);
+ vipx_interface_remove(&sys->interface);
+ vipx_memory_remove(&sys->memory);
+ sys->clk_ops->clk_deinit(sys);
+ devm_pinctrl_put(sys->pinctrl);
+ devm_iounmap(sys->dev, sys->dtcm);
+ devm_iounmap(sys->dev, sys->itcm);
+ devm_iounmap(sys->dev, sys->reg_ss[REG_SS2]);
+ devm_iounmap(sys->dev, sys->reg_ss[REG_SS1]);
+ vipx_leave();
+}
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef VIPX_SYSTEM_H_
-#define VIPX_SYSTEM_H_
-
-#include <linux/platform_device.h>
+#ifndef __VIPX_SYSTEM_H__
+#define __VIPX_SYSTEM_H__
+#include "platform/vipx-clk.h"
+#include "platform/vipx-ctrl.h"
#include "vipx-interface.h"
#include "vipx-memory.h"
-#include "vipx-exynos.h"
#include "vipx-binary.h"
+#include "vipx-graphmgr.h"
+
+struct vipx_device;
struct vipx_system {
- struct platform_device *pdev;
- void __iomem *reg_ss[VIPX_REG_MAX_CNT];
- void __iomem *itcm;
- void __iomem *dtcm;
- resource_size_t reg_ss_size[VIPX_REG_MAX_CNT];
- resource_size_t itcm_size;
- resource_size_t dtcm_size;
- int irq0;
- int irq1;
-
- u32 cam_qos;
- u32 mif_qos;
-
- struct vipx_interface interface;
- struct vipx_memory memory;
- struct vipx_exynos exynos;
- struct vipx_binary binary;
+ struct device *dev;
+ void __iomem *reg_ss[REG_MAX];
+ resource_size_t reg_ss_size[REG_MAX];
+ void __iomem *itcm;
+ resource_size_t itcm_size;
+ void __iomem *dtcm;
+ resource_size_t dtcm_size;
+
+ unsigned int cam_qos;
+ unsigned int mif_qos;
+
+ const struct vipx_clk_ops *clk_ops;
+ const struct vipx_ctrl_ops *ctrl_ops;
+ struct pinctrl *pinctrl;
+ struct vipx_memory memory;
+ struct vipx_interface interface;
+ struct vipx_binary binary;
+ struct vipx_graphmgr graphmgr;
+
+ struct vipx_device *device;
};
-int vipx_system_probe(struct vipx_system *system, struct platform_device *pdev);
-int vipx_system_open(struct vipx_system *system);
-int vipx_system_close(struct vipx_system *system);
-int vipx_system_resume(struct vipx_system *system, u32 mode);
-int vipx_system_suspend(struct vipx_system *system);
-int vipx_system_start(struct vipx_system *system);
-int vipx_system_stop(struct vipx_system *system);
-int vipx_system_fw_bootup(struct vipx_system *system);
+int vipx_system_fw_bootup(struct vipx_system *sys);
+
+int vipx_system_resume(struct vipx_system *sys);
+int vipx_system_suspend(struct vipx_system *sys);
+
+int vipx_system_start(struct vipx_system *sys);
+int vipx_system_stop(struct vipx_system *sys);
+int vipx_system_open(struct vipx_system *sys);
+int vipx_system_close(struct vipx_system *sys);
+
+int vipx_system_probe(struct vipx_device *device);
+void vipx_system_remove(struct vipx_system *sys);
#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include "vipx-debug.h"
+#include "vipx-log.h"
#include "vipx-taskmgr.h"
-#include "vipx-graphmgr.h"
-#if 0
-#define taskmgr_debug vipx_info
-#else
-#define taskmgr_debug(fmt, args...)
-#endif
-
-void vipx_task_s_free(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+static void __vipx_task_s_free(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
+ vipx_enter();
task->state = VIPX_TASK_STATE_FREE;
-
- list_add_tail(&task->list, &taskmgr->fre_list);
- taskmgr->fre_cnt++;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_g_free(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
-{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- if (taskmgr->fre_cnt &&
- (*task = container_of(taskmgr->fre_list.next, struct vipx_task, list))) {
- list_del(&(*task)->list);
- taskmgr->fre_cnt--;
- (*task)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *task = NULL;
- }
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_free_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
-{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- if (taskmgr->fre_cnt)
- *task = container_of(taskmgr->fre_list.next, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ list_add_tail(&task->list, &tmgr->fre_list);
+ tmgr->fre_cnt++;
+ vipx_leave();
}
-void vipx_task_free_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static struct vipx_task *__vipx_task_get_first_free(struct vipx_taskmgr *tmgr)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ struct vipx_task *task;
- if (taskmgr->fre_cnt)
- *task = container_of(taskmgr->fre_list.prev, struct vipx_task, list);
+ vipx_enter();
+ if (tmgr->fre_cnt)
+ task = list_first_entry(&tmgr->fre_list,
+ struct vipx_task, list);
else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_print_free_list(struct vipx_taskmgr *taskmgr)
-{
- DLOG_INIT();
- struct vipx_task *task, *temp;
-
- DLOG("[FRM] fre(%d, %d) :", taskmgr->id, taskmgr->fre_cnt);
- list_for_each_entry_safe(task, temp, &taskmgr->fre_list, list) {
- DLOG("%d->", task->index);
- }
- DLOG("X");
-
- vipx_info("%s\n", DLOG_OUT());
+ task = NULL;
+ vipx_leave();
+ return task;
}
-void vipx_task_s_request(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+static void __vipx_task_s_request(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
+ vipx_enter();
task->state = VIPX_TASK_STATE_REQUEST;
-
- list_add_tail(&task->list, &taskmgr->req_list);
- taskmgr->req_cnt++;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ list_add_tail(&task->list, &tmgr->req_list);
+ tmgr->req_cnt++;
+ vipx_leave();
}
-void vipx_task_g_request(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static void __vipx_task_s_prepare(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- if (taskmgr->req_cnt &&
- (*task = container_of(taskmgr->req_list.next, struct vipx_task, list))) {
- list_del(&(*task)->list);
- taskmgr->req_cnt--;
- (*task)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *task = NULL;
- }
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ task->state = VIPX_TASK_STATE_PREPARE;
+ list_add_tail(&task->list, &tmgr->pre_list);
+ tmgr->pre_cnt++;
+ vipx_leave();
}
-void vipx_task_request_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static void __vipx_task_s_process(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- if (taskmgr->req_cnt)
- *task = container_of(taskmgr->req_list.next, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ task->state = VIPX_TASK_STATE_PROCESS;
+ list_add_tail(&task->list, &tmgr->pro_list);
+ tmgr->pro_cnt++;
+ vipx_leave();
}
-void vipx_task_request_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static void __vipx_task_s_complete(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- if (taskmgr->req_cnt)
- *task = container_of(taskmgr->req_list.prev, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ task->state = VIPX_TASK_STATE_COMPLETE;
+ list_add_tail(&task->list, &tmgr->com_list);
+ tmgr->com_cnt++;
+ vipx_leave();
}
-void vipx_task_print_request_list(struct vipx_taskmgr *taskmgr)
+void vipx_task_print_free_list(struct vipx_taskmgr *tmgr)
{
- DLOG_INIT();
struct vipx_task *task, *temp;
+ int idx = 0;
- DLOG("[FRM] req(%d, %d) :", taskmgr->id, taskmgr->req_cnt);
- list_for_each_entry_safe(task, temp, &taskmgr->req_list, list) {
- DLOG("%d->", task->index);
+ vipx_enter();
+ vipx_dbg("fre(%d, %d)\n", tmgr->id, tmgr->fre_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->fre_list, list) {
+ vipx_dbg("fre[%d] %d\n", idx++, task->index);
}
- DLOG("X");
-
- vipx_info("%s\n", DLOG_OUT());
+ vipx_leave();
}
-void vipx_task_s_prepare(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
-{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- task->state = VIPX_TASK_STATE_PREPARE;
-
- list_add_tail(&task->list, &taskmgr->pre_list);
- taskmgr->pre_cnt++;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-void vipx_task_g_prepare(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+void vipx_task_print_request_list(struct vipx_taskmgr *tmgr)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ struct vipx_task *task, *temp;
+ int idx = 0;
- if (taskmgr->pre_cnt &&
- (*task = container_of(taskmgr->pre_list.next, struct vipx_task, list))) {
- list_del(&(*task)->list);
- taskmgr->pre_cnt--;
- (*task)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *task = NULL;
+ vipx_enter();
+ vipx_dbg("req(%d, %d)\n", tmgr->id, tmgr->req_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->req_list, list) {
+ vipx_dbg("req[%d] %d\n", idx++, task->index);
}
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_prepare_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
-{
- if (taskmgr->pre_cnt)
- *task = container_of(taskmgr->pre_list.next, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_leave();
}
-void vipx_task_prepare_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+void vipx_task_print_prepare_list(struct vipx_taskmgr *tmgr)
{
- if (taskmgr->pre_cnt)
- *task = container_of(taskmgr->pre_list.prev, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_print_prepare_list(struct vipx_taskmgr *taskmgr)
-{
- DLOG_INIT();
struct vipx_task *task, *temp;
+ int idx = 0;
- DLOG("[FRM] pre(%d, %d) :", taskmgr->id, taskmgr->pre_cnt);
- list_for_each_entry_safe(task, temp, &taskmgr->pre_list, list) {
- DLOG("%d->", task->index);
+ vipx_enter();
+ vipx_dbg("pre(%d, %d)\n", tmgr->id, tmgr->pre_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->pre_list, list) {
+ vipx_dbg("pre[%d] %d\n", idx++, task->index);
}
- DLOG("X");
-
- vipx_info("%s\n", DLOG_OUT());
+ vipx_leave();
}
-void vipx_task_s_process(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_print_process_list(struct vipx_taskmgr *tmgr)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- task->state = VIPX_TASK_STATE_PROCESS;
+ struct vipx_task *task, *temp;
+ int idx = 0;
- list_add_tail(&task->list, &taskmgr->pro_list);
- taskmgr->pro_cnt++;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ vipx_dbg("pro(%d, %d)\n", tmgr->id, tmgr->pro_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->pro_list, list) {
+ vipx_dbg("pro[%d] %d\n", idx++, task->index);
+ }
+ vipx_leave();
}
-void vipx_task_g_process(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+void vipx_task_print_complete_list(struct vipx_taskmgr *tmgr)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ struct vipx_task *task, *temp;
+ int idx = 0;
- if (taskmgr->pro_cnt &&
- (*task = container_of(taskmgr->pro_list.next, struct vipx_task, list))) {
- list_del(&(*task)->list);
- taskmgr->pro_cnt--;
- (*task)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *task = NULL;
+ vipx_enter();
+ vipx_dbg("com(%d, %d)\n", tmgr->id, tmgr->com_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->com_list, list) {
+ vipx_dbg("com[%d] %d\n", idx++, task->index);
}
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_leave();
}
-void vipx_task_process_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+void vipx_task_print_all(struct vipx_taskmgr *tmgr)
{
- if (taskmgr->pro_cnt)
- *task = container_of(taskmgr->pro_list.next, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
-
-void vipx_task_process_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
-{
- if (taskmgr->pro_cnt)
- *task = container_of(taskmgr->pro_list.prev, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ vipx_task_print_free_list(tmgr);
+ vipx_task_print_request_list(tmgr);
+ vipx_task_print_prepare_list(tmgr);
+ vipx_task_print_process_list(tmgr);
+ vipx_task_print_complete_list(tmgr);
+ vipx_leave();
}
-void vipx_task_print_process_list(struct vipx_taskmgr *taskmgr)
+static int __vipx_task_check_free(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- DLOG_INIT();
- struct vipx_task *task, *temp;
-
- DLOG("[FRM] pro(%d, %d) :", taskmgr->id, taskmgr->pro_cnt);
- list_for_each_entry_safe(task, temp, &taskmgr->pro_list, list) {
- DLOG("%d->", task->index);
+ if (!tmgr->fre_cnt) {
+ vipx_warn("fre_cnt is zero\n");
+ return -EINVAL;
}
- DLOG("X");
- vipx_info("%s\n", DLOG_OUT());
+ if (task->state != VIPX_TASK_STATE_FREE) {
+		vipx_warn("state(%x) is invalid\n", task->state);
+ return -EINVAL;
+ }
+ return 0;
}
-void vipx_task_s_complete(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+static int __vipx_task_check_request(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
-
- task->state = VIPX_TASK_STATE_COMPLETE;
+ if (!tmgr->req_cnt) {
+ vipx_warn("req_cnt is zero\n");
+ return -EINVAL;
+ }
- list_add_tail(&task->list, &taskmgr->com_list);
- taskmgr->com_cnt++;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ if (task->state != VIPX_TASK_STATE_REQUEST) {
+ vipx_warn("state(%x) is invlid\n", task->state);
+ return -EINVAL;
+ }
+ return 0;
}
-void vipx_task_g_complete(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static int __vipx_task_check_prepare(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ if (!tmgr->pre_cnt) {
+ vipx_warn("pre_cnt is zero\n");
+ return -EINVAL;
+ }
- if (taskmgr->com_cnt &&
- (*task = container_of(taskmgr->com_list.next, struct vipx_task, list))) {
- list_del(&(*task)->list);
- taskmgr->com_cnt--;
- (*task)->state = VIPX_TASK_STATE_INVALID;
- } else {
- *task = NULL;
+ if (task->state != VIPX_TASK_STATE_PREPARE) {
+		vipx_warn("state(%x) is invalid\n", task->state);
+ return -EINVAL;
}
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ return 0;
}
-void vipx_task_complete_head(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+static int __vipx_task_check_process(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- if (taskmgr->com_cnt)
- *task = container_of(taskmgr->com_list.next, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
-}
+ if (!tmgr->pro_cnt) {
+ vipx_warn("pro_cnt is zero\n");
+ return -EINVAL;
+ }
-void vipx_task_complete_tail(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
-{
- if (taskmgr->com_cnt)
- *task = container_of(taskmgr->com_list.prev, struct vipx_task, list);
- else
- *task = NULL;
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ if (task->state != VIPX_TASK_STATE_PROCESS) {
+		vipx_warn("state(%x) is invalid\n", task->state);
+ return -EINVAL;
+ }
+ return 0;
}
-void vipx_task_print_complete_list(struct vipx_taskmgr *taskmgr)
+static int __vipx_task_check_complete(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- DLOG_INIT();
- struct vipx_task *task, *temp;
-
- DLOG("[FRM] com(%d, %d) :", taskmgr->id, taskmgr->com_cnt);
- list_for_each_entry_safe(task, temp, &taskmgr->com_list, list) {
- DLOG("%d->", task->index);
+ if (!tmgr->com_cnt) {
+ vipx_warn("com_cnt is zero\n");
+ return -EINVAL;
}
- DLOG("X");
- vipx_info("%s\n", DLOG_OUT());
+ if (task->state != VIPX_TASK_STATE_COMPLETE) {
+		vipx_warn("state(%x) is invalid\n", task->state);
+ return -EINVAL;
+ }
+ return 0;
}
-void vipx_task_trans_fre_to_req(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_fre_to_req(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->fre_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_FREE);
+ vipx_enter();
+ if (__vipx_task_check_free(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->fre_cnt--;
- vipx_task_s_request(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->fre_cnt--;
+ __vipx_task_s_request(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_req_to_pre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_req_to_pre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->req_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_REQUEST);
+ vipx_enter();
+ if (__vipx_task_check_request(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->req_cnt--;
- vipx_task_s_prepare(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->req_cnt--;
+ __vipx_task_s_prepare(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_req_to_pro(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_req_to_pro(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->req_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_REQUEST);
+ vipx_enter();
+ if (__vipx_task_check_request(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->req_cnt--;
- vipx_task_s_process(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->req_cnt--;
+ __vipx_task_s_process(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_req_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_req_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->req_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_REQUEST);
+ vipx_enter();
+ if (__vipx_task_check_request(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->req_cnt--;
- vipx_task_s_complete(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->req_cnt--;
+ __vipx_task_s_complete(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_req_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_req_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->req_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_REQUEST);
+ vipx_enter();
+ if (__vipx_task_check_request(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->req_cnt--;
- vipx_task_s_free(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->req_cnt--;
+ __vipx_task_s_free(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_pre_to_pro(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_pre_to_pro(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->pre_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_PREPARE);
+ vipx_enter();
+ if (__vipx_task_check_prepare(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->pre_cnt--;
- vipx_task_s_process(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->pre_cnt--;
+ __vipx_task_s_process(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_pre_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_pre_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->pre_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_PREPARE);
+ vipx_enter();
+ if (__vipx_task_check_prepare(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->pre_cnt--;
- vipx_task_s_complete(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->pre_cnt--;
+ __vipx_task_s_complete(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_pre_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_pre_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->pre_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_PREPARE);
+ vipx_enter();
+ if (__vipx_task_check_prepare(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->pre_cnt--;
- vipx_task_s_free(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->pre_cnt--;
+ __vipx_task_s_free(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_pro_to_com(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_pro_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->pro_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_PROCESS);
+ vipx_enter();
+ if (__vipx_task_check_process(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->pro_cnt--;
- vipx_task_s_complete(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->pro_cnt--;
+ __vipx_task_s_complete(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_pro_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_pro_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->pro_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_PROCESS);
+ vipx_enter();
+ if (__vipx_task_check_process(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->pro_cnt--;
- vipx_task_s_free(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->pro_cnt--;
+ __vipx_task_s_free(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_com_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_com_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
- BUG_ON(!taskmgr->com_cnt);
- BUG_ON(task->state != VIPX_TASK_STATE_COMPLETE);
+ vipx_enter();
+ if (__vipx_task_check_complete(tmgr, task))
+ return;
list_del(&task->list);
- taskmgr->com_cnt--;
- vipx_task_s_free(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ tmgr->com_cnt--;
+ __vipx_task_s_free(tmgr, task);
+ vipx_leave();
}
-void vipx_task_trans_any_to_fre(struct vipx_taskmgr *taskmgr, struct vipx_task *task)
+void vipx_task_trans_any_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ vipx_enter();
list_del(&task->list);
switch (task->state) {
case VIPX_TASK_STATE_REQUEST:
- taskmgr->req_cnt--;
+ tmgr->req_cnt--;
break;
case VIPX_TASK_STATE_PREPARE:
- taskmgr->pre_cnt--;
+ tmgr->pre_cnt--;
break;
case VIPX_TASK_STATE_PROCESS:
- taskmgr->pro_cnt--;
+ tmgr->pro_cnt--;
break;
case VIPX_TASK_STATE_COMPLETE:
- taskmgr->com_cnt--;
+ tmgr->com_cnt--;
break;
default:
- BUG();
- break;
+ vipx_err("state(%d) of task is invalid\n", task->state);
+ return;
}
- vipx_task_s_free(taskmgr, task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ __vipx_task_s_free(tmgr, task);
+ vipx_leave();
}
-void vipx_task_pick_fre_to_req(struct vipx_taskmgr *taskmgr, struct vipx_task **task)
+struct vipx_task *vipx_task_pick_fre_to_req(struct vipx_taskmgr *tmgr)
{
- BUG_ON(!taskmgr);
- BUG_ON(!task);
+ struct vipx_task *task;
- vipx_task_free_head(taskmgr, task);
- if (*task)
- vipx_task_trans_fre_to_req(taskmgr, *task);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
+ vipx_enter();
+ task = __vipx_task_get_first_free(tmgr);
+ if (task)
+ vipx_task_trans_fre_to_req(tmgr, task);
+ vipx_leave();
+ return task;
}
-void vipx_task_print_all(struct vipx_taskmgr *taskmgr)
+/*
+ * Initialize the task manager under tmgr->slock: record its id, zero the
+ * barrier-trace index and every state counter, reset all five state
+ * lists, then place tasks 1..tot_cnt-1 on the free list. Always returns 0.
+ */
+int vipx_task_init(struct vipx_taskmgr *tmgr, unsigned int id, void *owner)
{
- BUG_ON(!taskmgr);
-
- vipx_task_print_request_list(taskmgr);
- vipx_task_print_prepare_list(taskmgr);
- vipx_task_print_process_list(taskmgr);
- vipx_task_print_complete_list(taskmgr);
-}
-
-int vipx_task_init(struct vipx_taskmgr *taskmgr, void *owner)
-{
- int ret = 0;
unsigned long flags;
- u32 index;
-
- spin_lock_irqsave(&taskmgr->slock, flags);
-
- INIT_LIST_HEAD(&taskmgr->fre_list);
- INIT_LIST_HEAD(&taskmgr->req_list);
- INIT_LIST_HEAD(&taskmgr->pre_list);
- INIT_LIST_HEAD(&taskmgr->pro_list);
- INIT_LIST_HEAD(&taskmgr->com_list);
-
- taskmgr->tot_cnt = VIPX_MAX_TASK;
- taskmgr->fre_cnt = 0;
- taskmgr->req_cnt = 0;
- taskmgr->pre_cnt = 0;
- taskmgr->pro_cnt = 0;
- taskmgr->com_cnt = 0;
-
- /* task index 0 means invalid because firmware can't accept 0 invocation id */
- for (index = 1; index < taskmgr->tot_cnt; ++index) {
- taskmgr->task[index].index = index;
- taskmgr->task[index].owner = owner;
- taskmgr->task[index].findex = VIPX_MAX_TASK;
- taskmgr->task[index].tdindex = VIPX_MAX_TASKDESC;
- vipx_task_s_free(taskmgr, &taskmgr->task[index]);
+ unsigned int index;
+
+ vipx_enter();
+ spin_lock_irqsave(&tmgr->slock, flags);
+ tmgr->id = id;
+ tmgr->sindex = 0;
+
+ tmgr->tot_cnt = VIPX_MAX_TASK;
+ tmgr->fre_cnt = 0;
+ tmgr->req_cnt = 0;
+ tmgr->pre_cnt = 0;
+ tmgr->pro_cnt = 0;
+ tmgr->com_cnt = 0;
+
+ INIT_LIST_HEAD(&tmgr->fre_list);
+ INIT_LIST_HEAD(&tmgr->req_list);
+ INIT_LIST_HEAD(&tmgr->pre_list);
+ INIT_LIST_HEAD(&tmgr->pro_list);
+ INIT_LIST_HEAD(&tmgr->com_list);
+
+ /*
+ * task index 0 means invalid
+ * because firmware can't accept 0 invocation id
+ */
+ for (index = 1; index < tmgr->tot_cnt; ++index) {
+ tmgr->task[index].index = index;
+ tmgr->task[index].owner = owner;
+ tmgr->task[index].findex = VIPX_MAX_TASK;
+ tmgr->task[index].tdindex = VIPX_MAX_TASKDESC;
+ __vipx_task_s_free(tmgr, &tmgr->task[index]);
}
- spin_unlock_irqrestore(&taskmgr->slock, flags);
+ spin_unlock_irqrestore(&tmgr->slock, flags);
- return ret;
+ vipx_leave();
+ return 0;
}
-int vipx_task_deinit(struct vipx_taskmgr *taskmgr)
+/*
+ * Tear down the task manager: force every task (including index 0) back
+ * onto the free list under tmgr->slock. Always returns 0.
+ */
+int vipx_task_deinit(struct vipx_taskmgr *tmgr)
{
- int ret = 0;
unsigned long flags;
- u32 index;
+ unsigned int index;
- spin_lock_irqsave(&taskmgr->slock, flags);
+ vipx_enter();
+ spin_lock_irqsave(&tmgr->slock, flags);
- for (index = 0; index < taskmgr->tot_cnt; ++index)
- vipx_task_s_free(taskmgr, &taskmgr->task[index]);
+ for (index = 0; index < tmgr->tot_cnt; ++index)
+ __vipx_task_s_free(tmgr, &tmgr->task[index]);
- spin_unlock_irqrestore(&taskmgr->slock, flags);
+ spin_unlock_irqrestore(&tmgr->slock, flags);
- taskmgr_debug("[TASKMGR] %s[%d]\n", __func__, __LINE__);
- return ret;
+ vipx_leave();
+ return 0;
}
-void vipx_task_flush(struct vipx_taskmgr *taskmgr)
+/*
+ * Return every task still sitting on the request/prepare/process/complete
+ * lists to the free list, logging a warning for each flushed task. After
+ * the flush, all tot_cnt-1 usable tasks should be free; a mismatch is
+ * reported (the old code BUG()'ed here) instead of halting.
+ */
+void vipx_task_flush(struct vipx_taskmgr *tmgr)
{
- unsigned long flag;
+ unsigned long flags;
struct vipx_task *task, *temp;
- BUG_ON(!taskmgr);
-
- spin_lock_irqsave(&taskmgr->slock, flag);
+ vipx_enter();
+ spin_lock_irqsave(&tmgr->slock, flags);
- list_for_each_entry_safe(task, temp, &taskmgr->req_list, list) {
- vipx_task_trans_req_to_fre(taskmgr, task);
- vipx_warn("req task%d is flushed(count : %d)", task->id, taskmgr->req_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->req_list, list) {
+ vipx_task_trans_req_to_fre(tmgr, task);
+ vipx_warn("req task(%d) is flushed(count:%d)",
+ task->id, tmgr->req_cnt);
}
- list_for_each_entry_safe(task, temp, &taskmgr->pre_list, list) {
- vipx_task_trans_pre_to_fre(taskmgr, task);
- vipx_warn("pre task%d is flushed(count : %d)", task->id, taskmgr->pre_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->pre_list, list) {
+ vipx_task_trans_pre_to_fre(tmgr, task);
+ vipx_warn("pre task(%d) is flushed(count:%d)",
+ task->id, tmgr->pre_cnt);
}
- list_for_each_entry_safe(task, temp, &taskmgr->pro_list, list) {
- vipx_task_trans_pro_to_fre(taskmgr, task);
- vipx_warn("pro task%d is flushed(count : %d)", task->id, taskmgr->pro_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->pro_list, list) {
+ vipx_task_trans_pro_to_fre(tmgr, task);
+ vipx_warn("pro task(%d) is flushed(count:%d)",
+ task->id, tmgr->pro_cnt);
}
- list_for_each_entry_safe(task, temp, &taskmgr->com_list, list) {
- vipx_task_trans_com_to_fre(taskmgr, task);
- vipx_warn("com task%d is flushed(count : %d)", task->id, taskmgr->com_cnt);
+ list_for_each_entry_safe(task, temp, &tmgr->com_list, list) {
+ vipx_task_trans_com_to_fre(tmgr, task);
+ vipx_warn("com task(%d) is flushed(count:%d)",
+ task->id, tmgr->com_cnt);
}
- spin_unlock_irqrestore(&taskmgr->slock, flag);
+ spin_unlock_irqrestore(&tmgr->slock, flags);
- BUG_ON(taskmgr->fre_cnt != (taskmgr->tot_cnt - 1));
+ if (tmgr->fre_cnt != (tmgr->tot_cnt - 1))
+ vipx_warn("free count leaked (%d/%d/%d/%d/%d/%d)\n",
+ tmgr->fre_cnt, tmgr->req_cnt,
+ tmgr->pre_cnt, tmgr->pro_cnt,
+ tmgr->com_cnt, tmgr->tot_cnt);
+ vipx_leave();
}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_TASKMGR_H__
+#define __VIPX_TASKMGR_H__
+
+#include <linux/kthread.h>
+
+#include "vipx-config.h"
+#include "vipx-time.h"
+
+/*
+ * Caller-identity bits used with the barrier macros below. Unsigned
+ * literals: (1 << 31) would left-shift into the sign bit of int, which
+ * is undefined behavior.
+ */
+#define TASKMGR_IDX_0 (1U << 0) /* graphmgr thread */
+#define TASKMGR_IDX_1 (1U << 1)
+#define TASKMGR_IDX_2 (1U << 2)
+#define TASKMGR_IDX_3 (1U << 3)
+#define TASKMGR_IDX_4 (1U << 4)
+#define TASKMGR_IDX_5 (1U << 5)
+#define TASKMGR_IDX_6 (1U << 6)
+#define TASKMGR_IDX_7 (1U << 7)
+#define TASKMGR_IDX_8 (1U << 8)
+#define TASKMGR_IDX_9 (1U << 9)
+#define TASKMGR_IDX_10 (1U << 10)
+#define TASKMGR_IDX_11 (1U << 11)
+#define TASKMGR_IDX_12 (1U << 12)
+#define TASKMGR_IDX_13 (1U << 13)
+#define TASKMGR_IDX_14 (1U << 14)
+#define TASKMGR_IDX_15 (1U << 15)
+#define TASKMGR_IDX_16 (1U << 16)
+#define TASKMGR_IDX_17 (1U << 17)
+#define TASKMGR_IDX_18 (1U << 18)
+#define TASKMGR_IDX_19 (1U << 19)
+#define TASKMGR_IDX_20 (1U << 20)
+#define TASKMGR_IDX_21 (1U << 21)
+#define TASKMGR_IDX_22 (1U << 22)
+#define TASKMGR_IDX_23 (1U << 23)
+#define TASKMGR_IDX_24 (1U << 24)
+#define TASKMGR_IDX_25 (1U << 25)
+#define TASKMGR_IDX_26 (1U << 26)
+#define TASKMGR_IDX_27 (1U << 27)
+#define TASKMGR_IDX_28 (1U << 28)
+#define TASKMGR_IDX_29 (1U << 29)
+#define TASKMGR_IDX_30 (1U << 30)
+#define TASKMGR_IDX_31 (1U << 31)
+
+/*
+ * Lock wrappers that additionally record which context (TASKMGR_IDX_*
+ * bit) is inside or waiting on tmgr->slock. Macro arguments are fully
+ * parenthesized so compound expressions expand correctly (note that
+ * "~index" without parentheses would mis-bind for an OR'ed index).
+ * NOTE(review): sindex is modified outside the critical section, so it
+ * is diagnostic bookkeeping only — confirm it is never used for actual
+ * synchronization decisions.
+ */
+#define taskmgr_e_barrier_irqs(taskmgr, index, flag) \
+ do { \
+ (taskmgr)->sindex |= (index); \
+ spin_lock_irqsave(&(taskmgr)->slock, flag); \
+ } while (0)
+
+#define taskmgr_x_barrier_irqr(taskmgr, index, flag) \
+ do { \
+ spin_unlock_irqrestore(&(taskmgr)->slock, flag); \
+ (taskmgr)->sindex &= ~(index); \
+ } while (0)
+
+#define taskmgr_e_barrier_irq(taskmgr, index) \
+ do { \
+ (taskmgr)->sindex |= (index); \
+ spin_lock_irq(&(taskmgr)->slock); \
+ } while (0)
+
+#define taskmgr_x_barrier_irq(taskmgr, index) \
+ do { \
+ spin_unlock_irq(&(taskmgr)->slock); \
+ (taskmgr)->sindex &= ~(index); \
+ } while (0)
+
+#define taskmgr_e_barrier(taskmgr, index) \
+ do { \
+ (taskmgr)->sindex |= (index); \
+ spin_lock(&(taskmgr)->slock); \
+ } while (0)
+
+#define taskmgr_x_barrier(taskmgr, index) \
+ do { \
+ spin_unlock(&(taskmgr)->slock); \
+ (taskmgr)->sindex &= ~(index); \
+ } while (0)
+
+/* Lifecycle of a task; state 0 is deliberately unused. */
+enum vipx_task_state {
+ VIPX_TASK_STATE_FREE = 1,
+ VIPX_TASK_STATE_REQUEST,
+ VIPX_TASK_STATE_PREPARE,
+ VIPX_TASK_STATE_PROCESS,
+ VIPX_TASK_STATE_COMPLETE,
+ VIPX_TASK_STATE_INVALID
+};
+
+enum vipx_task_flag {
+ VIPX_TASK_FLAG_IOCPY = 16
+};
+
+enum vipx_task_message {
+ VIPX_TASK_INIT = 1,
+ VIPX_TASK_DEINIT,
+ VIPX_TASK_CREATE,
+ VIPX_TASK_DESTROY,
+ VIPX_TASK_ALLOCATE,
+ VIPX_TASK_PROCESS,
+ VIPX_TASK_REQUEST = 10,
+ VIPX_TASK_DONE,
+ VIPX_TASK_NDONE
+};
+
+enum vipx_control_message {
+ VIPX_CTRL_NONE = 100,
+ VIPX_CTRL_STOP,
+ VIPX_CTRL_STOP_DONE
+};
+
+struct vipx_task {
+ struct list_head list;
+ struct kthread_work work;
+ unsigned int state; /* enum vipx_task_state */
+
+ unsigned int message; /* enum vipx_task_message / vipx_control_message */
+ unsigned long param0;
+ unsigned long param1;
+ unsigned long param2;
+ unsigned long param3;
+
+ struct vipx_container_list *incl;
+ struct vipx_container_list *otcl;
+ unsigned long flags;
+
+ unsigned int id;
+ struct mutex *lock;
+ unsigned int index; /* slot in vipx_taskmgr.task[]; 0 is reserved */
+ unsigned int findex;
+ unsigned int tdindex;
+ void *owner;
+
+ struct vipx_time time[VIPX_TIME_COUNT];
+};
+
+struct vipx_taskmgr {
+ unsigned int id;
+ unsigned int sindex; /* barrier-trace mask, see macros above */
+ spinlock_t slock;
+ struct vipx_task task[VIPX_MAX_TASK];
+
+ struct list_head fre_list;
+ struct list_head req_list;
+ struct list_head pre_list;
+ struct list_head pro_list;
+ struct list_head com_list;
+
+ unsigned int tot_cnt;
+ unsigned int fre_cnt;
+ unsigned int req_cnt;
+ unsigned int pre_cnt;
+ unsigned int pro_cnt;
+ unsigned int com_cnt;
+};
+
+void vipx_task_print_free_list(struct vipx_taskmgr *tmgr);
+void vipx_task_print_request_list(struct vipx_taskmgr *tmgr);
+void vipx_task_print_prepare_list(struct vipx_taskmgr *tmgr);
+void vipx_task_print_process_list(struct vipx_taskmgr *tmgr);
+void vipx_task_print_complete_list(struct vipx_taskmgr *tmgr);
+void vipx_task_print_all(struct vipx_taskmgr *tmgr);
+
+void vipx_task_trans_fre_to_req(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_req_to_pre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_req_to_pro(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_req_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_req_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_pre_to_pro(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_pre_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_pre_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_pro_to_com(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_pro_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_com_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+void vipx_task_trans_any_to_fre(struct vipx_taskmgr *tmgr,
+ struct vipx_task *task);
+
+struct vipx_task *vipx_task_pick_fre_to_req(struct vipx_taskmgr *tmgr);
+
+int vipx_task_init(struct vipx_taskmgr *tmgr, unsigned int id, void *owner);
+int vipx_task_deinit(struct vipx_taskmgr *tmgr);
+void vipx_task_flush(struct vipx_taskmgr *tmgr);
+
+#endif
/*
* Samsung Exynos SoC series VIPx driver
*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/ktime.h>
+#include "vipx-log.h"
#include "vipx-time.h"
void vipx_get_timestamp(struct vipx_time *time)
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_TIME_H__
+#define __VIPX_TIME_H__
+
+#include <linux/time.h>
+
+/* Points along a task's lifetime at which a timestamp is captured. */
+enum vipx_time_measure_point {
+ VIPX_TIME_QUEUE,
+ VIPX_TIME_REQUEST,
+ VIPX_TIME_RESOURCE,
+ VIPX_TIME_PROCESS,
+ VIPX_TIME_DONE,
+ VIPX_TIME_COUNT
+};
+
+/* Timestamp captured at one measure point (filled by vipx_get_timestamp). */
+struct vipx_time {
+ struct timeval time;
+};
+
+void vipx_get_timestamp(struct vipx_time *time);
+
+/*
+ * NOTE(review): tv_sec * 1000000 is evaluated in the width of time_t and
+ * can overflow on 32-bit for absolute epoch timestamps — presumably this
+ * macro is only applied to short deltas; confirm at the call sites.
+ */
+#define VIPX_TIME_IN_US(v) ((v).time.tv_sec * 1000000 + (v).time.tv_usec)
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vipx-log.h"
+#include "vipx-util.h"
+
+/* Clear all @nbits bits of @bitmap (mark every slot unused). */
+void vipx_util_bitmap_init(unsigned long *bitmap, unsigned int nbits)
+{
+ bitmap_zero(bitmap, nbits);
+}
+
+/*
+ * Atomically claim the lowest clear bit of @bitmap.
+ * Returns the claimed bit index, or @nbits when every bit is already set.
+ * Unsigned 'bit' avoids the signed/unsigned comparison against @nbits
+ * (and matches the function's unsigned return type) that the int local
+ * had; the separate old_bit temporary was redundant.
+ */
+unsigned int vipx_util_bitmap_get_zero_bit(unsigned long *bitmap,
+ unsigned int nbits)
+{
+ unsigned int bit;
+
+ /* Retry if another context set the bit between find and claim. */
+ while ((bit = find_first_zero_bit(bitmap, nbits)) != nbits) {
+ if (!test_and_set_bit(bit, bitmap))
+ return bit;
+ }
+
+ return nbits;
+}
+
+/* Atomically release a bit previously claimed by bitmap_get_zero_bit(). */
+void vipx_util_bitmap_clear_bit(unsigned long *bitmap, int bit)
+{
+ clear_bit(bit, bitmap);
+}
+
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VIPX_UTIL_H__
+#define __VIPX_UTIL_H__
+
+#include <linux/bitmap.h>
+
+/* Thin wrappers around the kernel bitmap/bitops helpers. */
+void vipx_util_bitmap_init(unsigned long *bitmap, unsigned int nbits);
+/* Returns the claimed bit index, or nbits when the bitmap is full. */
+unsigned int vipx_util_bitmap_get_zero_bit(unsigned long *bitmap,
+ unsigned int nbits);
+void vipx_util_bitmap_clear_bit(unsigned long *bitmap, int bit);
+
+#endif
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <video/videonode.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-#include <linux/firmware.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/bug.h>
-
-#include "vipx-config.h"
-#include "vision-ioctl.h"
-#include "vipx-vertex.h"
-#include "vision-dev.h"
-#include "vipx-device.h"
-#include "vipx-graphmgr.h"
-#include "vipx-graph.h"
-#include "vipx-queue.h"
-#include "vipx-control.h"
-
-const struct vision_file_ops vipx_vertex_fops;
-const const struct vertex_ioctl_ops vipx_vertex_ioctl_ops;
-
-static int __vref_open(struct vipx_vertex *vertex)
-{
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- atomic_set(&vertex->start_cnt.refcount, 0);
- return vipx_device_open(device);
-}
-
-static int __vref_close(struct vipx_vertex *vertex)
-{
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- return vipx_device_close(device);
-}
-
-static int __vref_start(struct vipx_vertex *vertex)
-{
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- return vipx_device_start(device);
-}
-
-static int __vref_stop(struct vipx_vertex *vertex)
-{
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- return vipx_device_stop(device);
-}
-
-static inline void __vref_init(struct vipx_vertex_refcount *vref,
- struct vipx_vertex *vertex, int (*first)(struct vipx_vertex *vertex), int (*final)(struct vipx_vertex *vertex))
-{
- vref->vertex = vertex;
- vref->first = first;
- vref->final = final;
- atomic_set(&vref->refcount, 0);
-}
-
-static inline int __vref_get(struct vipx_vertex_refcount *vref)
-{
- return (atomic_inc_return(&vref->refcount) == 1) ? vref->first(vref->vertex) : 0;
-}
-
-static inline int __vref_put(struct vipx_vertex_refcount *vref)
-{
- return (atomic_dec_return(&vref->refcount) == 0) ? vref->final(vref->vertex) : 0;
-}
-
-int vipx_vertex_probe(struct vipx_vertex *vertex, struct device *parent)
-{
- int ret = 0;
- struct vision_device *vdev;
-
- BUG_ON(!vertex);
- BUG_ON(!parent);
-
- get_device(parent);
- mutex_init(&vertex->lock);
- __vref_init(&vertex->open_cnt, vertex, __vref_open, __vref_close);
- __vref_init(&vertex->start_cnt, vertex, __vref_start, __vref_stop);
-
- vdev = &vertex->vd;
- snprintf(vdev->name, sizeof(vdev->name), "%s", VIPX_VERTEX_NAME);
- vdev->fops = &vipx_vertex_fops;
- vdev->ioctl_ops = &vipx_vertex_ioctl_ops;
- vdev->release = NULL;
- vdev->lock = NULL;
- vdev->parent = parent;
- vdev->type = VISION_DEVICE_TYPE_VERTEX;
- dev_set_drvdata(&vdev->dev, vertex);
-
- ret = vision_register_device(vdev, VIPX_VERTEX_MINOR, vipx_vertex_fops.owner);
- if (ret) {
- probe_err("vision_register_device is fail(%d)\n", ret);
- goto p_err;
- }
-
- probe_info("%s():%d\n", __func__, ret);
-
-p_err:
- return ret;
-}
-
-/*
- * =============================================================================
- * Video File Opertation
- * =============================================================================
- */
-
-static int vipx_vertex_open(struct file *file)
-{
- int ret = 0;
- struct vipx_vertex *vertex = dev_get_drvdata(&vision_devdata(file)->dev);
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- struct mutex *lock = &vertex->lock;
- struct vipx_vertex_ctx *vctx = NULL;
- struct vipx_graph *graph = NULL;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_err("mutex_lock_interruptible is fail\n");
- return -ERESTARTSYS;
- }
-
- ret = __vref_get(&vertex->open_cnt);
- if (ret) {
- vipx_err("vref_get is fail(%d)", ret);
- goto p_err;
- }
-
- ret = vipx_graph_create(&graph,
- &device->graphmgr,
- &device->system.memory);
- if (ret) {
- vipx_err("vipx_graph_create is fail(%d)", ret);
- goto p_err;
- }
-
- vctx = &graph->vctx;
- vctx->idx = graph->idx;
- vctx->vertex = vertex;
- mutex_init(&vctx->lock);
-
- ret = vipx_queue_init(&vctx->queue, &device->system.memory, &vctx->lock);
- if (ret) {
- vipx_err("vipx_queue_init is fail(%d)", ret);
- goto p_err;
- }
-
- file->private_data = vctx;
- vctx->state = BIT(VIPX_VERTEX_OPEN);
-
-p_err:
- vipx_info("%s():%d\n", __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_close(struct file *file)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_graph *graph = container_of(vctx, struct vipx_graph, vctx);
- struct vipx_vertex *vertex = vctx->vertex;
- struct mutex *lock = &vertex->lock;
-#if !DISABLE_VIPX_LOG
- int idx = vctx->idx;
-#endif
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- ret = vipx_graph_destroy(graph);
- if (ret) {
- vipx_err("vipx_graph_destroy is fail(%d)", ret);
- goto p_err;
- }
-
- ret = __vref_put(&vertex->open_cnt);
- if (ret) {
- vipx_err("vref_put is fail(%d)", ret);
- goto p_err;
- }
-
-p_err:
- vipx_info("[I%d]%s():%d\n", idx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static unsigned int vipx_vertex_poll(struct file *file,
- poll_table *poll)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_queue *queue = &vctx->queue;
-
- if (!(vctx->state & BIT(VIPX_VERTEX_START))) {
- vipx_ierr("invalid state(%X)", vctx, vctx->state);
- ret |= POLLERR;
- goto p_err;
- }
-
- ret = vipx_queue_poll(queue, file, poll);
-
-p_err:
- return ret;
-}
-
-const struct vision_file_ops vipx_vertex_fops = {
- .owner = THIS_MODULE,
- .open = vipx_vertex_open,
- .release = vipx_vertex_close,
- .poll = vipx_vertex_poll,
- .ioctl = vertex_ioctl,
- .compat_ioctl = vertex_compat_ioctl32
-};
-
-/*
- * =============================================================================
- * Video Ioctl Opertation
- * =============================================================================
- */
-
-static int vipx_vertex_s_graph(struct file *file, struct vs4l_graph *ginfo)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_graph *graph = container_of(vctx, struct vipx_graph, vctx);
- struct mutex *lock = &vctx->lock;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & BIT(VIPX_VERTEX_OPEN))) {
- vipx_ierr("invalid state(%X)\n", vctx, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_graph_config(graph, ginfo);
- if (ret) {
- vipx_err("vipx_graph_config is fail(%d)\n", ret);
- goto p_err;
- }
-
- vctx->state = BIT(VIPX_VERTEX_GRAPH);
-
-p_err:
- vipx_iinfo("%s():%d\n", vctx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_s_format(struct file *file, struct vs4l_format_list *flist)
-{
- int ret = 0;
- int bioctl32 = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_queue *queue = &vctx->queue;
- struct mutex *lock = &vctx->lock;
- struct vs4l_format *tmp_buf = 0;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & (BIT(VIPX_VERTEX_GRAPH) | BIT(VIPX_VERTEX_FORMAT)))) {
- vipx_ierr("invalid state(%X)\n", vctx, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- tmp_buf = kzalloc(flist->count * sizeof(struct vs4l_format), GFP_KERNEL);
- if (!tmp_buf) {
- ret = -ENOMEM;
- goto p_err;
- }
-
- ret = copy_from_user(tmp_buf, (void __user *)flist->formats, flist->count * sizeof(struct vs4l_format));
- if (ret) {
- // kfree(tmp_buf);
- memcpy(tmp_buf, (void __user *)flist->formats, flist->count * sizeof(struct vs4l_format));
- ret = 0;
- // goto p_err;
- bioctl32 = 1;
-
- }
-
- flist->formats = tmp_buf;
-
- ret = vipx_queue_s_format(queue, flist);
- if (ret) {
- vipx_ierr("vipx_queue_s_format is fail(%d)\n", vctx, ret);
- kfree(tmp_buf);
- goto p_err;
- }
-
- if (!bioctl32)
- {
- kfree(tmp_buf);
- }
-
- vctx->state = BIT(VIPX_VERTEX_FORMAT);
-
-p_err:
- vipx_iinfo("%s():%d\n", vctx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_s_param(struct file *file, struct vs4l_param_list *plist)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_graph *graph = container_of(vctx, struct vipx_graph, vctx);
- struct mutex *lock = &vctx->lock;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & BIT(VIPX_VERTEX_START))) {
- vipx_ierr("invalid state(%X)\n", vctx, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_graph_param(graph, plist);
- if (ret) {
- vipx_err("vipx_graph_param is fail(%d)\n", ret);
- goto p_err;
- }
-
-p_err:
- vipx_iinfo("%s():%d\n", vctx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_s_ctrl(struct file *file, struct vs4l_ctrl *ctrl)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_vertex *vertex = dev_get_drvdata(&vision_devdata(file)->dev);
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- struct vipx_graph *graph = container_of(vctx, struct vipx_graph, vctx);
- struct mutex *lock = &vctx->lock;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- switch (ctrl->ctrl) {
- case VIPX_CTRL_DUMP:
- vipx_graph_print(graph);
- break;
- case VIPX_CTRL_MODE:
- device->mode = ctrl->value;
- break;
- case VIPX_CTRL_TEST:
- break;
- default:
- vipx_ierr("request control is invalid(%d)\n", vctx, ctrl->ctrl);
- ret = -EINVAL;
- break;
- }
-
- vipx_iinfo("%s(%d):%d\n", vctx, __func__, ctrl->ctrl, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_qbuf(struct file *file, struct vs4l_container_list *clist)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_queue *queue = &vctx->queue;
- struct mutex *lock = &vctx->lock;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & BIT(VIPX_VERTEX_START))) {
- vipx_ierr("(%d) invalid state(%X)\n", vctx, clist->direction, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_queue_qbuf(queue, clist);
- if (ret) {
- vipx_ierr("(%d) vipx_queue_qbuf is fail(%d)\n", vctx, clist->direction, ret);
- goto p_err;
- }
-
-p_err:
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_dqbuf(struct file *file, struct vs4l_container_list *clist)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_queue *queue = &vctx->queue;
- struct mutex *lock = &vctx->lock;
- bool nonblocking = file->f_flags & O_NONBLOCK;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & BIT(VIPX_VERTEX_START))) {
- vipx_ierr("(%d) invalid state(%X)\n", vctx, clist->direction, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_queue_dqbuf(queue, clist, nonblocking);
- if (ret) {
- vipx_ierr("(%d) vipx_queue_dqbuf is fail(%d)\n", vctx, clist->direction, ret);
- goto p_err;
- }
-
-p_err:
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_streamon(struct file *file)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_vertex *vertex = vctx->vertex;
- struct vipx_queue *queue = &vctx->queue;
- struct mutex *lock = &vctx->lock;
- struct vipx_device *device = container_of(vertex, struct vipx_device, vertex);
- struct vipx_system *system = &device->system;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- ret = vipx_hw_wait_bootup(&system->interface);
- if (ret) {
- vision_err("CM7 bootup is fail(%d)\n", ret);
- goto p_err;
- }
-
- if (!(vctx->state & (BIT(VIPX_VERTEX_FORMAT) | BIT(VIPX_VERTEX_STOP)))) {
- vipx_ierr("invalid state(%X)\n", vctx, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = __vref_get(&vertex->start_cnt);
- if (ret) {
- vipx_err("vref_get is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = vipx_queue_start(queue);
- if (ret) {
- vipx_ierr("vipx_queue_start is fail(%d)\n", vctx, ret);
- goto p_err;
- }
-
- vctx->state = BIT(VIPX_VERTEX_START);
-
-p_err:
- vipx_iinfo("%s():%d\n", vctx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-static int vipx_vertex_streamoff(struct file *file)
-{
- int ret = 0;
- struct vipx_vertex_ctx *vctx = file->private_data;
- struct vipx_vertex *vertex = vctx->vertex;
- struct vipx_queue *queue = &vctx->queue;
- struct mutex *lock = &vctx->lock;
-
- if (mutex_lock_interruptible(lock)) {
- vipx_ierr("mutex_lock_interruptible is fail\n", vctx);
- return -ERESTARTSYS;
- }
-
- if (!(vctx->state & BIT(VIPX_VERTEX_START))) {
- vipx_ierr("invalid state(%X)\n", vctx, vctx->state);
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = vipx_queue_stop(queue);
- if (ret) {
- vipx_ierr("vipx_queue_stop is fail(%d)\n", vctx, ret);
- goto p_err;
- }
-
- ret = __vref_put(&vertex->start_cnt);
- if (ret) {
- vipx_err("vref_put is fail(%d)\n", ret);
- goto p_err;
- }
-
- vctx->state = BIT(VIPX_VERTEX_STOP);
-
-p_err:
- vipx_iinfo("%s():%d\n", vctx, __func__, ret);
- mutex_unlock(lock);
- return ret;
-}
-
-const struct vertex_ioctl_ops vipx_vertex_ioctl_ops = {
- .vertexioc_s_graph = vipx_vertex_s_graph,
- .vertexioc_s_format = vipx_vertex_s_format,
- .vertexioc_s_param = vipx_vertex_s_param,
- .vertexioc_s_ctrl = vipx_vertex_s_ctrl,
- .vertexioc_qbuf = vipx_vertex_qbuf,
- .vertexioc_dqbuf = vipx_vertex_dqbuf,
- .vertexioc_streamon = vipx_vertex_streamon,
- .vertexioc_streamoff = vipx_vertex_streamoff
-};
+++ /dev/null
-/*
- * Samsung Exynos SoC series VIPx driver
- *
- * Copyright (c) 2017 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef VIPX_VERTEX_H_
-#define VIPX_VERTEX_H_
-
-#define VIPX_VERTEX_NAME "vertex"
-#define VIPX_VERTEX_MINOR 0
-
-#include "vision-dev.h"
-#include "vision-ioctl.h"
-#include "vipx-queue.h"
-
-struct vipx_vertex;
-
-enum vipx_vertex_state {
- VIPX_VERTEX_OPEN,
- VIPX_VERTEX_GRAPH,
- VIPX_VERTEX_FORMAT,
- VIPX_VERTEX_START,
- VIPX_VERTEX_STOP
-};
-
-struct vipx_vertex_refcount {
- atomic_t refcount;
- struct vipx_vertex *vertex;
- int (*first)(struct vipx_vertex *vertex);
- int (*final)(struct vipx_vertex *vertex);
-};
-
-struct vipx_vertex {
- struct mutex lock;
- struct vision_device vd;
- struct vipx_vertex_refcount open_cnt;
- struct vipx_vertex_refcount start_cnt;
-};
-
-struct vipx_vertex_ctx {
- u32 state;
- u32 idx;
- struct mutex lock;
- struct vipx_queue queue;
- struct vipx_vertex *vertex;
-};
-
-int vipx_vertex_probe(struct vipx_vertex *vertex, struct device *parent);
-
-#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TEMP_VISION_FOR_LINUX_H__
+#define __TEMP_VISION_FOR_LINUX_H__
+
+#define MAX_NUM_OF_USER_PARAMS (180)
+
+enum vs4l_graph_flag {
+ VS4L_GRAPH_FLAG_FIXED,
+ VS4L_GRAPH_FLAG_SHARED_AMONG_SUBCHAINS,
+ VS4L_GRAPH_FLAG_SHARING_AMONG_SUBCHAINS_PREFERRED,
+ VS4L_GRAPH_FLAG_SHARED_AMONG_TASKS,
+ VS4L_GRAPH_FLAG_SHARING_AMONG_TASKS_PREFERRED,
+ VS4L_GRAPH_FLAG_DSBL_LATENCY_BALANCING,
+ VS4L_STATIC_ALLOC_LARGE_MPRB_INSTEAD_SMALL_FLAG,
+ VS4L_STATIC_ALLOC_PU_INSTANCE_LSB,
+ VS4L_GRAPH_FLAG_PRIMITIVE,
+ VS4L_GRAPH_FLAG_EXCLUSIVE,
+ VS4L_GRAPH_FLAG_PERIODIC,
+ VS4L_GRAPH_FLAG_ROUTING,
+ VS4L_GRAPH_FLAG_BYPASS,
+ VS4L_GRAPH_FLAG_END
+};
+
+struct vs4l_graph {
+ __u32 id;
+ __u32 priority;
+ __u32 time; /* in millisecond */
+ __u32 flags;
+ __u32 size;
+ unsigned long addr;
+};
+
+struct vs4l_format {
+ __u32 target;
+ __u32 format;
+ __u32 plane;
+ __u32 width;
+ __u32 height;
+};
+
+struct vs4l_format_list {
+ __u32 direction;
+ __u32 count;
+ struct vs4l_format *formats;
+};
+
+struct vs4l_param {
+ __u32 target;
+ unsigned long addr;
+ __u32 offset;
+ __u32 size;
+};
+
+struct vs4l_param_list {
+ __u32 count;
+ struct vs4l_param *params;
+};
+
+struct vs4l_ctrl {
+ __u32 ctrl;
+ __u32 value;
+};
+
+struct vs4l_roi {
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+};
+
+struct vs4l_buffer {
+ struct vs4l_roi roi;
+ union {
+ unsigned long userptr;
+ __s32 fd;
+ } m;
+ unsigned long reserved;
+};
+
+enum vs4l_buffer_type {
+ VS4L_BUFFER_LIST,
+ VS4L_BUFFER_ROI,
+ VS4L_BUFFER_PYRAMID
+};
+
+enum vs4l_memory {
+ VS4L_MEMORY_USERPTR = 1,
+ VS4L_MEMORY_VIRTPTR,
+ VS4L_MEMORY_DMABUF
+};
+
+struct vs4l_container {
+ __u32 type;
+ __u32 target;
+ __u32 memory;
+ __u32 reserved[4];
+ __u32 count;
+ struct vs4l_buffer *buffers;
+};
+
+enum vs4l_direction {
+ VS4L_DIRECTION_IN = 1,
+ VS4L_DIRECTION_OT
+};
+
+enum vs4l_cl_flag {
+ VS4L_CL_FLAG_TIMESTAMP,
+ VS4L_CL_FLAG_PREPARE = 8,
+ VS4L_CL_FLAG_INVALID,
+ VS4L_CL_FLAG_DONE
+};
+
+struct vs4l_container_list {
+ __u32 direction;
+ __u32 id;
+ __u32 index;
+ __u32 flags;
+ /*
+ * NOTE(review): struct timeval in a uapi-style structure is not
+ * y2038-safe and its layout differs between 32/64-bit userspace —
+ * confirm the ABI constraints before this header is frozen.
+ */
+ struct timeval timestamp[6];
+ __u32 count;
+ __u32 user_params[MAX_NUM_OF_USER_PARAMS];
+ struct vs4l_container *containers;
+};
+
+/*
+ * Pack a four-character image-format code into a __u32 (little-endian
+ * byte order). All arguments are parenthesized before shifting so that
+ * compound expressions expand correctly.
+ */
+#define VS4L_DF_IMAGE(a, b, c, d) \
+ ((a) | ((b) << 8) | ((c) << 16) | ((d) << 24))
+#define VS4L_DF_IMAGE_RGB VS4L_DF_IMAGE('R', 'G', 'B', '2')
+#define VS4L_DF_IMAGE_RGBX VS4L_DF_IMAGE('R', 'G', 'B', 'A')
+#define VS4L_DF_IMAGE_NV12 VS4L_DF_IMAGE('N', 'V', '1', '2')
+#define VS4L_DF_IMAGE_NV21 VS4L_DF_IMAGE('N', 'V', '2', '1')
+#define VS4L_DF_IMAGE_YV12 VS4L_DF_IMAGE('Y', 'V', '1', '2')
+#define VS4L_DF_IMAGE_I420 VS4L_DF_IMAGE('I', '4', '2', '0')
+#define VS4L_DF_IMAGE_I422 VS4L_DF_IMAGE('I', '4', '2', '2')
+#define VS4L_DF_IMAGE_YUYV VS4L_DF_IMAGE('Y', 'U', 'Y', 'V')
+#define VS4L_DF_IMAGE_YUV4 VS4L_DF_IMAGE('Y', 'U', 'V', '4')
+#define VS4L_DF_IMAGE_U8 VS4L_DF_IMAGE('U', '0', '0', '8')
+#define VS4L_DF_IMAGE_U16 VS4L_DF_IMAGE('U', '0', '1', '6')
+#define VS4L_DF_IMAGE_U32 VS4L_DF_IMAGE('U', '0', '3', '2')
+#define VS4L_DF_IMAGE_S16 VS4L_DF_IMAGE('S', '0', '1', '6')
+#define VS4L_DF_IMAGE_S32 VS4L_DF_IMAGE('S', '0', '3', '2')
+
+/* VS4L ioctl interface */
+#define VS4L_VERTEXIOC_S_GRAPH _IOW('V', 0, struct vs4l_graph)
+#define VS4L_VERTEXIOC_S_FORMAT _IOW('V', 1, struct vs4l_format_list)
+#define VS4L_VERTEXIOC_S_PARAM _IOW('V', 2, struct vs4l_param_list)
+#define VS4L_VERTEXIOC_S_CTRL _IOW('V', 3, struct vs4l_ctrl)
+#define VS4L_VERTEXIOC_STREAM_ON _IO('V', 4)
+#define VS4L_VERTEXIOC_STREAM_OFF _IO('V', 5)
+#define VS4L_VERTEXIOC_QBUF _IOW('V', 6, struct vs4l_container_list)
+#define VS4L_VERTEXIOC_DQBUF _IOW('V', 7, struct vs4l_container_list)
+
+#endif
--- /dev/null
+menuconfig EXYNOS_VIPX_VERTEX
+ bool "Exynos VIPx driver"
+ select EXYNOS_VIPX_VERTEX_PLATFORM
+ help
+	  Say Y here to enable support for the Samsung Exynos VIPx
+	  vision image processing unit driver.
+
+if EXYNOS_VIPX_VERTEX
+
+source "drivers/vision/vipx_vertex/platform/Kconfig"
+
+endif
--- /dev/null
+#
+# Makefile for the VIPx common driver
+#
+
+obj-y += vertex-binary.o
+obj-y += vertex-compat-ioctl.o
+obj-y += vertex-ioctl.o
+obj-y += vertex-debug.o
+obj-y += vertex-device.o
+obj-y += vertex-graph.o
+obj-y += vertex-graphmgr.o
+obj-y += vertex-interface.o
+obj-y += vertex-io.o
+obj-y += vertex-mailbox.o
+obj-y += vertex-memory.o
+obj-y += vertex-queue.o
+obj-y += vertex-slab.o
+obj-y += vertex-system.o
+obj-y += vertex-taskmgr.o
+obj-y += vertex-time.o
+obj-y += vertex-core.o
+obj-y += vertex-context.o
+obj-y += vertex-kernel-binary.o
+obj-y += vertex-util.o
+
+obj-$(CONFIG_EXYNOS_VIPX_VERTEX_PLATFORM) += platform/
+
+ccflags-y += -Idrivers/vision/vipx_vertex
--- /dev/null
+menuconfig EXYNOS_VIPX_VERTEX_PLATFORM
+ bool "Select Platform"
+ help
+ This is a selection of platform
+
+config EXYNOS_VIPX_VERTEX_EXYNOS9610
+ bool "Use Exynos9610 platform"
+ depends on EXYNOS_VIPX_VERTEX_PLATFORM
+ depends on SOC_EXYNOS9610
+ help
+ This is a exynos9610 platform
--- /dev/null
+#
+# Makefile for the VIPx driver depends on platform
+#
+
+obj-$(CONFIG_EXYNOS_VIPX_VERTEX_EXYNOS9610) += exynos9610/
--- /dev/null
+#
+# Makefile for the VIPx driver depends on exynos9610 platform
+#
+
+obj-y += vertex-clk-exynos9610.o
+obj-y += vertex-ctrl-exynos9610.o
+
+ccflags-y += -Idrivers/vision/vipx_vertex
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-system.h"
+#include "platform/vertex-clk.h"
+
+/* Index into vertex_exynos9610_clk_array[]; order must match the table. */
+enum vertex_clk_id {
+	VERTEX_CLK_UMUX_CLKCMU_VIPX1_BUS,
+	VERTEX_CLK_GATE_VIPX1_QCH,
+	VERTEX_CLK_UMUX_CLKCMU_VIPX2_BUS,
+	VERTEX_CLK_GATE_VIPX2_QCH,
+	VERTEX_CLK_GATE_VIPX2_QCH_LOCAL,
+	VERTEX_CLK_MAX
+};
+
+/*
+ * Clock table: handles are filled in by clk_init(); the names must match
+ * the "clock-names" property of the device-tree node.
+ */
+static struct vertex_clk vertex_exynos9610_clk_array[] = {
+	{ NULL, "UMUX_CLKCMU_VIPX1_BUS" },
+	{ NULL, "GATE_VIPX1_QCH" },
+	{ NULL, "UMUX_CLKCMU_VIPX2_BUS" },
+	{ NULL, "GATE_VIPX2_QCH" },
+	{ NULL, "GATE_VIPX2_QCH_LOCAL" },
+};
+
+/*
+ * Look up every clock in vertex_exynos9610_clk_array by name and cache the
+ * handle. Clocks are devm-managed, so nothing is released on failure here.
+ * Returns 0 on success or a negative errno.
+ */
+static int vertex_exynos9610_clk_init(struct vertex_system *sys)
+{
+	int ret;
+	int idx;
+	struct vertex_clk *entry;
+
+	vertex_enter();
+	/* Guard against the enum and the table drifting out of sync. */
+	if (ARRAY_SIZE(vertex_exynos9610_clk_array) != VERTEX_CLK_MAX) {
+		vertex_err("clock array size is invalid (%zu/%d)\n",
+				ARRAY_SIZE(vertex_exynos9610_clk_array),
+				VERTEX_CLK_MAX);
+		return -EINVAL;
+	}
+
+	for (idx = 0; idx < VERTEX_CLK_MAX; ++idx) {
+		entry = &vertex_exynos9610_clk_array[idx];
+		if (!entry->name) {
+			vertex_err("clock name is NULL (%d)\n", idx);
+			return -EINVAL;
+		}
+
+		entry->clk = devm_clk_get(sys->dev, entry->name);
+		if (IS_ERR(entry->clk)) {
+			ret = PTR_ERR(entry->clk);
+			vertex_err("Failed to get clock(%s/%d) (%d)\n",
+					entry->name, idx, ret);
+			return ret;
+		}
+	}
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Counterpart of clk_init(). Clocks were obtained with devm_clk_get(), so
+ * they are released automatically with the device; nothing to do here.
+ */
+static void vertex_exynos9610_clk_deinit(struct vertex_system *sys)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/* Rate/parent configuration hook; no configuration needed on this SoC. */
+static int vertex_exynos9610_clk_cfg(struct vertex_system *sys)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Prepare and enable all VIPx clocks in table order.
+ * On failure, clocks enabled so far are disabled again so the enable
+ * counts stay balanced (the original left them enabled on error).
+ * Returns 0 on success or the clk_prepare_enable() error.
+ */
+static int vertex_exynos9610_clk_on(struct vertex_system *sys)
+{
+	int ret;
+	int index;
+	const char *name;
+	struct clk *clk;
+
+	vertex_enter();
+	for (index = 0; index < VERTEX_CLK_MAX; ++index) {
+		name = vertex_exynos9610_clk_array[index].name;
+		clk = vertex_exynos9610_clk_array[index].clk;
+
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			vertex_err("Failed to enable clock(%s/%d) (%d)\n",
+					name, index, ret);
+			goto p_err;
+		}
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	/* Unwind: disable the clocks that were already enabled. */
+	while (--index >= 0)
+		clk_disable_unprepare(vertex_exynos9610_clk_array[index].clk);
+	return ret;
+}
+
+/* Disable all VIPx clocks in reverse order of clk_on(). Always returns 0. */
+static int vertex_exynos9610_clk_off(struct vertex_system *sys)
+{
+	struct vertex_clk *entry;
+	int idx;
+
+	vertex_enter();
+	for (idx = VERTEX_CLK_MAX - 1; idx >= 0; --idx) {
+		entry = &vertex_exynos9610_clk_array[idx];
+		clk_disable_unprepare(entry->clk);
+	}
+
+	vertex_leave();
+	return 0;
+}
+
+/* Log the current rate of every VIPx clock. Always returns 0. */
+static int vertex_exynos9610_clk_dump(struct vertex_system *sys)
+{
+	struct vertex_clk *entry;
+	int idx;
+
+	vertex_enter();
+	for (idx = 0; idx < VERTEX_CLK_MAX; ++idx) {
+		entry = &vertex_exynos9610_clk_array[idx];
+		vertex_info("%30s(%d) : %9lu Hz\n",
+				entry->name, idx, clk_get_rate(entry->clk));
+	}
+
+	vertex_leave();
+	return 0;
+}
+
+/* Exynos9610 implementation of the SoC clock ops (see platform/vertex-clk.h). */
+const struct vertex_clk_ops vertex_clk_ops = {
+	.clk_init = vertex_exynos9610_clk_init,
+	.clk_deinit = vertex_exynos9610_clk_deinit,
+	.clk_cfg = vertex_exynos9610_clk_cfg,
+	.clk_on = vertex_exynos9610_clk_on,
+	.clk_off = vertex_exynos9610_clk_off,
+	.clk_dump = vertex_exynos9610_clk_dump
+};
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "vertex-log.h"
+#include "vertex-system.h"
+#include "platform/vertex-ctrl.h"
+
+/* Index into regs_ss1[]; order must match the table below. */
+enum vertex_reg_ss1_id {
+	REG_SS1_VERSION_ID,
+	REG_SS1_QCHANNEL,
+	REG_SS1_IRQ_VIPX_TO_HOST,
+	REG_SS1_IRQ0_HOST_TO_VIPX,
+	REG_SS1_IRQ1_HOST_TO_VIPX,
+	REG_SS1_GLOBAL_CTRL,
+	REG_SS1_CORTEX_CONTROL,
+	REG_SS1_CPU_CTRL,
+	REG_SS1_PROGRAM_COUNTER,
+	REG_SS1_DEBUG0,
+	REG_SS1_DEBUG1,
+	REG_SS1_DEBUG2,
+	REG_SS1_DEBUG3,
+	REG_SS1_MAX
+};
+
+/* Index into regs_ss2[]; order must match the table below. */
+enum vertex_reg_ss2_id {
+	REG_SS2_QCHANNEL,
+	REG_SS2_GLOBAL_CTRL,
+	REG_SS2_MAX
+};
+
+/* Register map of the VIPX_CPU_SS1 block: byte offset + printable name. */
+static const struct vertex_reg regs_ss1[] = {
+	{0x00000, "SS1_VERSION_ID"},
+	{0x00004, "SS1_QCHANNEL"},
+	{0x00008, "SS1_IRQ_VIPX_TO_HOST"},
+	{0x0000C, "SS1_IRQ0_HOST_TO_VIPX"},
+	{0x00010, "SS1_IRQ1_HOST_TO_VIPX"},
+	{0x00014, "SS1_GLOBAL_CTRL"},
+	{0x00018, "SS1_CORTEX_CONTROL"},
+	{0x0001C, "SS1_CPU_CTRL"},
+	{0x00044, "SS1_PROGRAM_COUNTER"},
+	{0x00048, "SS1_DEBUG0"},
+	{0x0004C, "SS1_DEBUG1"},
+	{0x00050, "SS1_DEBUG2"},
+	{0x00054, "SS1_DEBUG3"},
+};
+
+/* Register map of the VIPX_CPU_SS2 block. */
+static const struct vertex_reg regs_ss2[] = {
+	{0x00000, "SS2_QCHANNEL"},
+	{0x00004, "SS2_GLOBAL_CTRL"},
+};
+
+/*
+ * Reset sequence for the VIPx CPU subsystems. The exact register-write
+ * order and the 0xF1/0x1 bit values follow the vendor bring-up sequence;
+ * do not reorder. Always returns 0.
+ */
+static int vertex_exynos9610_ctrl_reset(struct vertex_system *sys)
+{
+	void __iomem *ss1, *ss2;
+	unsigned int val;
+
+	vertex_enter();
+	ss1 = sys->reg_ss[VERTEX_REG_SS1];
+	ss2 = sys->reg_ss[VERTEX_REG_SS2];
+
+	/* TODO: check delay */
+	/* Set global control bits (0xF1) on both subsystems. */
+	val = readl(ss1 + regs_ss1[REG_SS1_GLOBAL_CTRL].offset);
+	writel(val | 0xF1, ss1 + regs_ss1[REG_SS1_GLOBAL_CTRL].offset);
+	udelay(10);
+
+	val = readl(ss2 + regs_ss2[REG_SS2_GLOBAL_CTRL].offset);
+	writel(val | 0xF1, ss2 + regs_ss2[REG_SS2_GLOBAL_CTRL].offset);
+	udelay(10);
+
+	/* Pulse CPU_CTRL bit 0 (set below, cleared again further down). */
+	val = readl(ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+	writel(val | 0x1, ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+	udelay(10);
+
+	/* Hold the Cortex core (released later by ctrl_start()). */
+	val = readl(ss1 + regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+	writel(val | 0x1, ss1 + regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+	udelay(10);
+
+	writel(0x0, ss1 + regs_ss1[REG_SS1_CPU_CTRL].offset);
+	udelay(10);
+
+	/* Assert Q-channel bit on both subsystems. */
+	val = readl(ss1 + regs_ss1[REG_SS1_QCHANNEL].offset);
+	writel(val | 0x1, ss1 + regs_ss1[REG_SS1_QCHANNEL].offset);
+	udelay(10);
+
+	val = readl(ss2 + regs_ss2[REG_SS2_QCHANNEL].offset);
+	writel(val | 0x1, ss2 + regs_ss2[REG_SS2_QCHANNEL].offset);
+	udelay(10);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Release the Cortex core from the hold set in ctrl_reset(). */
+static int vertex_exynos9610_ctrl_start(struct vertex_system *sys)
+{
+	vertex_enter();
+	writel(0x0, sys->reg_ss[VERTEX_REG_SS1] +
+			regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * NOTE(review): this body is byte-identical to ctrl_start() — it writes
+ * CORTEX_CONTROL instead of reading/acking an IRQ register such as
+ * SS1_IRQ_VIPX_TO_HOST, and ignores 'direction'. This looks like a
+ * copy-paste placeholder; confirm the intended behavior before use.
+ */
+static int vertex_exynos9610_ctrl_get_interrupt(struct vertex_system *sys,
+		int direction)
+{
+	vertex_enter();
+	writel(0x0, sys->reg_ss[VERTEX_REG_SS1] +
+			regs_ss1[REG_SS1_CORTEX_CONTROL].offset);
+	vertex_leave();
+	return 0;
+}
+
+/* Host-to-VIPx interrupt trigger; not implemented yet (stub). */
+static int vertex_exynos9610_ctrl_set_interrupt(struct vertex_system *sys,
+		int direction)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Dump the SS1 program counter and debug registers to the log.
+ * PROGRAM_COUNTER..DEBUG3 are contiguous in both enum vertex_reg_ss1_id
+ * and regs_ss1[], so a loop replaces the five copy-pasted stanzas.
+ * Always returns 0.
+ */
+static int vertex_exynos9610_ctrl_debug_dump(struct vertex_system *sys)
+{
+	void __iomem *ss1;
+	unsigned int val;
+	int idx;
+
+	vertex_enter();
+	ss1 = sys->reg_ss[VERTEX_REG_SS1];
+
+	for (idx = REG_SS1_PROGRAM_COUNTER; idx <= REG_SS1_DEBUG3; ++idx) {
+		val = readl(ss1 + regs_ss1[idx].offset);
+		vertex_info("[%20s] 0x%08x\n", regs_ss1[idx].name, val);
+	}
+
+	vertex_leave();
+	return 0;
+}
+
+/* Exynos9610 implementation of the SoC control ops (see platform/vertex-ctrl.h). */
+const struct vertex_ctrl_ops vertex_ctrl_ops = {
+	.reset = vertex_exynos9610_ctrl_reset,
+	.start = vertex_exynos9610_ctrl_start,
+	.get_interrupt = vertex_exynos9610_ctrl_get_interrupt,
+	.set_interrupt = vertex_exynos9610_ctrl_set_interrupt,
+	.debug_dump = vertex_exynos9610_ctrl_debug_dump,
+};
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_CLK_H__
+#define __VERTEX_CLK_H__
+
+#include <linux/clk.h>
+
+struct vertex_system;
+
+/* One clock table entry: runtime handle plus its device-tree clock name. */
+struct vertex_clk {
+	struct clk *clk;
+	const char *name;
+};
+
+/*
+ * SoC-specific clock callbacks, implemented per platform
+ * (e.g. platform/exynos9610/vertex-clk-exynos9610.c).
+ */
+struct vertex_clk_ops {
+	int (*clk_init)(struct vertex_system *sys);
+	void (*clk_deinit)(struct vertex_system *sys);
+	int (*clk_cfg)(struct vertex_system *sys);
+	int (*clk_on)(struct vertex_system *sys);
+	int (*clk_off)(struct vertex_system *sys);
+	int (*clk_dump)(struct vertex_system *sys);
+};
+
+/* Provided by the selected platform implementation. */
+extern const struct vertex_clk_ops vertex_clk_ops;
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_CTRL_H__
+#define __VERTEX_CTRL_H__
+
+struct vertex_system;
+
+/* Index into vertex_system.reg_ss[] (the two mapped CPU_SS register blocks). */
+enum {
+	VERTEX_REG_SS1,
+	VERTEX_REG_SS2,
+	VERTEX_REG_MAX
+};
+
+/* One register description: byte offset within a block + printable name. */
+struct vertex_reg {
+	const unsigned int offset;
+	const char *name;
+};
+
+/* SoC-specific control callbacks, implemented per platform. */
+struct vertex_ctrl_ops {
+	int (*reset)(struct vertex_system *sys);
+	int (*start)(struct vertex_system *sys);
+	int (*get_interrupt)(struct vertex_system *sys, int direction);
+	int (*set_interrupt)(struct vertex_system *sys, int direction);
+	int (*debug_dump)(struct vertex_system *sys);
+};
+
+/* Provided by the selected platform implementation. */
+extern const struct vertex_ctrl_ops vertex_ctrl_ops;
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+
+#include "vertex-log.h"
+#include "vertex-system.h"
+#include "vertex-binary.h"
+
+/*
+ * Return the size of a regular file, or a negative errno cast to loff_t.
+ * noinline_for_stack keeps the large struct kstat off the caller's stack.
+ */
+static noinline_for_stack loff_t __vertex_binary_get_file_size(
+		struct file *file)
+{
+	int ret;
+	struct kstat st;
+
+	vertex_enter();
+	/* Only the mode and size attributes are needed. */
+	ret = vfs_getattr(&file->f_path, &st, STATX_MODE | STATX_SIZE,
+			KSTAT_QUERY_FLAGS);
+	if (ret) {
+		vertex_err("vfs_getattr failed (%d)\n", ret);
+		return (loff_t)ret;
+	}
+
+	if (!S_ISREG(st.mode)) {
+		vertex_err("file mode is not S_ISREG\n");
+		return (loff_t)-EINVAL;
+	}
+
+	vertex_leave();
+	return st.size;
+}
+
+/*
+ * Read the file "path/name" (at most 'size' bytes) into 'target'.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the original leaked the open file AND left the address limit at
+ * KERNEL_DS on the size-check error paths (it jumped straight to return
+ * without filp_close()/set_fs(old_fs)). All error paths now unwind.
+ */
+static int __vertex_binary_file_read(struct vertex_binary *bin,
+		const char *path, const char *name, void *target, size_t size)
+{
+	int ret;
+	mm_segment_t old_fs;
+	char fname[VERTEX_FW_NAME_LEN];
+	struct file *fp;
+	loff_t fsize, pos = 0;
+	ssize_t nread;
+
+	vertex_enter();
+	snprintf(fname, sizeof(fname), "%s/%s", path, name);
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	fp = filp_open(fname, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		ret = PTR_ERR(fp);
+		vertex_err("filp_open(%s) is failed (%d)\n", fname, ret);
+		goto p_err_open;
+	}
+
+	fsize = __vertex_binary_get_file_size(fp);
+	if (fsize <= 0) {
+		ret = (int)fsize;
+		goto p_err_size;
+	}
+
+	if (fsize > size) {
+		ret = -EIO;
+		vertex_err("file(%s) size is over (%lld > %zu)\n",
+				fname, fsize, size);
+		goto p_err_size;
+	}
+
+	nread = kernel_read(fp, target, fsize, &pos);
+	filp_close(fp, current->files);
+	set_fs(old_fs);
+
+	if (nread < 0) {
+		ret = (int)nread;
+		vertex_err("kernel_read(%s) is fail (%d)\n", fname, ret);
+		goto p_err_read;
+	}
+
+	vertex_leave();
+	return 0;
+p_err_size:
+	filp_close(fp, current->files);
+p_err_open:
+	set_fs(old_fs);
+p_err_read:
+	return ret;
+}
+
+/*
+ * Load firmware image 'name' via request_firmware() into 'target'
+ * (at most 'size' bytes). Returns 0 on success or a negative errno.
+ *
+ * Fix: the size-mismatch log used %ld for size_t values; use %zu.
+ */
+static int __vertex_binary_firmware_load(struct vertex_binary *bin,
+		const char *name, void *target, size_t size)
+{
+	int ret;
+	const struct firmware *fw_blob;
+
+	vertex_enter();
+	if (!target) {
+		ret = -EINVAL;
+		vertex_err("binary(%s) memory is NULL\n", name);
+		goto p_err_target;
+	}
+
+	ret = request_firmware(&fw_blob, name, bin->dev);
+	if (ret) {
+		vertex_err("request_firmware(%s) is fail (%d)\n", name, ret);
+		goto p_err_req;
+	}
+
+	if (fw_blob->size > size) {
+		ret = -EIO;
+		vertex_err("binary(%s) size is over (%zu > %zu)\n",
+				name, fw_blob->size, size);
+		goto p_err_size;
+	}
+
+	memcpy(target, fw_blob->data, fw_blob->size);
+	release_firmware(fw_blob);
+
+	vertex_leave();
+	return 0;
+p_err_size:
+	release_firmware(fw_blob);
+p_err_req:
+p_err_target:
+	return ret;
+}
+
+/*
+ * Load a binary into 'target'. A non-NULL 'path' selects a direct
+ * filesystem read (debug); otherwise the firmware loader is used.
+ * Returns 0 on success or a negative errno from the chosen backend.
+ */
+int vertex_binary_read(struct vertex_binary *bin, const char *path,
+		const char *name, void *target, size_t size)
+{
+	int ret;
+
+	vertex_enter();
+	ret = path ?
+		__vertex_binary_file_read(bin, path, name, target, size) :
+		__vertex_binary_firmware_load(bin, name, target, size);
+	vertex_leave();
+	return ret;
+}
+
+/*
+ * Dump 'size' bytes from 'target' to the file "path/name" (debug helper).
+ * Returns 0 on success or a negative errno.
+ */
+int vertex_binary_write(struct vertex_binary *bin, const char *path,
+		const char *name, void *target, size_t size)
+{
+	int ret;
+	char fname[VERTEX_FW_NAME_LEN];
+	mm_segment_t old_fs;
+	struct file *fp;
+	ssize_t nwrite;
+	loff_t pos = 0;
+
+	vertex_enter();
+	snprintf(fname, sizeof(fname), "%s/%s", path, name);
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	/*
+	 * NOTE(review): O_CREAT with mode 0 creates a file with no
+	 * permission bits at all; confirm whether a mode such as 0640
+	 * was intended for these debug dumps.
+	 */
+	fp = filp_open(fname, O_RDWR | O_CREAT, 0);
+	if (IS_ERR(fp)) {
+		set_fs(old_fs);
+		ret = PTR_ERR(fp);
+		vertex_err("filp_open(%s) is fail (%d)\n", fname, ret);
+		goto p_err;
+	}
+
+	nwrite = kernel_write(fp, target, size, &pos);
+	filp_close(fp, current->files);
+	set_fs(old_fs);
+
+	if (nwrite < 0) {
+		ret = (int)nwrite;
+		vertex_err("kernel_write(%s) is fail (%d)\n", fname, ret);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Initialise the binary loader: it only needs the device pointer for
+ * request_firmware(). Always returns 0.
+ */
+int vertex_binary_init(struct vertex_system *sys)
+{
+	vertex_enter();
+	sys->binary.dev = sys->dev;
+	vertex_leave();
+	return 0;
+}
+
+/* Counterpart of vertex_binary_init(); no resources to release currently. */
+void vertex_binary_deinit(struct vertex_binary *bin)
+{
+	vertex_enter();
+	vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_BINARY_H__
+#define __VERTEX_BINARY_H__
+
+#include <linux/device.h>
+
+/* Filesystem directory used by the debug load/dump paths. */
+#define VERTEX_DEBUG_BIN_PATH "/data"
+
+/* Firmware image names requested via request_firmware(). */
+#define VERTEX_FW_DRAM_NAME "CC_DRAM_CODE_FLASH_HIFI.bin"
+#define VERTEX_FW_ITCM_NAME "CC_ITCM_CODE_FLASH_HIFI.bin"
+#define VERTEX_FW_DTCM_NAME "CC_DTCM_CODE_FLASH_HIFI.bin"
+
+/* Buffer length for assembled "path/name" strings. */
+#define VERTEX_FW_NAME_LEN (100)
+#define VERTEX_VERSION_SIZE (42)
+
+struct vertex_system;
+
+/* Loader state: only the device used for request_firmware(). */
+struct vertex_binary {
+	struct device *dev;
+};
+
+/* Load a binary; non-NULL 'path' reads from the filesystem instead. */
+int vertex_binary_read(struct vertex_binary *bin, const char *path,
+		const char *name, void *target, size_t size);
+/* Dump a binary to "path/name" (debug helper). */
+int vertex_binary_write(struct vertex_binary *bin, const char *path,
+		const char *name, void *target, size_t size);
+
+int vertex_binary_init(struct vertex_system *sys);
+void vertex_binary_deinit(struct vertex_binary *bin);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_COMMON_TYPE_H__
+#define __VERTEX_COMMON_TYPE_H__
+
+#define MAX_INPUT_NUM (8)
+#define MAX_OUTPUT_NUM (8)
+#define MAX_PLANE_NUM (3)
+
+/* How vertex_common_mem identifies a buffer. */
+enum vertex_common_addr_type {
+	VERTEX_COMMON_V_ADDR,
+	VERTEX_COMMON_DV_ADDR,
+	VERTEX_COMMON_FD,
+};
+
+/*
+ * NOTE(review): "CACHEALBE" is a typo for "CACHEABLE", but these names
+ * appear to be shared with firmware/userspace — do not rename unilaterally.
+ */
+enum vertex_common_mem_attr {
+	VERTEX_COMMON_CACHEALBE,
+	VERTEX_COMMON_NON_CACHEALBE,
+};
+
+/* Allocator backing the memory described by vertex_common_mem. */
+enum vertex_common_mem_type {
+	VERTEX_COMMON_MEM_ION,
+	VERTEX_COMMON_MEM_MALLOC,
+	VERTEX_COMMON_MEM_ASHMEM
+};
+
+/* Description of one memory region shared with the VIPx firmware. */
+struct vertex_common_mem {
+	int fd;
+	unsigned int iova;
+	unsigned int size;
+	unsigned int offset;
+	unsigned char addr_type;	/* enum vertex_common_addr_type */
+	unsigned char mem_attr;		/* enum vertex_common_mem_attr */
+	unsigned char mem_type;		/* enum vertex_common_mem_type */
+	unsigned char reserved[5];
+};
+
+/* Packed 32-bit message identifier exchanged with the firmware. */
+union vertex_common_global_id {
+	struct {
+		uint32_t head :2;
+		uint32_t target :2;
+		uint32_t model_id :12;
+		uint32_t msg_id :16;
+	} head;
+	unsigned int num;
+};
+
+/* Graph description passed on (un)load; trailing user data is variable. */
+struct vertex_common_graph_info {
+	struct vertex_common_mem graph;
+	struct vertex_common_mem temp_buf;
+	struct vertex_common_mem weight;
+	struct vertex_common_mem bias;
+	unsigned int gid;
+	unsigned int user_para_size;
+	//unsigned char user_para[0] // variable size
+};
+
+/* Execution request; trailing user data is variable. */
+struct vertex_common_execute_info {
+	unsigned int gid;
+	unsigned int macro_sg_offset;
+	unsigned int num_input;
+	unsigned int num_output;
+	struct vertex_common_mem input[MAX_INPUT_NUM][MAX_PLANE_NUM];
+	struct vertex_common_mem output[MAX_OUTPUT_NUM][MAX_PLANE_NUM];
+	unsigned int user_para_size;
+	unsigned int reserved;
+	//unsigned char user_para[0] // variable size
+};
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "vertex-log.h"
+#include "vertex-core.h"
+#include "vertex-context.h"
+#include "vertex-ioctl.h"
+
+/*
+ * 32-bit (compat) mirrors of the vs4l UAPI structures: pointers become
+ * compat_caddr_t and timeval becomes compat_timeval, so a 64-bit kernel
+ * can decode requests from 32-bit userspace. Field order must track the
+ * native definitions in the vs4l header.
+ */
+struct vs4l_graph32 {
+	__u32 id;
+	__u32 priority;
+	__u32 time;
+	__u32 flags;
+	__u32 size;
+	compat_caddr_t addr;
+};
+
+struct vs4l_format32 {
+	__u32 target;
+	__u32 format;
+	__u32 plane;
+	__u32 width;
+	__u32 height;
+};
+
+struct vs4l_format_list32 {
+	__u32 direction;
+	__u32 count;		/* number of vs4l_format32 at 'formats' */
+	compat_caddr_t formats;
+};
+
+struct vs4l_param32 {
+	__u32 target;
+	compat_caddr_t addr;
+	__u32 offset;
+	__u32 size;
+};
+
+struct vs4l_param_list32 {
+	__u32 count;		/* number of vs4l_param32 at 'params' */
+	compat_caddr_t params;
+};
+
+struct vs4l_ctrl32 {
+	__u32 ctrl;
+	__u32 value;
+};
+
+struct vs4l_roi32 {
+	__u32 x;
+	__u32 y;
+	__u32 w;
+	__u32 h;
+};
+
+/* 'roi' is all-__u32, so the native struct is layout-compatible here. */
+struct vs4l_buffer32 {
+	struct vs4l_roi roi;
+	union {
+		compat_caddr_t userptr;
+		__s32 fd;
+	} m;
+	compat_caddr_t reserved;
+};
+
+struct vs4l_container32 {
+	__u32 type;
+	__u32 target;
+	__u32 memory;		/* enum vs4l_memory */
+	__u32 reserved[4];
+	__u32 count;		/* number of vs4l_buffer32 at 'buffers' */
+	compat_caddr_t buffers;
+};
+
+struct vs4l_container_list32 {
+	__u32 direction;
+	__u32 id;
+	__u32 index;
+	__u32 flags;
+	struct compat_timeval timestamp[6];
+	__u32 count;		/* number of vs4l_container32 at 'containers' */
+	__u32 user_params[MAX_NUM_OF_USER_PARAMS];
+	compat_caddr_t containers;
+};
+
+#define VS4L_VERTEXIOC_S_GRAPH32 \
+ _IOW('V', 0, struct vs4l_graph32)
+#define VS4L_VERTEXIOC_S_FORMAT32 \
+ _IOW('V', 1, struct vs4l_format_list32)
+#define VS4L_VERTEXIOC_S_PARAM32 \
+ _IOW('V', 2, struct vs4l_param_list32)
+#define VS4L_VERTEXIOC_S_CTRL32 \
+ _IOW('V', 3, struct vs4l_ctrl32)
+#define VS4L_VERTEXIOC_QBUF32 \
+ _IOW('V', 6, struct vs4l_container_list32)
+#define VS4L_VERTEXIOC_DQBUF32 \
+ _IOW('V', 7, struct vs4l_container_list32)
+
+struct vertex_ioc_load_kernel_binary32 {
+ unsigned int size;
+ unsigned int global_id;
+ int kernel_fd;
+ unsigned int kernel_size;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vertex_ioc_load_graph_info32 {
+ unsigned int size;
+ struct vertex_common_graph_info graph_info;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vertex_ioc_unload_graph_info32 {
+ unsigned int size;
+ struct vertex_common_graph_info graph_info;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+struct vertex_ioc_execute_submodel32 {
+ unsigned int size;
+ struct vertex_common_execute_info execute_info;
+ int ret;
+ struct compat_timespec timestamp[4];
+ int reserved[2];
+};
+
+#define VERTEX_IOC_LOAD_KERNEL_BINARY32 \
+ _IOWR('V', 0, struct vertex_ioc_load_kernel_binary32)
+#define VERTEX_IOC_LOAD_GRAPH_INFO32 \
+ _IOWR('V', 1, struct vertex_ioc_load_graph_info32)
+#define VERTEX_IOC_UNLOAD_GRAPH_INFO32 \
+ _IOWR('V', 2, struct vertex_ioc_unload_graph_info32)
+#define VERTEX_IOC_EXECUTE_SUBMODEL32 \
+ _IOWR('V', 3, struct vertex_ioc_execute_submodel32)
+
+/* Copy a 32-bit S_GRAPH request into the native vs4l_graph. */
+static int __vertex_ioctl_get_graph32(struct vs4l_graph *karg,
+		struct vs4l_graph32 __user *uarg)
+{
+	int ret;
+	compat_caddr_t taddr;
+
+	vertex_enter();
+	if (get_user(karg->id, &uarg->id) ||
+			get_user(karg->priority, &uarg->priority) ||
+			get_user(karg->time, &uarg->time) ||
+			get_user(karg->flags, &uarg->flags) ||
+			get_user(karg->size, &uarg->size) ||
+			get_user(taddr, &uarg->addr)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_GRAPH32]\n");
+		goto p_err;
+	}
+
+	/*
+	 * NOTE(review): 'taddr' is fetched from userspace but never stored
+	 * into 'karg' — the 32-bit 'addr' value is silently dropped here.
+	 * Confirm whether vs4l_graph.addr is actually unused downstream.
+	 */
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Nothing is copied back for S_GRAPH; kept for get/put symmetry. */
+static void __vertex_ioctl_put_graph32(struct vs4l_graph *karg,
+		struct vs4l_graph32 __user *uarg)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit S_FORMAT request into the native vs4l_format_list,
+ * allocating karg->formats (freed by __vertex_ioctl_put_format32()).
+ * Returns 0 on success or a negative errno.
+ *
+ * Hardening: 'count' comes from userspace; kcalloc() rejects a
+ * count * size multiplication overflow that open-coded kzalloc() missed.
+ */
+static int __vertex_ioctl_get_format32(struct vs4l_format_list *karg,
+		struct vs4l_format_list32 __user *uarg)
+{
+	int ret;
+	unsigned int idx;
+	compat_caddr_t taddr;
+	struct vs4l_format32 __user *uformat;
+	struct vs4l_format *kformat;
+
+	vertex_enter();
+	if (get_user(karg->direction, &uarg->direction) ||
+			get_user(karg->count, &uarg->count) ||
+			get_user(taddr, &uarg->formats)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_FORMAT32]\n");
+		goto p_err;
+	}
+
+	uformat = compat_ptr(taddr);
+
+	kformat = kcalloc(karg->count, sizeof(*kformat), GFP_KERNEL);
+	if (!kformat) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kformat (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	for (idx = 0; idx < karg->count; ++idx) {
+		if (get_user(kformat[idx].target, &uformat[idx].target) ||
+				get_user(kformat[idx].format,
+					&uformat[idx].format) ||
+				get_user(kformat[idx].plane,
+					&uformat[idx].plane) ||
+				get_user(kformat[idx].width,
+					&uformat[idx].width) ||
+				get_user(kformat[idx].height,
+					&uformat[idx].height)) {
+			ret = -EFAULT;
+			vertex_err("Copy failed [S_FORMAT32]\n");
+			goto p_err_free;
+		}
+	}
+	karg->formats = kformat;
+
+	vertex_leave();
+	return 0;
+p_err_free:
+	kfree(kformat);
+p_err:
+	return ret;
+}
+
+/* Release the format array allocated by __vertex_ioctl_get_format32(). */
+static void __vertex_ioctl_put_format32(struct vs4l_format_list *karg,
+		struct vs4l_format_list32 __user *uarg)
+{
+	vertex_enter();
+	kfree(karg->formats);
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit S_PARAM request into the native vs4l_param_list,
+ * allocating karg->params (freed by __vertex_ioctl_put_param32()).
+ * Returns 0 on success or a negative errno.
+ *
+ * Hardening: user-controlled 'count' → kcalloc() for overflow-checked
+ * allocation (was an unchecked count * size kzalloc()).
+ */
+static int __vertex_ioctl_get_param32(struct vs4l_param_list *karg,
+		struct vs4l_param_list32 __user *uarg)
+{
+	int ret;
+	unsigned int idx;
+	compat_caddr_t taddr;
+	struct vs4l_param32 __user *uparam;
+	struct vs4l_param *kparam;
+
+	vertex_enter();
+	if (get_user(karg->count, &uarg->count) ||
+			get_user(taddr, &uarg->params)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_PARAM32]\n");
+		goto p_err;
+	}
+
+	uparam = compat_ptr(taddr);
+
+	kparam = kcalloc(karg->count, sizeof(*kparam), GFP_KERNEL);
+	if (!kparam) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kparam (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	for (idx = 0; idx < karg->count; ++idx) {
+		if (get_user(kparam[idx].target, &uparam[idx].target) ||
+				get_user(kparam[idx].addr, &uparam[idx].addr) ||
+				get_user(kparam[idx].offset,
+					&uparam[idx].offset) ||
+				get_user(kparam[idx].size,
+					&uparam[idx].size)) {
+			ret = -EFAULT;
+			vertex_err("Copy failed [S_PARAM32]\n");
+			goto p_err_free;
+		}
+	}
+
+	karg->params = kparam;
+	vertex_leave();
+	return 0;
+p_err_free:
+	kfree(kparam);
+p_err:
+	return ret;
+}
+
+/* Release the param array allocated by __vertex_ioctl_get_param32(). */
+static void __vertex_ioctl_put_param32(struct vs4l_param_list *karg,
+		struct vs4l_param_list32 __user *uarg)
+{
+	vertex_enter();
+	kfree(karg->params);
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit S_CTRL request into the native vs4l_ctrl; both members
+ * are plain 32-bit values. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_ctrl32(struct vs4l_ctrl *karg,
+		struct vs4l_ctrl32 __user *uarg)
+{
+	vertex_enter();
+	if (get_user(karg->ctrl, &uarg->ctrl) ||
+			get_user(karg->value, &uarg->value)) {
+		vertex_err("Copy failed [S_CTRL32]\n");
+		return -EFAULT;
+	}
+
+	vertex_leave();
+	return 0;
+}
+
+/* Nothing is copied back for S_CTRL; kept for get/put symmetry. */
+static void __vertex_ioctl_put_ctrl32(struct vs4l_ctrl *karg,
+		struct vs4l_ctrl32 __user *uarg)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit QBUF/DQBUF container list into the native structure,
+ * allocating karg->containers and each container's buffer array (freed
+ * by __vertex_ioctl_put_container32()). Returns 0 or a negative errno.
+ *
+ * Hardening/fixes vs. the original:
+ *  - both counts are user-controlled: kcalloc() rejects count * size
+ *    overflow that the open-coded kzalloc() missed;
+ *  - the compat pointers are properly tagged __user;
+ *  - on failure karg->containers is reset to NULL instead of being left
+ *    dangling on freed memory.
+ */
+static int __vertex_ioctl_get_container32(struct vs4l_container_list *karg,
+		struct vs4l_container_list32 __user *uarg)
+{
+	int ret;
+	compat_caddr_t taddr;
+	unsigned int c_count, b_count;
+	struct vs4l_container32 __user *ucon;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer32 __user *ubuf;
+	struct vs4l_buffer *kbuf;
+
+	vertex_enter();
+	if (get_user(karg->direction, &uarg->direction) ||
+			get_user(karg->id, &uarg->id) ||
+			get_user(karg->index, &uarg->index) ||
+			get_user(karg->flags, &uarg->flags) ||
+			get_user(karg->count, &uarg->count) ||
+			copy_from_user(karg->user_params, uarg->user_params,
+				sizeof(karg->user_params)) ||
+			get_user(taddr, &uarg->containers)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [CON32]\n");
+		goto p_err;
+	}
+
+	ucon = compat_ptr(taddr);
+
+	kcon = kcalloc(karg->count, sizeof(*kcon), GFP_KERNEL);
+	if (!kcon) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kcon (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	karg->containers = kcon;
+
+	for (c_count = 0; c_count < karg->count; ++c_count) {
+		if (get_user(kcon[c_count].type, &ucon[c_count].type) ||
+				get_user(kcon[c_count].target,
+					&ucon[c_count].target) ||
+				get_user(kcon[c_count].memory,
+					&ucon[c_count].memory) ||
+				copy_from_user(kcon[c_count].reserved,
+					ucon[c_count].reserved,
+					sizeof(kcon[c_count].reserved)) ||
+				get_user(kcon[c_count].count,
+					&ucon[c_count].count) ||
+				get_user(taddr, &ucon[c_count].buffers)) {
+			ret = -EFAULT;
+			vertex_err("Copy failed [CON32]\n");
+			goto p_err_free;
+		}
+
+		ubuf = compat_ptr(taddr);
+
+		kbuf = kcalloc(kcon[c_count].count, sizeof(*kbuf), GFP_KERNEL);
+		if (!kbuf) {
+			ret = -ENOMEM;
+			vertex_err("Failed to alloc kbuf (%u)\n",
+					kcon[c_count].count);
+			goto p_err_free;
+		}
+
+		kcon[c_count].buffers = kbuf;
+
+		for (b_count = 0; b_count < kcon[c_count].count;
+				++b_count) {
+			if (get_user(kbuf[b_count].roi.x,
+						&ubuf[b_count].roi.x) ||
+					get_user(kbuf[b_count].roi.y,
+						&ubuf[b_count].roi.y) ||
+					get_user(kbuf[b_count].roi.w,
+						&ubuf[b_count].roi.w) ||
+					get_user(kbuf[b_count].roi.h,
+						&ubuf[b_count].roi.h)) {
+				ret = -EFAULT;
+				vertex_err("Copy failed [CON32]\n");
+				goto p_err_free;
+			}
+
+			/* dma-buf carries an fd; everything else a pointer */
+			if (kcon[c_count].memory == VS4L_MEMORY_DMABUF) {
+				if (get_user(kbuf[b_count].m.fd,
+							&ubuf[b_count].m.fd)) {
+					ret = -EFAULT;
+					vertex_err("Copy failed [CON32]\n");
+					goto p_err_free;
+				}
+			} else {
+				if (get_user(kbuf[b_count].m.userptr,
+						&ubuf[b_count].m.userptr)) {
+					ret = -EFAULT;
+					vertex_err("Copy failed [CON32]\n");
+					goto p_err_free;
+				}
+			}
+		}
+	}
+
+	vertex_leave();
+	return 0;
+p_err_free:
+	/* zeroed allocation: unvisited 'buffers' pointers are NULL-safe */
+	for (c_count = 0; c_count < karg->count; ++c_count)
+		kfree(kcon[c_count].buffers);
+
+	kfree(kcon);
+	karg->containers = NULL;
+p_err:
+	return ret;
+}
+
+/*
+ * Copy results of QBUF/DQBUF back to 32-bit userspace and free the
+ * arrays allocated by __vertex_ioctl_get_container32().
+ * The six timestamps are copied in a loop instead of the original
+ * twelve-term unrolled put_user() chain.
+ */
+static void __vertex_ioctl_put_container32(struct vs4l_container_list *karg,
+		struct vs4l_container_list32 __user *uarg)
+{
+	unsigned int idx;
+	long err;
+
+	vertex_enter();
+	err = put_user(karg->id, &uarg->id);
+	err |= put_user(karg->index, &uarg->index);
+	err |= put_user(karg->flags, &uarg->flags);
+
+	for (idx = 0; idx < ARRAY_SIZE(uarg->timestamp); ++idx) {
+		err |= put_user(karg->timestamp[idx].tv_sec,
+				&uarg->timestamp[idx].tv_sec);
+		err |= put_user(karg->timestamp[idx].tv_usec,
+				&uarg->timestamp[idx].tv_usec);
+	}
+
+	err |= copy_to_user(uarg->user_params, karg->user_params,
+			sizeof(karg->user_params));
+	if (err)
+		vertex_err("Copy failed to user [CON32]\n");
+
+	/* Free the buffer arrays first, then the container array itself. */
+	for (idx = 0; idx < karg->count; ++idx)
+		kfree(karg->containers[idx].buffers);
+
+	kfree(karg->containers);
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit LOAD_KERNEL_BINARY request into the native structure.
+ * Output-only fields (timestamps, reserved) start zeroed.
+ * Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_load_kernel_binary32(
+		struct vertex_ioc_load_kernel_binary *karg,
+		struct vertex_ioc_load_kernel_binary32 __user *uarg)
+{
+	vertex_enter();
+	if (get_user(karg->size, &uarg->size) ||
+			get_user(karg->global_id, &uarg->global_id) ||
+			get_user(karg->kernel_fd, &uarg->kernel_fd) ||
+			get_user(karg->kernel_size, &uarg->kernel_size)) {
+		vertex_err("Copy failed [Load kernel binary(32)]\n");
+		return -EFAULT;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Copy the LOAD_KERNEL_BINARY result and timestamps back to 32-bit
+ * userspace. The four timespec entries are copied in a loop instead of
+ * the original unrolled put_user() chain.
+ */
+static void __vertex_ioctl_put_load_kernel_binary32(
+		struct vertex_ioc_load_kernel_binary *karg,
+		struct vertex_ioc_load_kernel_binary32 __user *uarg)
+{
+	unsigned int idx;
+	int err;
+
+	vertex_enter();
+	err = put_user(karg->ret, &uarg->ret);
+	for (idx = 0; idx < ARRAY_SIZE(uarg->timestamp); ++idx) {
+		err |= put_user(karg->timestamp[idx].tv_sec,
+				&uarg->timestamp[idx].tv_sec);
+		err |= put_user(karg->timestamp[idx].tv_nsec,
+				&uarg->timestamp[idx].tv_nsec);
+	}
+	if (err)
+		vertex_err("Copy failed to user [Load kernel binary(32)]\n");
+	vertex_leave();
+}
+
+/*
+ * Copy a 32-bit LOAD_GRAPH_INFO request into the native structure.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: copy_from_user() returns the number of bytes NOT copied, not an
+ * errno — the original returned that positive count to the caller on
+ * failure. Any partial copy is now reported as -EFAULT.
+ */
+static int __vertex_ioctl_get_load_graph_info32(
+		struct vertex_ioc_load_graph_info *karg,
+		struct vertex_ioc_load_graph_info32 __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (get_user(karg->size, &uarg->size)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [Load graph info(32)]\n");
+		goto p_err;
+	}
+
+	if (copy_from_user(&karg->graph_info, &uarg->graph_info,
+				sizeof(uarg->graph_info))) {
+		ret = -EFAULT;
+		vertex_err("Copy failed from user [Load graph info(32)]\n");
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Write the result code and profiling timestamps of a load-graph-info
+ * request back to the 32-bit user struct. Failures are only logged,
+ * matching the other compat "put" helpers.
+ */
+static void __vertex_ioctl_put_load_graph_info32(
+ struct vertex_ioc_load_graph_info *karg,
+ struct vertex_ioc_load_graph_info32 __user *uarg)
+{
+ vertex_enter();
+ if (put_user(karg->ret, &uarg->ret) ||
+ put_user(karg->timestamp[0].tv_sec,
+ &uarg->timestamp[0].tv_sec) ||
+ put_user(karg->timestamp[0].tv_nsec,
+ &uarg->timestamp[0].tv_nsec) ||
+ put_user(karg->timestamp[1].tv_sec,
+ &uarg->timestamp[1].tv_sec) ||
+ put_user(karg->timestamp[1].tv_nsec,
+ &uarg->timestamp[1].tv_nsec) ||
+ put_user(karg->timestamp[2].tv_sec,
+ &uarg->timestamp[2].tv_sec) ||
+ put_user(karg->timestamp[2].tv_nsec,
+ &uarg->timestamp[2].tv_nsec) ||
+ put_user(karg->timestamp[3].tv_sec,
+ &uarg->timestamp[3].tv_sec) ||
+ put_user(karg->timestamp[3].tv_nsec,
+ &uarg->timestamp[3].tv_nsec)) {
+ /* Fixed copy-paste: this is the load-graph-info path, not
+ * load-kernel-binary.
+ */
+ vertex_err("Copy failed to user [Load graph info(32)]\n");
+ }
+ vertex_leave();
+}
+
+/* Copy the 32-bit (compat) unload-graph-info ioctl args into the native
+ * struct and clear the output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_unload_graph_info32(
+ struct vertex_ioc_unload_graph_info *karg,
+ struct vertex_ioc_unload_graph_info32 __user *uarg)
+{
+ int ret;
+
+ vertex_enter();
+ if (get_user(karg->size, &uarg->size)) {
+ ret = -EFAULT;
+ vertex_err("Copy failed [Unload graph info(32)]\n");
+ goto p_err;
+ }
+
+ /* copy_from_user() returns bytes NOT copied; convert a partial
+ * copy to -EFAULT rather than returning the positive remainder.
+ */
+ if (copy_from_user(&karg->graph_info, &uarg->graph_info,
+ sizeof(uarg->graph_info))) {
+ ret = -EFAULT;
+ vertex_err("Copy failed from user [Unload graph info(32)]\n");
+ goto p_err;
+ }
+
+ memset(karg->timestamp, 0, sizeof(karg->timestamp));
+ memset(karg->reserved, 0, sizeof(karg->reserved));
+
+ vertex_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/* Write the result code and profiling timestamps of an unload-graph-info
+ * request back to the 32-bit user struct. Failures are only logged.
+ */
+static void __vertex_ioctl_put_unload_graph_info32(
+ struct vertex_ioc_unload_graph_info *karg,
+ struct vertex_ioc_unload_graph_info32 __user *uarg)
+{
+ vertex_enter();
+ if (put_user(karg->ret, &uarg->ret) ||
+ put_user(karg->timestamp[0].tv_sec,
+ &uarg->timestamp[0].tv_sec) ||
+ put_user(karg->timestamp[0].tv_nsec,
+ &uarg->timestamp[0].tv_nsec) ||
+ put_user(karg->timestamp[1].tv_sec,
+ &uarg->timestamp[1].tv_sec) ||
+ put_user(karg->timestamp[1].tv_nsec,
+ &uarg->timestamp[1].tv_nsec) ||
+ put_user(karg->timestamp[2].tv_sec,
+ &uarg->timestamp[2].tv_sec) ||
+ put_user(karg->timestamp[2].tv_nsec,
+ &uarg->timestamp[2].tv_nsec) ||
+ put_user(karg->timestamp[3].tv_sec,
+ &uarg->timestamp[3].tv_sec) ||
+ put_user(karg->timestamp[3].tv_nsec,
+ &uarg->timestamp[3].tv_nsec)) {
+ /* Fixed copy-paste: this is the unload-graph-info path. */
+ vertex_err("Copy failed to user [Unload graph info(32)]\n");
+ }
+ vertex_leave();
+}
+
+/* Copy the 32-bit (compat) execute-submodel ioctl args into the native
+ * struct and clear the output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_execute_submodel32(
+ struct vertex_ioc_execute_submodel *karg,
+ struct vertex_ioc_execute_submodel32 __user *uarg)
+{
+ int ret;
+
+ vertex_enter();
+ if (get_user(karg->size, &uarg->size)) {
+ ret = -EFAULT;
+ vertex_err("Copy failed [Execute submodel(32)]\n");
+ goto p_err;
+ }
+
+ /* copy_from_user() returns bytes NOT copied; convert a partial
+ * copy to -EFAULT rather than returning the positive remainder.
+ */
+ if (copy_from_user(&karg->execute_info, &uarg->execute_info,
+ sizeof(uarg->execute_info))) {
+ ret = -EFAULT;
+ vertex_err("Copy failed from user [Execute submodel(32)]\n");
+ goto p_err;
+ }
+
+ memset(karg->timestamp, 0, sizeof(karg->timestamp));
+ memset(karg->reserved, 0, sizeof(karg->reserved));
+
+ vertex_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/* Write the result code and profiling timestamps of an execute-submodel
+ * request back to the 32-bit user struct. Failures are only logged.
+ */
+static void __vertex_ioctl_put_execute_submodel32(
+ struct vertex_ioc_execute_submodel *karg,
+ struct vertex_ioc_execute_submodel32 __user *uarg)
+{
+ vertex_enter();
+ if (put_user(karg->ret, &uarg->ret) ||
+ put_user(karg->timestamp[0].tv_sec,
+ &uarg->timestamp[0].tv_sec) ||
+ put_user(karg->timestamp[0].tv_nsec,
+ &uarg->timestamp[0].tv_nsec) ||
+ put_user(karg->timestamp[1].tv_sec,
+ &uarg->timestamp[1].tv_sec) ||
+ put_user(karg->timestamp[1].tv_nsec,
+ &uarg->timestamp[1].tv_nsec) ||
+ put_user(karg->timestamp[2].tv_sec,
+ &uarg->timestamp[2].tv_sec) ||
+ put_user(karg->timestamp[2].tv_nsec,
+ &uarg->timestamp[2].tv_nsec) ||
+ put_user(karg->timestamp[3].tv_sec,
+ &uarg->timestamp[3].tv_sec) ||
+ put_user(karg->timestamp[3].tv_nsec,
+ &uarg->timestamp[3].tv_nsec)) {
+ /* Fixed copy-paste: this is the execute-submodel path. */
+ vertex_err("Copy failed to user [Execute submodel(32)]\n");
+ }
+ vertex_leave();
+}
+
+/* compat_ioctl entry point: translate each 32-bit command's argument
+ * struct to the native layout, dispatch to the core ioctl ops, then copy
+ * results back. The "put" helpers run even when the op failed so the
+ * user always receives the result code.
+ *
+ * NOTE(review): unknown commands return -EINVAL; the usual kernel
+ * convention for unrecognized ioctls is -ENOTTY — confirm intent.
+ */
+long vertex_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct vertex_context *vctx;
+ const struct vertex_ioctl_ops *ops;
+ union vertex_ioc_arg karg;
+ void __user *uarg;
+
+ vertex_enter();
+ vctx = file->private_data;
+ ops = vctx->core->ioc_ops;
+ uarg = compat_ptr(arg);
+
+ switch (cmd) {
+ case VS4L_VERTEXIOC_S_GRAPH32:
+ ret = __vertex_ioctl_get_graph32(&karg.graph, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->set_graph(vctx, &karg.graph);
+ __vertex_ioctl_put_graph32(&karg.graph, uarg);
+ break;
+ case VS4L_VERTEXIOC_S_FORMAT32:
+ ret = __vertex_ioctl_get_format32(&karg.flist, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->set_format(vctx, &karg.flist);
+ __vertex_ioctl_put_format32(&karg.flist, uarg);
+ break;
+ case VS4L_VERTEXIOC_S_PARAM32:
+ ret = __vertex_ioctl_get_param32(&karg.plist, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->set_param(vctx, &karg.plist);
+ __vertex_ioctl_put_param32(&karg.plist, uarg);
+ break;
+ case VS4L_VERTEXIOC_S_CTRL32:
+ ret = __vertex_ioctl_get_ctrl32(&karg.ctrl, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->set_ctrl(vctx, &karg.ctrl);
+ __vertex_ioctl_put_ctrl32(&karg.ctrl, uarg);
+ break;
+ /* Stream on/off take no argument struct, so no get/put helpers. */
+ case VS4L_VERTEXIOC_STREAM_ON:
+ ret = ops->streamon(vctx);
+ break;
+ case VS4L_VERTEXIOC_STREAM_OFF:
+ ret = ops->streamoff(vctx);
+ break;
+ case VS4L_VERTEXIOC_QBUF32:
+ ret = __vertex_ioctl_get_container32(&karg.clist, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->qbuf(vctx, &karg.clist);
+ __vertex_ioctl_put_container32(&karg.clist, uarg);
+ break;
+ case VS4L_VERTEXIOC_DQBUF32:
+ ret = __vertex_ioctl_get_container32(&karg.clist, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->dqbuf(vctx, &karg.clist);
+ __vertex_ioctl_put_container32(&karg.clist, uarg);
+ break;
+ case VERTEX_IOC_LOAD_KERNEL_BINARY32:
+ ret = __vertex_ioctl_get_load_kernel_binary32(&karg.kernel_bin,
+ uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
+ __vertex_ioctl_put_load_kernel_binary32(&karg.kernel_bin, uarg);
+ break;
+ case VERTEX_IOC_LOAD_GRAPH_INFO32:
+ ret = __vertex_ioctl_get_load_graph_info32(&karg.load_ginfo,
+ uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->load_graph_info(vctx, &karg.load_ginfo);
+ __vertex_ioctl_put_load_graph_info32(&karg.load_ginfo, uarg);
+ break;
+ case VERTEX_IOC_UNLOAD_GRAPH_INFO32:
+ ret = __vertex_ioctl_get_unload_graph_info32(&karg.unload_ginfo,
+ uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->unload_graph_info(vctx, &karg.unload_ginfo);
+ __vertex_ioctl_put_unload_graph_info32(&karg.unload_ginfo,
+ uarg);
+ break;
+ case VERTEX_IOC_EXECUTE_SUBMODEL32:
+ ret = __vertex_ioctl_get_execute_submodel32(&karg.exec, uarg);
+ if (ret)
+ goto p_err;
+
+ ret = ops->execute_submodel(vctx, &karg.exec);
+ __vertex_ioctl_put_execute_submodel32(&karg.exec, uarg);
+ break;
+ default:
+ ret = -EINVAL;
+ vertex_err("ioc command(%x) is not supported\n", cmd);
+ goto p_err;
+ }
+
+ vertex_leave();
+p_err:
+ return ret;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_CONFIG_H__
+#define __VERTEX_CONFIG_H__
+
+/* CONFIG - GLOBAL OPTIONS */
+#define VERTEX_MAX_BUFFER (16)
+#define VERTEX_MAX_GRAPH (32)
+#define VERTEX_MAX_TASK VERTEX_MAX_BUFFER
+/* Maximum number of planes per buffer (a redundant duplicate definition
+ * of this macro was removed).
+ */
+#define VERTEX_MAX_PLANES (3)
+
+#define VERTEX_MAX_TASKDESC (VERTEX_MAX_GRAPH * 2)
+
+#define VERTEX_MAX_CONTEXT (16)
+
+/* CONFIG - PLATFORM CONFIG */
+#define VERTEX_STOP_WAIT_COUNT (200)
+//#define VERTEX_MBOX_EMULATOR
+
+/* CONFIG - DEBUG OPTIONS */
+#define DEBUG_LOG_CONCATE_ENABLE
+//#define DEBUG_TIME_MEASURE
+//#define DEBUG_LOG_IO_WRITE
+//#define DEBUG_LOG_DUMP_REGION
+//#define DEBUG_LOG_MEMORY
+//#define DEBUG_LOG_CALL_TREE
+//#define DEBUG_LOG_MAILBOX_DUMP
+
+//#define TEMP_RT_FRAMEWORK_TEST
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-util.h"
+#include "vertex-kernel-binary.h"
+#include "vertex-context.h"
+
+#define KERNEL_BINARY_DEBUG
+#define LOAD_GRAPH_INFO_DEBUG
+#define UNLOAD_GRAPH_INFO_DEBUG
+#define EXECUTE_DEBUG
+
+/* Context-level op stubs: each currently only traces entry/exit and
+ * returns success. Presumably placeholders to be filled in by later
+ * patches — TODO confirm.
+ */
+static int vertex_context_load_kernel_binary(struct vertex_context *vctx,
+ struct vertex_ioc_load_kernel_binary *kernel_bin)
+{
+ vertex_enter();
+ vertex_leave();
+ return 0;
+}
+
+static int vertex_context_load_graph_info(struct vertex_context *vctx,
+ struct vertex_ioc_load_graph_info *ginfo)
+{
+ vertex_enter();
+ vertex_leave();
+ return 0;
+}
+
+static int vertex_context_unload_graph_info(struct vertex_context *vctx,
+ struct vertex_ioc_unload_graph_info *ginfo)
+{
+ vertex_enter();
+ vertex_leave();
+ return 0;
+}
+
+static int vertex_context_execute_submodel(struct vertex_context *vctx,
+ struct vertex_ioc_execute_submodel *execute)
+{
+ vertex_enter();
+ vertex_leave();
+ return 0;
+}
+
+/* vops table installed on every context by vertex_context_create(). */
+static const struct vertex_context_ops vertex_context_ops = {
+ .load_kernel_binary = vertex_context_load_kernel_binary,
+ .load_graph_info = vertex_context_load_graph_info,
+ .unload_graph_info = vertex_context_unload_graph_info,
+ .execute_submodel = vertex_context_execute_submodel
+};
+
+/* Allocate and register a new context on @core: reserves a slot in the
+ * core's context bitmap, links it into the core's context list, and
+ * initializes its lock, ops table, kernel-binary list, and queues.
+ * Returns the context or ERR_PTR(-ENOMEM). Caller must hold whatever
+ * lock protects core->vctx_map/vctx_list (the open path holds
+ * core->lock).
+ */
+struct vertex_context *vertex_context_create(struct vertex_core *core)
+{
+ int ret;
+ struct vertex_context *vctx;
+
+ vertex_enter();
+ vctx = kzalloc(sizeof(*vctx), GFP_KERNEL);
+ if (!vctx) {
+ ret = -ENOMEM;
+ vertex_err("Failed to alloc context\n");
+ goto p_err;
+ }
+ vctx->core = core;
+
+ /* VERTEX_MAX_CONTEXT as return value means the bitmap is full. */
+ vctx->idx = vertex_util_bitmap_get_zero_bit(core->vctx_map,
+ VERTEX_MAX_CONTEXT);
+ if (vctx->idx == VERTEX_MAX_CONTEXT) {
+ ret = -ENOMEM;
+ vertex_err("Failed to get idx of context\n");
+ goto p_err_bitmap;
+ }
+
+ core->vctx_count++;
+ list_add_tail(&vctx->list, &core->vctx_list);
+
+ mutex_init(&vctx->lock);
+
+ vctx->vops = &vertex_context_ops;
+ vctx->binary_count = 0;
+ INIT_LIST_HEAD(&vctx->binary_list);
+ spin_lock_init(&vctx->binary_slock);
+
+ vertex_queue_init(vctx);
+
+ vctx->state = BIT(VERTEX_CONTEXT_OPEN);
+ vertex_leave();
+ return vctx;
+p_err_bitmap:
+ kfree(vctx);
+p_err:
+ return ERR_PTR(ret);
+}
+
+/* Tear down a context created by vertex_context_create(): frees any
+ * remaining kernel binaries, unlinks it from the core, releases its
+ * bitmap slot, and frees the struct.
+ */
+void vertex_context_destroy(struct vertex_context *vctx)
+{
+ struct vertex_core *core;
+
+ vertex_enter();
+ core = vctx->core;
+
+ if (vctx->binary_count)
+ vertex_kernel_binary_all_remove(vctx);
+
+ mutex_destroy(&vctx->lock);
+ list_del(&vctx->list);
+ core->vctx_count--;
+ vertex_util_bitmap_clear_bit(core->vctx_map, vctx->idx);
+ kfree(vctx);
+ vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_CONTEXT_H__
+#define __VERTEX_CONTEXT_H__
+
+#include <linux/mutex.h>
+
+#include "vertex-queue.h"
+#include "vertex-graph.h"
+#include "vertex-ioctl.h"
+#include "vertex-core.h"
+
+/* Context lifecycle states, stored as a bitmask in vertex_context.state
+ * (the core ioctl handlers test them with BIT()).
+ */
+enum vertex_context_state {
+ VERTEX_CONTEXT_OPEN,
+ VERTEX_CONTEXT_GRAPH,
+ VERTEX_CONTEXT_FORMAT,
+ VERTEX_CONTEXT_START,
+ VERTEX_CONTEXT_STOP
+};
+
+struct vertex_context;
+
+/* Per-context vertex-specific operations (kernel binary / graph info /
+ * submodel execution), dispatched under vctx->lock.
+ */
+struct vertex_context_ops {
+ int (*load_kernel_binary)(struct vertex_context *vctx,
+ struct vertex_ioc_load_kernel_binary *kernel_bin);
+ int (*load_graph_info)(struct vertex_context *vctx,
+ struct vertex_ioc_load_graph_info *ginfo);
+ int (*unload_graph_info)(struct vertex_context *vctx,
+ struct vertex_ioc_unload_graph_info *ginfo);
+ int (*execute_submodel)(struct vertex_context *vctx,
+ struct vertex_ioc_execute_submodel *execute);
+};
+
+/* VS4L queue operations implemented by the queue layer. */
+struct vertex_context_qops {
+ int (*poll)(struct vertex_queue_list *qlist,
+ struct file *file, struct poll_table_struct *poll);
+ int (*set_graph)(struct vertex_queue_list *qlist,
+ struct vs4l_graph *ginfo);
+ int (*set_format)(struct vertex_queue_list *qlist,
+ struct vs4l_format_list *flist);
+ int (*set_param)(struct vertex_queue_list *qlist,
+ struct vs4l_param_list *plist);
+ int (*set_ctrl)(struct vertex_queue_list *qlist,
+ struct vs4l_ctrl *ctrl);
+ int (*qbuf)(struct vertex_queue_list *qlist,
+ struct vs4l_container_list *clist);
+ int (*dqbuf)(struct vertex_queue_list *qlist,
+ struct vs4l_container_list *clist);
+ int (*streamon)(struct vertex_queue_list *qlist);
+ int (*streamoff)(struct vertex_queue_list *qlist);
+};
+
+/* Graph-model operations implemented by the graph layer. */
+struct vertex_context_gops {
+ struct vertex_graph_model *(*create_model)(struct vertex_graph *graph,
+ struct vertex_common_graph_info *ginfo);
+ struct vertex_graph_model *(*get_model)(struct vertex_graph *graph,
+ unsigned int id);
+ int (*destroy_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel);
+ int (*register_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel);
+ int (*unregister_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel);
+ int (*start_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel);
+ int (*stop_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel);
+ int (*execute_model)(struct vertex_graph *graph,
+ struct vertex_graph_model *gmodel,
+ struct vertex_common_execute_info *einfo);
+};
+
+/* One instance per open file descriptor; created in vertex_open() and
+ * destroyed in vertex_release().
+ */
+struct vertex_context {
+ unsigned int state;
+ unsigned int idx;
+ struct list_head list;
+ struct mutex lock;
+
+ const struct vertex_context_ops *vops;
+ int binary_count;
+ struct list_head binary_list;
+ spinlock_t binary_slock;
+ const struct vertex_context_gops *graph_ops;
+
+ const struct vertex_context_qops *queue_ops;
+ struct vertex_queue_list queue_list;
+
+ struct vertex_core *core;
+ struct vertex_graph *graph;
+};
+
+struct vertex_context *vertex_context_create(struct vertex_core *core);
+void vertex_context_destroy(struct vertex_context *vctx);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-util.h"
+#include "vertex-ioctl.h"
+#include "vertex-device.h"
+#include "vertex-graph.h"
+#include "vertex-context.h"
+#include "vertex-core.h"
+
+/* Take a reference; the first taker runs vref->first() (e.g. device
+ * open/start). If that callback fails, the refcount is rolled back so
+ * the next caller retries it.
+ */
+static inline int __vref_get(struct vertex_core_refcount *vref)
+{
+ int ret;
+
+ vertex_enter();
+ ret = (atomic_inc_return(&vref->refcount) == 1) ?
+ vref->first(vref->core) : 0;
+ if (ret)
+ atomic_dec(&vref->refcount);
+ vertex_leave();
+ return ret;
+}
+
+/* Drop a reference; the last dropper runs vref->final() (e.g. device
+ * close/stop). On failure the refcount is restored.
+ */
+static inline int __vref_put(struct vertex_core_refcount *vref)
+{
+ int ret;
+
+ vertex_enter();
+ ret = (atomic_dec_return(&vref->refcount) == 0) ?
+ vref->final(vref->core) : 0;
+ if (ret)
+ atomic_inc(&vref->refcount);
+ vertex_leave();
+ return ret;
+}
+
+/* S_GRAPH ioctl: only legal from the OPEN state; on success the context
+ * advances to the GRAPH state.
+ */
+static int vertex_core_set_graph(struct vertex_context *vctx,
+ struct vs4l_graph *ginfo)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for set_graph (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_OPEN))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not open\n", vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->set_graph(&vctx->queue_list, ginfo);
+ if (ret)
+ goto p_err_queue_ops;
+
+ vctx->state = BIT(VERTEX_CONTEXT_GRAPH);
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* S_FORMAT ioctl: legal from GRAPH or FORMAT (format may be set more
+ * than once); advances the context to the FORMAT state.
+ */
+static int vertex_core_set_format(struct vertex_context *vctx,
+ struct vs4l_format_list *flist)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for set_format (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & (BIT(VERTEX_CONTEXT_GRAPH) |
+ BIT(VERTEX_CONTEXT_FORMAT)))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not graph/format\n",
+ vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->set_format(&vctx->queue_list, flist);
+ if (ret)
+ goto p_err_queue_ops;
+
+ vctx->state = BIT(VERTEX_CONTEXT_FORMAT);
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* S_PARAM ioctl: only legal while streaming (START); does not change
+ * the context state.
+ */
+static int vertex_core_set_param(struct vertex_context *vctx,
+ struct vs4l_param_list *plist)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for set_param (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_START))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not start\n", vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->set_param(&vctx->queue_list, plist);
+ if (ret)
+ goto p_err_queue_ops;
+
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* S_CTRL ioctl: no state restriction, only the context lock is taken. */
+static int vertex_core_set_ctrl(struct vertex_context *vctx,
+ struct vs4l_ctrl *ctrl)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for set_ctrl (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ ret = vctx->queue_ops->set_ctrl(&vctx->queue_list, ctrl);
+ if (ret)
+ goto p_err_queue_ops;
+
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* QBUF ioctl: queue a container list; only legal while streaming. */
+static int vertex_core_qbuf(struct vertex_context *vctx,
+ struct vs4l_container_list *clist)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for qbuf (%d)\n", ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_START))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not start\n", vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->qbuf(&vctx->queue_list, clist);
+ if (ret)
+ goto p_err_queue_ops;
+
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* DQBUF ioctl: dequeue a completed container list; only legal while
+ * streaming.
+ */
+static int vertex_core_dqbuf(struct vertex_context *vctx,
+ struct vs4l_container_list *clist)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for dqbuf (%d)\n", ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_START))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not start\n", vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->dqbuf(&vctx->queue_list, clist);
+ if (ret)
+ goto p_err_queue_ops;
+
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* STREAM_ON ioctl: legal from FORMAT or STOP. Takes a device "start"
+ * reference (the first streamer powers up the device via __vref_start)
+ * before starting the queues; advances the context to START.
+ */
+static int vertex_core_streamon(struct vertex_context *vctx)
+{
+ int ret;
+ struct vertex_core *core;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for stream-on (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & (BIT(VERTEX_CONTEXT_FORMAT) |
+ BIT(VERTEX_CONTEXT_STOP)))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not format/stop\n",
+ vctx->state);
+ goto p_err_state;
+ }
+
+ core = vctx->core;
+ ret = __vref_get(&core->start_cnt);
+ if (ret) {
+ vertex_err("vref_get(start) is fail(%d)\n", ret);
+ goto p_err_vref;
+ }
+
+ /* NOTE(review): if streamon fails here, the start reference taken
+ * above is not dropped — confirm whether that is intended.
+ */
+ ret = vctx->queue_ops->streamon(&vctx->queue_list);
+ if (ret)
+ goto p_err_queue_ops;
+
+ vctx->state = BIT(VERTEX_CONTEXT_START);
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_queue_ops:
+p_err_vref:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* STREAM_OFF ioctl: legal only from START. Stops the queues, then drops
+ * the device "start" reference; advances the context to STOP.
+ */
+static int vertex_core_streamoff(struct vertex_context *vctx)
+{
+ int ret;
+ struct vertex_core *core;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock context lock for stream-off (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_START))) {
+ ret = -EINVAL;
+ vertex_err("Context state(%x) is not start\n", vctx->state);
+ goto p_err_state;
+ }
+
+ ret = vctx->queue_ops->streamoff(&vctx->queue_list);
+ if (ret)
+ goto p_err_queue_ops;
+
+ core = vctx->core;
+ ret = __vref_put(&core->start_cnt);
+ if (ret) {
+ vertex_err("vref_put(start) is fail(%d)\n", ret);
+ goto p_err_vref;
+ }
+
+ vctx->state = BIT(VERTEX_CONTEXT_STOP);
+ mutex_unlock(&vctx->lock);
+ vertex_leave();
+ return 0;
+p_err_vref:
+p_err_queue_ops:
+p_err_state:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ return ret;
+}
+
+/* LOAD_KERNEL_BINARY ioctl: serialize on the context lock and forward to
+ * the context vops; args->ret mirrors the return code so user space can
+ * read it from the copied-back struct.
+ */
+static int vertex_core_load_kernel_binary(struct vertex_context *vctx,
+ struct vertex_ioc_load_kernel_binary *args)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock for loading kernel binary (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ ret = vctx->vops->load_kernel_binary(vctx, args);
+ if (ret)
+ goto p_err_vops;
+
+ mutex_unlock(&vctx->lock);
+ args->ret = 0;
+ vertex_leave();
+ return 0;
+p_err_vops:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ args->ret = ret;
+ return ret;
+}
+
+/* LOAD_GRAPH_INFO ioctl: serialize on the context lock and forward to
+ * the context vops; args->ret mirrors the return code so user space can
+ * read it from the copied-back struct.
+ */
+static int vertex_core_load_graph_info(struct vertex_context *vctx,
+ struct vertex_ioc_load_graph_info *args)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ /* Fixed typo: "loadding" -> "loading". */
+ vertex_err("Failed to lock for loading graph info (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ ret = vctx->vops->load_graph_info(vctx, args);
+ if (ret)
+ goto p_err_vops;
+
+ mutex_unlock(&vctx->lock);
+ args->ret = 0;
+ vertex_leave();
+ return 0;
+p_err_vops:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ args->ret = ret;
+ return ret;
+}
+
+/* UNLOAD_GRAPH_INFO ioctl: serialize on the context lock and forward to
+ * the context vops; args->ret mirrors the return code.
+ */
+static int vertex_core_unload_graph_info(struct vertex_context *vctx,
+ struct vertex_ioc_unload_graph_info *args)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock for unloading graph info (%d)\n",
+ ret);
+ goto p_err_lock;
+ }
+
+ ret = vctx->vops->unload_graph_info(vctx, args);
+ if (ret)
+ goto p_err_vops;
+
+ mutex_unlock(&vctx->lock);
+ args->ret = 0;
+ vertex_leave();
+ return 0;
+p_err_vops:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ args->ret = ret;
+ return ret;
+}
+
+/* EXECUTE_SUBMODEL ioctl: serialize on the context lock and forward to
+ * the context vops; args->ret mirrors the return code.
+ */
+static int vertex_core_execute_submodel(struct vertex_context *vctx,
+ struct vertex_ioc_execute_submodel *args)
+{
+ int ret;
+
+ vertex_enter();
+ if (mutex_lock_interruptible(&vctx->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock for executing submodel (%d)\n", ret);
+ goto p_err_lock;
+ }
+
+ ret = vctx->vops->execute_submodel(vctx, args);
+ if (ret)
+ goto p_err_vops;
+
+ mutex_unlock(&vctx->lock);
+ args->ret = 0;
+ vertex_leave();
+ return 0;
+p_err_vops:
+ mutex_unlock(&vctx->lock);
+p_err_lock:
+ args->ret = ret;
+ return ret;
+}
+
+/* ioctl dispatch table installed on the core at probe time; both the
+ * native and compat ioctl paths go through these handlers.
+ */
+const struct vertex_ioctl_ops vertex_core_ioctl_ops = {
+ .set_graph = vertex_core_set_graph,
+ .set_format = vertex_core_set_format,
+ .set_param = vertex_core_set_param,
+ .set_ctrl = vertex_core_set_ctrl,
+ .qbuf = vertex_core_qbuf,
+ .dqbuf = vertex_core_dqbuf,
+ .streamon = vertex_core_streamon,
+ .streamoff = vertex_core_streamoff,
+
+ .load_kernel_binary = vertex_core_load_kernel_binary,
+ .load_graph_info = vertex_core_load_graph_info,
+ .unload_graph_info = vertex_core_unload_graph_info,
+ .execute_submodel = vertex_core_execute_submodel,
+};
+
+/* Device-file open: take an "open" device reference (the first opener
+ * powers up the device), create a context plus its graph, and stash the
+ * context as file private data for later ioctl/release calls.
+ */
+static int vertex_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct vertex_device *device;
+ struct vertex_core *core;
+ struct vertex_context *vctx;
+ struct vertex_graph *graph;
+
+ vertex_enter();
+ miscdev = file->private_data;
+ device = dev_get_drvdata(miscdev->parent);
+ core = &device->core;
+
+ if (mutex_lock_interruptible(&core->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock device lock for open (%d)\n", ret);
+ goto p_err_lock;
+ }
+
+ ret = __vref_get(&core->open_cnt);
+ if (ret) {
+ vertex_err("vref_get(open) is fail(%d)", ret);
+ goto p_err_vref;
+ }
+
+ vctx = vertex_context_create(core);
+ if (IS_ERR(vctx)) {
+ ret = PTR_ERR(vctx);
+ goto p_err_vctx;
+ }
+
+ graph = vertex_graph_create(vctx, &core->system->graphmgr);
+ if (IS_ERR(graph)) {
+ ret = PTR_ERR(graph);
+ goto p_err_graph;
+ }
+ vctx->graph = graph;
+
+ file->private_data = vctx;
+
+ mutex_unlock(&core->lock);
+ vertex_leave();
+ return 0;
+p_err_graph:
+ /* Fix: the context was leaked (list entry, bitmap slot, and the
+ * allocation itself) when graph creation failed; release won't run
+ * because open returns an error.
+ */
+ vertex_context_destroy(vctx);
+p_err_vctx:
+ __vref_put(&core->open_cnt);
+p_err_vref:
+ mutex_unlock(&core->lock);
+p_err_lock:
+ return ret;
+}
+
+/* Device-file release: tear down the graph and context created in
+ * vertex_open() and drop the "open" device reference (the last closer
+ * powers the device down).
+ */
+static int vertex_release(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct vertex_context *vctx;
+ struct vertex_core *core;
+
+ vertex_enter();
+ vctx = file->private_data;
+ core = vctx->core;
+
+ if (mutex_lock_interruptible(&core->lock)) {
+ ret = -ERESTARTSYS;
+ vertex_err("Failed to lock device lock for release (%d)\n",
+ ret);
+ return ret;
+ }
+
+ vertex_graph_destroy(vctx->graph);
+ vertex_context_destroy(vctx);
+ __vref_put(&core->open_cnt);
+
+ mutex_unlock(&core->lock);
+ vertex_leave();
+ return 0;
+}
+
+/* poll() handler: delegates to the queue layer once streaming has
+ * started; POLLERR if the context is not in the START state.
+ *
+ * NOTE(review): on success this returns 0 (no events) and otherwise
+ * returns whatever queue_ops->poll produced via the error path —
+ * confirm the queue op returns a poll event mask, not a negative errno,
+ * since poll handlers must return a mask.
+ */
+static unsigned int vertex_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ int ret;
+ struct vertex_context *vctx;
+
+ vertex_enter();
+ vctx = file->private_data;
+ if (!(vctx->state & BIT(VERTEX_CONTEXT_START))) {
+ ret = POLLERR;
+ vertex_err("Context state(%x) is not start (%d)\n",
+ vctx->state, ret);
+ goto p_err;
+ }
+
+ ret = vctx->queue_ops->poll(&vctx->queue_list, file, poll);
+ if (ret)
+ goto p_err;
+
+ vertex_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+/* File operations for the /dev/vertex0 misc device. */
+const struct file_operations vertex_file_ops = {
+ .owner = THIS_MODULE,
+ .open = vertex_open,
+ .release = vertex_release,
+ .poll = vertex_poll,
+ .unlocked_ioctl = vertex_ioctl,
+ .compat_ioctl = vertex_compat_ioctl
+};
+
+/* First-open callback: reset the start refcount and power the device
+ * up.
+ */
+static int __vref_open(struct vertex_core *core)
+{
+ vertex_check();
+ atomic_set(&core->start_cnt.refcount, 0);
+ return vertex_device_open(core->device);
+}
+
+/* Last-close callback. */
+static int __vref_close(struct vertex_core *core)
+{
+ vertex_check();
+ return vertex_device_close(core->device);
+}
+
+/* First-streamon callback. */
+static int __vref_start(struct vertex_core *core)
+{
+ vertex_check();
+ return vertex_device_start(core->device);
+}
+
+/* Last-streamoff callback. */
+static int __vref_stop(struct vertex_core *core)
+{
+ vertex_check();
+ return vertex_device_stop(core->device);
+}
+
+/* Bind a refcount to its core and first/final transition callbacks. */
+static inline void __vref_init(struct vertex_core_refcount *vref,
+ struct vertex_core *core,
+ int (*first)(struct vertex_core *core),
+ int (*final)(struct vertex_core *core))
+{
+ vertex_enter();
+ vref->core = core;
+ vref->first = first;
+ vref->final = final;
+ atomic_set(&vref->refcount, 0);
+ vertex_leave();
+}
+
+/* Top-level data for debugging */
+static struct vertex_dev *vdev;
+
+/* Initialize the core: locks, open/start refcounts, ioctl ops, context
+ * bookkeeping, and registration of the misc character device that user
+ * space opens. Returns 0 or a negative errno.
+ */
+int vertex_core_probe(struct vertex_device *device)
+{
+ int ret;
+ struct vertex_core *core;
+
+ vertex_enter();
+ core = &device->core;
+ core->device = device;
+ core->system = &device->system;
+ vdev = &core->vdev;
+
+ mutex_init(&core->lock);
+ __vref_init(&core->open_cnt, core, __vref_open, __vref_close);
+ __vref_init(&core->start_cnt, core, __vref_start, __vref_stop);
+ core->ioc_ops = &vertex_core_ioctl_ops;
+
+ vertex_util_bitmap_init(core->vctx_map, VERTEX_MAX_CONTEXT);
+ INIT_LIST_HEAD(&core->vctx_list);
+ core->vctx_count = 0;
+
+ vdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ vdev->miscdev.name = VERTEX_DEV_NAME;
+ vdev->miscdev.fops = &vertex_file_ops;
+ vdev->miscdev.parent = device->dev;
+
+ ret = misc_register(&vdev->miscdev);
+ if (ret) {
+ vertex_err("miscdevice is not registered (%d)\n", ret);
+ goto p_err_misc;
+ }
+
+ vertex_leave();
+ return 0;
+p_err_misc:
+ mutex_destroy(&core->lock);
+ return ret;
+}
+
+/* Undo vertex_core_probe(): unregister the misc device and destroy the
+ * core lock.
+ */
+void vertex_core_remove(struct vertex_core *core)
+{
+ vertex_enter();
+ misc_deregister(&core->vdev.miscdev);
+ mutex_destroy(&core->lock);
+ vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_CORE_H__
+#define __VERTEX_CORE_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+
+#include "vertex-system.h"
+
+/* Information about VIPx device file */
+#define VERTEX_DEV_NAME_LEN (16)
+#define VERTEX_DEV_NAME "vertex0"
+
+struct vertex_device;
+struct vertex_core;
+
+/* Refcount with first/final transition hooks: first() runs on the
+ * 0 -> 1 transition, final() on the 1 -> 0 transition (see __vref_get /
+ * __vref_put in vertex-core.c).
+ */
+struct vertex_core_refcount {
+ atomic_t refcount;
+ struct vertex_core *core;
+ int (*first)(struct vertex_core *core);
+ int (*final)(struct vertex_core *core);
+};
+
+/* Misc character-device wrapper for the vertex node. */
+struct vertex_dev {
+ int minor;
+ char name[VERTEX_DEV_NAME_LEN];
+ struct miscdevice miscdev;
+};
+
+/* Per-device core state: device node, open/start refcounts, ioctl ops,
+ * and bookkeeping for up to VERTEX_MAX_CONTEXT open contexts, all
+ * guarded by @lock.
+ */
+struct vertex_core {
+ struct vertex_dev vdev;
+ struct mutex lock;
+ struct vertex_core_refcount open_cnt;
+ struct vertex_core_refcount start_cnt;
+ const struct vertex_ioctl_ops *ioc_ops;
+ DECLARE_BITMAP(vctx_map, VERTEX_MAX_CONTEXT);
+ struct list_head vctx_list;
+ unsigned int vctx_count;
+
+ struct vertex_device *device;
+ struct vertex_system *system;
+};
+
+int vertex_core_probe(struct vertex_device *device);
+void vertex_core_remove(struct vertex_core *core);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+
+#include "vertex-log.h"
+#include "vertex-device.h"
+#include "vertex-graphmgr.h"
+#include "vertex-graph.h"
+#include "vertex-debug.h"
+
+#define DEBUG_FS_ROOT_NAME "vertex"
+#define DEBUG_FS_LOGFILE_NAME "fw-msg"
+#define DEBUG_FS_IMGFILE_NAME "dump-img"
+#define DEBUG_FS_GRPFILE_NAME "graph"
+#define DEBUG_FS_BUFFILE_NAME "buffer"
+
+#define DEBUG_MONITORING_PERIOD (HZ * 5)
+#define CHUNK_SIZE (32)
+
+/* Device handle captured at debug-probe time for the debugfs helpers. */
+static struct vertex_device *debug_device;
+
+/* Dump the firmware debug log region to VERTEX_DEBUG_BIN_PATH/
+ * vertex_log.bin. Returns -ENOMEM when the debug region was never
+ * mapped, otherwise the vertex_binary_write() result.
+ */
+int vertex_debug_write_log_binary(void)
+{
+ int ret;
+ struct vertex_system *sys;
+ struct vertex_binary *bin;
+
+ vertex_enter();
+ sys = &debug_device->system;
+ bin = &sys->binary;
+
+ if (!sys->memory.debug.kvaddr)
+ return -ENOMEM;
+
+ ret = vertex_binary_write(bin, VERTEX_DEBUG_BIN_PATH,
+ "vertex_log.bin",
+ sys->memory.debug.kvaddr,
+ sys->memory.debug.size);
+ if (!ret)
+ vertex_info("%s/vertex_log.bin was created for debugging\n",
+ VERTEX_DEBUG_BIN_PATH);
+
+ vertex_leave();
+ return ret;
+}
+
+/* Ask the system control ops to dump the debug registers; always
+ * returns 0.
+ */
+int vertex_debug_dump_debug_regs(void)
+{
+ struct vertex_system *sys;
+
+ vertex_enter();
+ sys = &debug_device->system;
+
+ sys->ctrl_ops->debug_dump(sys);
+ vertex_leave();
+ return 0;
+}
+
+/* Parse a decimal ("-12", "+12"), trailing-'h' hex ("1Fh"), or
+ * 0-prefixed (kstrtoul base-0 autodetect) number from @psz_buf.
+ * Returns the parsed value, or 0 on parse failure.
+ *
+ * Fixes two defects in the previous version: kstrtoul() was given a
+ * NULL result pointer (it dereferences it unconditionally), and the
+ * function returned kstrtoul()'s 0/-errno status instead of the parsed
+ * value.
+ */
+int vertex_debug_atoi(const char *psz_buf)
+{
+ const char *pch = psz_buf;
+ unsigned long val = 0;
+ int base;
+
+ while (isspace(*pch))
+ pch++;
+
+ if (*pch == '-' || *pch == '+') {
+ base = 10;
+ pch++;
+ } else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') {
+ base = 16;
+ } else {
+ base = 0;
+ }
+
+ if (kstrtoul(pch, base, &val))
+ return 0;
+
+ return (int)val;
+}
+
+/*
+ * Format a bitmap as comma-separated 32-bit hex chunks, most-significant
+ * chunk first (same layout as bitmap_scnprintf). Returns the number of
+ * characters written into buf (excluding the terminating NUL).
+ */
+int vertex_debug_bitmap_scnprintf(char *buf, unsigned int buflen,
+		const unsigned long *maskp, int nmaskbits)
+{
+	int chunksz;
+	int idx;
+	unsigned int chunkmask;
+	int word, bit, len;
+	unsigned long val;
+	const char *sep = "";
+
+	/* First (most significant) chunk may be partial. */
+	chunksz = nmaskbits & (CHUNK_SIZE - 1);
+	if (chunksz == 0)
+		chunksz = CHUNK_SIZE;
+
+	/* Walk chunks from the highest bit index down to 0. */
+	for (idx = ALIGN(nmaskbits, CHUNK_SIZE) - CHUNK_SIZE, len = 0; idx >= 0;
+			idx -= CHUNK_SIZE) {
+		/* 1ULL avoids UB when chunksz == 32. */
+		chunkmask = ((1ULL << chunksz) - 1);
+		word = idx / BITS_PER_LONG;
+		bit = idx % BITS_PER_LONG;
+		val = (maskp[word] >> bit) & chunkmask;
+		len += scnprintf(buf + len, buflen - len, "%s%0*lx", sep,
+				(chunksz + 3) / 4, val);
+		chunksz = CHUNK_SIZE;
+		sep = ",";
+	}
+
+	return len;
+}
+
+/*
+ * Append a formatted fragment to the debug-log sentence buffer,
+ * truncating at DEBUG_SENTENCE_MAX and keeping NUL termination.
+ *
+ * Fix: renamed from vertex_dmsg_concate to vertex_debug_log_concate so
+ * it matches the prototype in vertex-debug.h and the DLOG() macro
+ * (the old name would fail to link with DEBUG_LOG_CONCATE_ENABLE set).
+ */
+void vertex_debug_log_concate(struct vertex_debug_log *log,
+		const char *fmt, ...)
+{
+	va_list ap;
+	char term[50];
+	size_t size;
+
+	va_start(ap, fmt);
+	vsnprintf(term, sizeof(term), fmt, ap);
+	va_end(ap);
+
+	if (log->dsentence_pos >= DEBUG_SENTENCE_MAX) {
+		vertex_err("debug message(%zu) over max\n", log->dsentence_pos);
+		return;
+	}
+
+	/* Copy at most the remaining space, reserving room for the NUL. */
+	size = min((DEBUG_SENTENCE_MAX - log->dsentence_pos - 1),
+			strlen(term));
+	strncpy(log->dsentence + log->dsentence_pos, term, size);
+	log->dsentence_pos += size;
+	log->dsentence[log->dsentence_pos] = 0;
+}
+
+/*
+ * Return the accumulated debug sentence and reset the write position
+ * so the buffer can be reused by the next DLOG sequence.
+ *
+ * Fix: renamed from vertex_dmsg_print to vertex_debug_log_print to
+ * match the prototype in vertex-debug.h and the DLOG_OUT() macro.
+ */
+char *vertex_debug_log_print(struct vertex_debug_log *log)
+{
+	log->dsentence_pos = 0;
+	return log->dsentence;
+}
+
+/*
+ * Hex-dump a memory range to the kernel log with a "[VIPx-VERTEX]"
+ * prefixed tag (32 bytes per row, 4-byte groups, offsets, no ASCII).
+ */
+void vertex_debug_memdump(const char *prefix, void *start, size_t size)
+{
+	char log[30];
+
+	snprintf(log, sizeof(log), "[VIPx-VERTEX]%s ", prefix);
+	print_hex_dump(KERN_DEBUG, log, DUMP_PREFIX_OFFSET, 32, 4, start,
+			size, false);
+}
+
+/* debugfs open: stash the inode's private data (struct vertex_debug). */
+static int vertex_debug_log_open(struct inode *inode, struct file *file)
+{
+	vertex_enter();
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * debugfs read handler for the "fw-msg" node: copy (up to
+ * VERTEX_DEBUG_SIZE bytes of) the firmware debug region to userspace.
+ *
+ * Fix: on copy_to_user() failure the old code fell back to memcpy()
+ * into the __user pointer — a kernel write through an unchecked user
+ * address. Return -EFAULT instead.
+ * NOTE(review): *ppos is ignored, so every read returns from offset 0;
+ * presumably callers read once — confirm, or switch to
+ * simple_read_from_buffer().
+ */
+static ssize_t vertex_debug_log_read(struct file *file, char __user *user_buf,
+		size_t buf_len, loff_t *ppos)
+{
+	size_t size;
+	struct vertex_debug *debug;
+	struct vertex_system *system;
+
+	vertex_enter();
+	debug = file->private_data;
+	if (!debug) {
+		vertex_err("Cannot find vertex_debug\n");
+		return 0;
+	}
+
+	system = debug->system;
+	if (!system) {
+		vertex_err("Cannot find vertex_system\n");
+		return 0;
+	}
+
+	if (buf_len > VERTEX_DEBUG_SIZE)
+		size = VERTEX_DEBUG_SIZE;
+	else
+		size = buf_len;
+
+	if (!system->memory.debug.kvaddr) {
+		vertex_err("Cannot find debug region for vertex\n");
+		return 0;
+	}
+
+	if (copy_to_user(user_buf, system->memory.debug.kvaddr, size))
+		return -EFAULT;
+
+	vertex_leave();
+	return size;
+}
+
+/*
+ * seq_file show handler for the "graph" debugfs node: print one line of
+ * scheduling counters per active graph.
+ *
+ * Fix: the header used "%7.s" — width 7 with precision 0, which prints
+ * only padding so every column title came out blank. Use "%7s".
+ * NOTE(review): the header has a "period" column with no matching value
+ * in the data row; counters are printed one column left of their title
+ * from "input" on — confirm intended layout.
+ */
+static int __vertex_debug_grp_show(struct seq_file *s, void *unused)
+{
+	unsigned int idx;
+	struct vertex_debug *debug;
+	struct vertex_graphmgr *gmgr;
+	struct vertex_graph *graph;
+
+	vertex_enter();
+	debug = s->private;
+
+	seq_printf(s, "%7s %7s %7s %7s %7s %7s %7s\n", "graph", "prio",
+			"period", "input", "done", "cancel", "recent");
+
+	gmgr = &debug->system->graphmgr;
+	mutex_lock(&gmgr->mlock);
+	for (idx = 0; idx < VERTEX_MAX_GRAPH; ++idx) {
+		graph = gmgr->graph[idx];
+
+		if (!graph)
+			continue;
+
+		seq_printf(s, "%2d(%3d) %7d %7d %7d %7d %7d\n",
+				graph->idx, graph->uid, graph->priority,
+				graph->input_cnt, graph->done_cnt,
+				graph->cancel_cnt, graph->recent);
+	}
+	mutex_unlock(&gmgr->mlock);
+
+	vertex_leave();
+	return 0;
+}
+
+/* debugfs open for "graph": route through the seq_file single_open. */
+static int vertex_debug_grp_open(struct inode *inode, struct file *file)
+{
+	vertex_check();
+	return single_open(file, __vertex_debug_grp_show, inode->i_private);
+}
+
+/* "buffer" node show handler: placeholder, prints nothing yet. */
+static int __vertex_debug_buf_show(struct seq_file *s, void *unused)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+static int vertex_debug_buf_open(struct inode *inode, struct file *file)
+{
+	vertex_enter();
+	vertex_leave();
+	return single_open(file, __vertex_debug_buf_show, inode->i_private);
+}
+
+/* debugfs open for "dump-img": stash struct vertex_debug for read(). */
+static int vertex_debug_img_open(struct inode *inode, struct file *file)
+{
+	vertex_enter();
+	if (inode->i_private)
+		file->private_data = inode->i_private;
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * debugfs read handler for "dump-img": stream the staged image dump to
+ * userspace in caller-sized pieces, advancing kvaddr/offset/length and
+ * resetting the dump state once fully consumed (size == 0).
+ *
+ * Fix: on copy_to_user() failure the old code fell back to memcpy()
+ * into the __user pointer — a kernel write through an unchecked user
+ * address. Return -EFAULT instead (state is left untouched so the
+ * caller may retry).
+ */
+static ssize_t vertex_debug_img_read(struct file *file, char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	struct vertex_debug *debug;
+	struct vertex_debug_imgdump *imgdump;
+
+	vertex_enter();
+	debug = file->private_data;
+	imgdump = &debug->imgdump;
+
+	if (!imgdump->kvaddr) {
+		vertex_err("kvaddr of imgdump for debugging is NULL\n");
+		return 0;
+	}
+
+	/* TODO check */
+	//if (!imgdump->cookie) {
+	//	vertex_err("cookie is NULL\n");
+	//	return 0;
+	//}
+
+	if (len <= imgdump->length)
+		size = len;
+	else
+		size = imgdump->length;
+
+	/* Dump fully consumed: reset state and signal EOF. */
+	if (!size) {
+		imgdump->cookie = NULL;
+		imgdump->kvaddr = NULL;
+		imgdump->length = 0;
+		imgdump->offset = 0;
+		return 0;
+	}
+
+	/* HACK for test */
+	memset(imgdump->kvaddr, 0x88, size / 2);
+	/* TODO check */
+	//vb2_ion_sync_for_device(imgdump->cookie, imgdump->offset,
+	//		size, DMA_FROM_DEVICE);
+	if (copy_to_user(user_buf, imgdump->kvaddr, size))
+		return -EFAULT;
+
+	imgdump->offset += size;
+	imgdump->length -= size;
+	imgdump->kvaddr = (char *)imgdump->kvaddr + size;
+
+	vertex_leave();
+	return size;
+}
+
+/* "dump-img" write handler: accepted but ignored (placeholder). */
+static ssize_t vertex_debug_img_write(struct file *file,
+		const char __user *user_buf, size_t len, loff_t *ppos)
+{
+	vertex_enter();
+	vertex_leave();
+	return len;
+}
+
+/* debugfs fops for the four nodes created in vertex_debug_probe(). */
+static const struct file_operations vertex_debug_log_fops = {
+	.open	= vertex_debug_log_open,
+	.read	= vertex_debug_log_read,
+	.llseek	= default_llseek
+};
+
+static const struct file_operations vertex_debug_grp_fops = {
+	.open	= vertex_debug_grp_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations vertex_debug_buf_fops = {
+	.open	= vertex_debug_buf_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations vertex_debug_img_fops = {
+	.open	= vertex_debug_img_open,
+	.read	= vertex_debug_img_read,
+	.write	= vertex_debug_img_write,
+	.llseek	= default_llseek
+};
+
+/*
+ * Watchdog timer callback (legacy timer API: data carries the
+ * struct vertex_debug pointer). Every DEBUG_MONITORING_PERIOD it
+ * compares the graph manager's tick/sched counters against the values
+ * captured last period; if they have not advanced, the timer thread or
+ * scheduler is assumed stuck and diagnostic state is dumped. Re-arms
+ * itself unless monitoring has been stopped.
+ */
+static void __vertex_debug_monitor_fn(unsigned long data)
+{
+	struct vertex_debug *debug;
+	struct vertex_debug_monitor *monitor;
+	struct vertex_graphmgr *gmgr;
+	struct vertex_system *system;
+	struct vertex_interface *itf;
+
+	vertex_enter();
+	debug = (struct vertex_debug *)data;
+	monitor = &debug->monitor;
+	system = debug->system;
+	gmgr = &system->graphmgr;
+	itf = &system->interface;
+
+	/* Stopped: do not re-arm. */
+	if (!test_bit(VERTEX_DEBUG_STATE_START, &debug->state))
+		return;
+
+	if (monitor->tick_cnt == gmgr->tick_cnt)
+		vertex_err("timer thread is stuck (%u,%u)\n",
+				monitor->tick_cnt, gmgr->tick_pos);
+
+	if (monitor->sched_cnt == gmgr->sched_cnt) {
+		struct vertex_graph *graph;
+		unsigned int idx;
+
+		vertex_dbg("[DEBUGFS] GRAPH\n");
+		for (idx = 0; idx < VERTEX_MAX_GRAPH; idx++) {
+			graph = gmgr->graph[idx];
+			if (!graph)
+				continue;
+
+			vertex_graph_print(graph);
+		}
+
+		vertex_dbg("[DEBUGFS] GRAPH-MGR\n");
+		vertex_taskdesc_print(gmgr);
+
+		vertex_dbg("[DEBUGFS] INTERFACE-MGR\n");
+		vertex_task_print_all(&itf->taskmgr);
+	}
+
+	/* Snapshot counters for the next period's comparison. */
+	monitor->tick_cnt = gmgr->tick_cnt;
+	monitor->sched_cnt = gmgr->sched_cnt;
+	monitor->done_cnt = itf->done_cnt;
+
+	monitor->time_cnt++;
+	mod_timer(&monitor->timer, jiffies + DEBUG_MONITORING_PERIOD);
+	vertex_leave();
+}
+
+/*
+ * Stop monitoring: clear the START bit so the timer callback will not
+ * re-arm. Idempotent; always returns 0.
+ */
+static int __vertex_debug_stop(struct vertex_debug *debug)
+{
+	struct vertex_debug_monitor *monitor;
+
+	vertex_enter();
+	monitor = &debug->monitor;
+
+	if (!test_bit(VERTEX_DEBUG_STATE_START, &debug->state))
+		goto p_end;
+
+	/* TODO check debug */
+	//del_timer_sync(&monitor->timer);
+	clear_bit(VERTEX_DEBUG_STATE_START, &debug->state);
+
+	vertex_leave();
+p_end:
+	return 0;
+}
+
+/*
+ * Arm the monitoring watchdog: snapshot the current counters and set up
+ * the periodic timer (legacy init_timer/data API). The add_timer() call
+ * is still commented out, so only the state bit is set for now.
+ * Always returns 0.
+ */
+int vertex_debug_start(struct vertex_debug *debug)
+{
+	struct vertex_system *system = debug->system;
+	struct vertex_graphmgr *gmgr = &system->graphmgr;
+	struct vertex_debug_monitor *monitor = &debug->monitor;
+	struct vertex_interface *itf = &system->interface;
+
+	vertex_enter();
+	monitor->time_cnt = 0;
+	monitor->tick_cnt = gmgr->tick_cnt;
+	monitor->sched_cnt = gmgr->sched_cnt;
+	monitor->done_cnt = itf->done_cnt;
+
+	init_timer(&monitor->timer);
+	monitor->timer.expires = jiffies + DEBUG_MONITORING_PERIOD;
+	monitor->timer.data = (unsigned long)debug;
+	monitor->timer.function = __vertex_debug_monitor_fn;
+	/* TODO check debug */
+	//add_timer(&monitor->timer);
+
+	set_bit(VERTEX_DEBUG_STATE_START, &debug->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Public stop entry point: thin wrapper over __vertex_debug_stop(). */
+int vertex_debug_stop(struct vertex_debug *debug)
+{
+	vertex_check();
+	return __vertex_debug_stop(debug);
+}
+
+/* Per-open hook: no per-session debug state yet. */
+int vertex_debug_open(struct vertex_debug *debug)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+/* Per-close hook: ensure monitoring is stopped. */
+int vertex_debug_close(struct vertex_debug *debug)
+{
+	vertex_check();
+	return __vertex_debug_stop(debug);
+}
+
+/*
+ * Create the debugfs hierarchy ("vertex/" with fw-msg, graph, buffer,
+ * dump-img nodes) and record the device in the file-scope debug_device
+ * used by the global dump helpers. debugfs failures are logged but not
+ * fatal; always returns 0.
+ *
+ * Fix: corrected "Filed to create" typos to "Failed to create" in the
+ * four error messages.
+ */
+int vertex_debug_probe(struct vertex_device *device)
+{
+	struct vertex_debug *debug;
+
+	vertex_enter();
+	debug_device = device;
+	debug = &device->debug;
+	debug->system = &device->system;
+	debug->state = 0;
+
+	debug->root = debugfs_create_dir(DEBUG_FS_ROOT_NAME, NULL);
+	if (!debug->root) {
+		vertex_err("Failed to create debug file[%s]\n",
+				DEBUG_FS_ROOT_NAME);
+		goto p_end;
+	}
+
+	debug->logfile = debugfs_create_file(DEBUG_FS_LOGFILE_NAME, 0400,
+			debug->root, debug, &vertex_debug_log_fops);
+	if (!debug->logfile)
+		vertex_err("Failed to create debug file[%s]\n",
+				DEBUG_FS_LOGFILE_NAME);
+
+	debug->grpfile = debugfs_create_file(DEBUG_FS_GRPFILE_NAME, 0400,
+			debug->root, debug, &vertex_debug_grp_fops);
+	if (!debug->grpfile)
+		vertex_err("Failed to create debug file[%s]\n",
+				DEBUG_FS_GRPFILE_NAME);
+
+	debug->buffile = debugfs_create_file(DEBUG_FS_BUFFILE_NAME, 0400,
+			debug->root, debug, &vertex_debug_buf_fops);
+	if (!debug->buffile)
+		vertex_err("Failed to create debug file[%s]\n",
+				DEBUG_FS_BUFFILE_NAME);
+
+	debug->imgdump.file = debugfs_create_file(DEBUG_FS_IMGFILE_NAME, 0400,
+			debug->root, debug, &vertex_debug_img_fops);
+	if (!debug->imgdump.file)
+		vertex_err("Failed to create debug file[%s]\n",
+				DEBUG_FS_IMGFILE_NAME);
+
+	vertex_leave();
+p_end:
+	return 0;
+}
+
+/* Tear down the whole debugfs tree created in vertex_debug_probe(). */
+void vertex_debug_remove(struct vertex_debug *debug)
+{
+	debugfs_remove_recursive(debug->root);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_DEBUG_H__
+#define __VERTEX_DEBUG_H__
+
+#include <linux/timer.h>
+
+#include "vertex-config.h"
+#include "vertex-system.h"
+
+#ifdef DEBUG_LOG_CONCATE_ENABLE
+#define DLOG_INIT() struct vertex_debug_log dlog = \
+ { .dsentence_pos = 0 }
+#define DLOG(fmt, ...) \
+ vertex_debug_log_concate(&dlog, fmt, ##__VA_ARGS__)
+#define DLOG_OUT() vertex_debug_log_print(&dlog)
+#else
+#define DLOG_INIT()
+#define DLOG(fmt, ...)
+#define DLOG_OUT() "FORBIDDEN HISTORY"
+#endif
+#define DEBUG_SENTENCE_MAX (300)
+
+struct vertex_device;
+
+/* State of an in-progress image dump streamed via the debugfs node. */
+struct vertex_debug_imgdump {
+	struct dentry			*file;
+	unsigned int			target_graph;
+	unsigned int			target_chain;
+	unsigned int			target_pu;
+	unsigned int			target_index;
+	void				*kvaddr;	/* current read cursor */
+	void				*cookie;
+	size_t				length;		/* bytes remaining */
+	size_t				offset;		/* bytes consumed */
+};
+
+/* Periodic watchdog: counter snapshots compared each timer period. */
+struct vertex_debug_monitor {
+	unsigned int			time_cnt;
+	unsigned int			tick_cnt;
+	unsigned int			sched_cnt;
+	unsigned int			done_cnt;
+	struct timer_list		timer;
+};
+
+enum vertex_debug_state {
+	VERTEX_DEBUG_STATE_START,
+};
+
+/* Top-level debug context embedded in struct vertex_device. */
+struct vertex_debug {
+	unsigned long			state;	/* vertex_debug_state bits */
+	struct vertex_system		*system;
+
+	struct dentry			*root;
+	struct dentry			*logfile;
+	struct dentry			*grpfile;
+	struct dentry			*buffile;
+
+	struct vertex_debug_imgdump	imgdump;
+	struct vertex_debug_monitor	monitor;
+};
+
+/* Accumulator used by the DLOG() message-concatenation macros. */
+struct vertex_debug_log {
+	size_t				dsentence_pos;
+	char				dsentence[DEBUG_SENTENCE_MAX];
+};
+
+int vertex_debug_write_log_binary(void);
+int vertex_debug_dump_debug_regs(void);
+
+int vertex_debug_atoi(const char *psz_buf);
+int vertex_debug_bitmap_scnprintf(char *buf, unsigned int buflen,
+ const unsigned long *maskp, int nmaskbits);
+
+void vertex_debug_log_concate(struct vertex_debug_log *log,
+ const char *fmt, ...);
+char *vertex_debug_log_print(struct vertex_debug_log *log);
+
+void vertex_debug_memdump(const char *prefix, void *start, size_t size);
+
+int vertex_debug_start(struct vertex_debug *debug);
+int vertex_debug_stop(struct vertex_debug *debug);
+int vertex_debug_open(struct vertex_debug *debug);
+int vertex_debug_close(struct vertex_debug *debug);
+
+int vertex_debug_probe(struct vertex_device *device);
+void vertex_debug_remove(struct vertex_debug *debug);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/exynos_iovmm.h>
+
+#include "vertex-log.h"
+#include "vertex-graph.h"
+#include "vertex-device.h"
+
+/* Device-specific IOMMU fault handling: placeholder for now. */
+void __vertex_fault_handler(struct vertex_device *device)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * IOMMU fault callback registered via iovmm_set_fault_handler(): log
+ * the faulting device-virtual address, dump debug registers, then
+ * delegate to __vertex_fault_handler(). Returns -EINVAL (fault not
+ * recovered).
+ */
+static int __attribute__((unused)) vertex_fault_handler(
+		struct iommu_domain *domain, struct device *dev,
+		unsigned long fault_addr, int fault_flag, void *token)
+{
+	struct vertex_device *device;
+
+	pr_err("< VERTEX FAULT HANDLER >\n");
+	pr_err("Device virtual(0x%lX) is invalid access\n", fault_addr);
+
+	device = dev_get_drvdata(dev);
+	vertex_debug_dump_debug_regs();
+
+	__vertex_fault_handler(device);
+
+	return -EINVAL;
+}
+
+#if defined(CONFIG_PM_SLEEP)
+/* System sleep (S2R) suspend hook: nothing to save yet. */
+static int vertex_device_suspend(struct device *dev)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+/* System sleep (S2R) resume hook: nothing to restore yet. */
+static int vertex_device_resume(struct device *dev)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+#endif
+
+/* Runtime-PM suspend: power down the vertex system block. */
+static int vertex_device_runtime_suspend(struct device *dev)
+{
+	int ret;
+	struct vertex_device *device;
+
+	vertex_enter();
+	device = dev_get_drvdata(dev);
+
+	ret = vertex_system_suspend(&device->system);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/* Runtime-PM resume: power up the vertex system block. */
+static int vertex_device_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct vertex_device *device;
+
+	vertex_enter();
+	device = dev_get_drvdata(dev);
+
+	ret = vertex_system_resume(&device->system);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/* Sleep + runtime PM callbacks wired into the platform driver below. */
+static const struct dev_pm_ops vertex_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(
+			vertex_device_suspend,
+			vertex_device_resume)
+	SET_RUNTIME_PM_OPS(
+			vertex_device_runtime_suspend,
+			vertex_device_runtime_resume,
+			NULL)
+};
+
+/*
+ * Transition OPEN -> START: start the system, then the debug monitor.
+ * Idempotent if already started; fails with -EINVAL if not opened.
+ * Under TEMP_RT_FRAMEWORK_TEST only the state bit is toggled.
+ */
+static int __vertex_device_start(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+	if (!test_bit(VERTEX_DEVICE_STATE_OPEN, &device->state)) {
+		ret = -EINVAL;
+		vertex_err("device was not opend yet (%lx)\n", device->state);
+		goto p_err_state;
+	}
+
+	if (test_bit(VERTEX_DEVICE_STATE_START, &device->state))
+		return 0;
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+	set_bit(VERTEX_DEVICE_STATE_START, &device->state);
+	return 0;
+#endif
+	ret = vertex_system_start(&device->system);
+	if (ret)
+		goto p_err_system;
+
+	ret = vertex_debug_start(&device->debug);
+	if (ret)
+		goto p_err_debug;
+
+	set_bit(VERTEX_DEVICE_STATE_START, &device->state);
+
+	vertex_leave();
+	return 0;
+p_err_debug:
+	/* Unwind in reverse order of acquisition. */
+	vertex_system_stop(&device->system);
+p_err_system:
+p_err_state:
+	return ret;
+}
+
+/*
+ * Transition START -> OPEN: stop debug monitoring and the system.
+ * Idempotent; always returns 0.
+ */
+static int __vertex_device_stop(struct vertex_device *device)
+{
+	vertex_enter();
+	if (!test_bit(VERTEX_DEVICE_STATE_START, &device->state))
+		return 0;
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+	clear_bit(VERTEX_DEVICE_STATE_START, &device->state);
+	return 0;
+#endif
+	vertex_debug_stop(&device->debug);
+	vertex_system_stop(&device->system);
+	clear_bit(VERTEX_DEVICE_STATE_START, &device->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Power the device up via runtime PM (or directly when CONFIG_PM is
+ * off). Returns 0 on success or a negative error code.
+ *
+ * Fix: pm_runtime_get_sync() returns 1 (a positive value) when the
+ * device was already active — the old "if (ret)" treated that success
+ * as an error and returned 1 to the caller. Only negative returns are
+ * errors; normalize success to 0.
+ */
+static int __vertex_device_power_on(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+#if defined(CONFIG_PM)
+	ret = pm_runtime_get_sync(device->dev);
+	if (ret < 0) {
+		vertex_err("Failed to get pm_runtime sync (%d)\n", ret);
+		goto p_err;
+	}
+	ret = 0;
+#else
+	ret = vertex_device_runtime_resume(device->dev);
+	if (ret)
+		goto p_err;
+#endif
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/*
+ * Power the device down via runtime PM (or directly when CONFIG_PM is
+ * off). Returns 0 on success or a negative error code.
+ *
+ * Fix: mirror __vertex_device_power_on() — pm_runtime_put_sync() can
+ * return a positive value without it being a failure; treat only
+ * negative returns as errors and normalize success to 0.
+ */
+static int __vertex_device_power_off(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+#if defined(CONFIG_PM)
+	ret = pm_runtime_put_sync(device->dev);
+	if (ret < 0) {
+		vertex_err("Failed to put pm_runtime sync (%d)\n", ret);
+		goto p_err;
+	}
+	ret = 0;
+#else
+	ret = vertex_device_runtime_suspend(device->dev);
+	if (ret)
+		goto p_err;
+#endif
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/* Public start entry point: thin wrapper over __vertex_device_start(). */
+int vertex_device_start(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+	ret = __vertex_device_start(device);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/* Public stop entry point: thin wrapper over __vertex_device_stop(). */
+int vertex_device_stop(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+	ret = __vertex_device_stop(device);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+p_err:
+	return ret;
+}
+
+/*
+ * Open sequence: system open -> debug open -> power on -> firmware
+ * boot; on any failure unwind the previously-acquired stages in
+ * reverse order. Re-opening an already-open device is -EINVAL.
+ * Under TEMP_RT_FRAMEWORK_TEST only the state bit is set.
+ */
+int vertex_device_open(struct vertex_device *device)
+{
+	int ret;
+
+	vertex_enter();
+	if (test_bit(VERTEX_DEVICE_STATE_OPEN, &device->state)) {
+		ret = -EINVAL;
+		vertex_warn("device was already opened\n");
+		goto p_err_state;
+	}
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+	device->system.graphmgr.current_model = NULL;
+	set_bit(VERTEX_DEVICE_STATE_OPEN, &device->state);
+	return 0;
+#endif
+
+	ret = vertex_system_open(&device->system);
+	if (ret)
+		goto p_err_system;
+
+	ret = vertex_debug_open(&device->debug);
+	if (ret)
+		goto p_err_debug;
+
+	ret = __vertex_device_power_on(device);
+	if (ret)
+		goto p_err_power;
+
+	ret = vertex_system_fw_bootup(&device->system);
+	if (ret)
+		goto p_err_boot;
+
+	set_bit(VERTEX_DEVICE_STATE_OPEN, &device->state);
+
+	vertex_leave();
+	return 0;
+p_err_boot:
+	__vertex_device_power_off(device);
+p_err_power:
+	vertex_debug_close(&device->debug);
+p_err_debug:
+	vertex_system_close(&device->system);
+p_err_system:
+p_err_state:
+	return ret;
+}
+
+/*
+ * Close sequence: stop if still started, then power off, close debug
+ * and the system, and clear the OPEN bit. Closing an already-closed
+ * device is a warning, not an error; always returns 0.
+ */
+int vertex_device_close(struct vertex_device *device)
+{
+	vertex_enter();
+	if (!test_bit(VERTEX_DEVICE_STATE_OPEN, &device->state)) {
+		vertex_warn("device is already closed\n");
+		goto p_err;
+	}
+
+	if (test_bit(VERTEX_DEVICE_STATE_START, &device->state))
+		__vertex_device_stop(device);
+
+#ifdef TEMP_RT_FRAMEWORK_TEST
+	clear_bit(VERTEX_DEVICE_STATE_OPEN, &device->state);
+	return 0;
+#endif
+	__vertex_device_power_off(device);
+	vertex_debug_close(&device->debug);
+	vertex_system_close(&device->system);
+	clear_bit(VERTEX_DEVICE_STATE_OPEN, &device->state);
+
+	vertex_leave();
+p_err:
+	return 0;
+}
+
+/*
+ * Platform probe: allocate the device context, probe system/core/debug
+ * sub-modules, register the IOMMU fault handler and enable runtime PM.
+ * On failure, already-probed sub-modules are removed in reverse order.
+ *
+ * Fix: corrected "initilized" typo in the success message.
+ */
+static int vertex_device_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev;
+	struct vertex_device *device;
+
+	vertex_enter();
+	dev = &pdev->dev;
+
+	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		ret = -ENOMEM;
+		vertex_err("Fail to alloc device structure\n");
+		goto p_err_alloc;
+	}
+
+	get_device(dev);
+	device->dev = dev;
+	dev_set_drvdata(dev, device);
+
+	ret = vertex_system_probe(device);
+	if (ret)
+		goto p_err_system;
+
+	ret = vertex_core_probe(device);
+	if (ret)
+		goto p_err_core;
+
+	ret = vertex_debug_probe(device);
+	if (ret)
+		goto p_err_debug;
+
+	iovmm_set_fault_handler(dev, vertex_fault_handler, NULL);
+	pm_runtime_enable(dev);
+
+	vertex_leave();
+	vertex_info("vertex device is initialized\n");
+	return 0;
+p_err_debug:
+	vertex_core_remove(&device->core);
+p_err_core:
+	vertex_system_remove(&device->system);
+p_err_system:
+	devm_kfree(dev, device);
+p_err_alloc:
+	vertex_err("vertex device is not registered (%d)\n", ret);
+	return ret;
+}
+
+/* Platform remove: tear down sub-modules in reverse probe order. */
+static int vertex_device_remove(struct platform_device *pdev)
+{
+	struct vertex_device *device;
+
+	vertex_enter();
+	device = dev_get_drvdata(&pdev->dev);
+
+	vertex_debug_remove(&device->debug);
+	vertex_core_remove(&device->core);
+	vertex_system_remove(&device->system);
+	devm_kfree(device->dev, device);
+	vertex_leave();
+	return 0;
+}
+
+/* Shutdown hook: placeholder, nothing powered down explicitly yet. */
+static void vertex_device_shutdown(struct platform_device *pdev)
+{
+	struct vertex_device *device;
+
+	vertex_enter();
+	device = dev_get_drvdata(&pdev->dev);
+	vertex_leave();
+}
+
+#if defined(CONFIG_OF)
+/* Matches the vipx_vertex node ("samsung,exynos-vipx-vertex") in DT. */
+static const struct of_device_id exynos_vertex_match[] = {
+	{
+		.compatible = "samsung,exynos-vipx-vertex",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, exynos_vertex_match);
+#endif
+
+/* Platform driver glue: probe/remove/shutdown plus PM ops above. */
+static struct platform_driver vertex_driver = {
+	.probe		= vertex_device_probe,
+	.remove		= vertex_device_remove,
+	.shutdown	= vertex_device_shutdown,
+	.driver = {
+		.name	= "exynos-vipx-vertex",
+		.owner	= THIS_MODULE,
+		.pm	= &vertex_pm_ops,
+#if defined(CONFIG_OF)
+		.of_match_table = of_match_ptr(exynos_vertex_match)
+#endif
+	}
+};
+
+/* Module init: register the platform driver. Returns 0 or -errno. */
+int __init vertex_device_init(void)
+{
+	int ret;
+
+	vertex_enter();
+	ret = platform_driver_register(&vertex_driver);
+	if (ret)
+		vertex_err("platform driver for vertex is not registered(%d)\n",
+				ret);
+
+	vertex_leave();
+	return ret;
+}
+
+/* Module exit: unregister the platform driver. */
+void __exit vertex_device_exit(void)
+{
+	vertex_enter();
+	platform_driver_unregister(&vertex_driver);
+	vertex_leave();
+}
+
+MODULE_AUTHOR("@samsung.com>");
+MODULE_DESCRIPTION("Exynos VIPx temp driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_DEVICE_H__
+#define __VERTEX_DEVICE_H__
+
+#include <linux/device.h>
+
+#include "vertex-system.h"
+#include "vertex-core.h"
+#include "vertex-debug.h"
+
+struct vertex_device;
+
+/* Lifecycle bits for vertex_device.state (OPEN implies probed). */
+enum vertex_device_state {
+	VERTEX_DEVICE_STATE_OPEN,
+	VERTEX_DEVICE_STATE_START
+};
+
+/* Top-level driver context, one per platform device. */
+struct vertex_device {
+	struct device		*dev;
+	unsigned long		state;	/* vertex_device_state bits */
+
+	struct vertex_system	system;
+	struct vertex_core	core;
+	struct vertex_debug	debug;
+};
+
+int vertex_device_open(struct vertex_device *device);
+int vertex_device_close(struct vertex_device *device);
+int vertex_device_start(struct vertex_device *device);
+int vertex_device_stop(struct vertex_device *device);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+
+#include "vertex-log.h"
+#include "vertex-core.h"
+#include "vertex-queue.h"
+#include "vertex-context.h"
+#include "vertex-graphmgr.h"
+#include "vertex-graph.h"
+
+#define VERTEX_GRAPH_MAX_PRIORITY (20)
+#define VERTEX_GRAPH_TIMEOUT (3 * HZ)
+
+/*
+ * Register the graph with the graph manager and mark it started.
+ * Idempotent if already started.
+ */
+static int __vertex_graph_start(struct vertex_graph *graph)
+{
+	int ret;
+
+	vertex_enter();
+	if (test_bit(VERTEX_GRAPH_STATE_START, &graph->state))
+		return 0;
+
+	ret = vertex_graphmgr_grp_start(graph->owner, graph);
+	if (ret)
+		goto p_err;
+
+	set_bit(VERTEX_GRAPH_STATE_START, &graph->state);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Stop a started graph: send a CTRL_STOP through the graph manager and
+ * wait for completion, then drain request/prepare/process task counts
+ * (bounded busy-waits), unregister the graph and flush its task pool.
+ *
+ * Fix: the three drain loops tested "retry" but never decremented it,
+ * so a count that never reached zero spun forever instead of giving up
+ * after VERTEX_STOP_WAIT_COUNT iterations.
+ */
+static int __vertex_graph_stop(struct vertex_graph *graph)
+{
+	unsigned int timeout, retry;
+	struct vertex_taskmgr *tmgr;
+	struct vertex_task *control;
+
+	vertex_enter();
+	if (!test_bit(VERTEX_GRAPH_STATE_START, &graph->state))
+		return 0;
+
+	tmgr = &graph->taskmgr;
+	if (tmgr->req_cnt + tmgr->pre_cnt) {
+		control = &graph->control;
+		control->message = VERTEX_CTRL_STOP;
+		vertex_graphmgr_queue(graph->owner, control);
+		timeout = wait_event_timeout(graph->control_wq,
+				control->message == VERTEX_CTRL_STOP_DONE,
+				VERTEX_GRAPH_TIMEOUT);
+		if (!timeout)
+			vertex_err("wait time(%u ms) is expired (%u)\n",
+					jiffies_to_msecs(VERTEX_GRAPH_TIMEOUT),
+					graph->idx);
+	}
+
+	retry = VERTEX_STOP_WAIT_COUNT;
+	while (tmgr->req_cnt) {
+		if (!retry)
+			break;
+		retry--;
+
+		vertex_warn("Waiting request count(%u) to be zero (%u/%u)\n",
+				tmgr->req_cnt, retry, graph->idx);
+		udelay(10);
+	}
+
+	if (tmgr->req_cnt)
+		vertex_err("request count(%u) is remained (%u)\n",
+				tmgr->req_cnt, graph->idx);
+
+	retry = VERTEX_STOP_WAIT_COUNT;
+	while (tmgr->pre_cnt) {
+		if (!retry)
+			break;
+		retry--;
+
+		vertex_warn("Waiting prepare count(%u) to be zero (%u/%u)\n",
+				tmgr->pre_cnt, retry, graph->idx);
+		udelay(10);
+	}
+
+	if (tmgr->pre_cnt)
+		vertex_err("prepare count(%u) is remained (%u)\n",
+				tmgr->pre_cnt, graph->idx);
+
+	retry = VERTEX_STOP_WAIT_COUNT;
+	while (tmgr->pro_cnt) {
+		if (!retry)
+			break;
+		retry--;
+
+		vertex_warn("Waiting process count(%u) to be zero (%u/%u)\n",
+				tmgr->pro_cnt, retry, graph->idx);
+		udelay(10);
+	}
+
+	if (tmgr->pro_cnt)
+		vertex_err("process count(%u) is remained (%u)\n",
+				tmgr->pro_cnt, graph->idx);
+
+	vertex_graphmgr_grp_stop(graph->owner, graph);
+	vertex_task_flush(tmgr);
+	clear_bit(VERTEX_GRAPH_STATE_START, &graph->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Apply the userspace vs4l_graph configuration (id, flags, priority —
+ * clamped to VERTEX_GRAPH_MAX_PRIORITY) to an unconfigured graph.
+ * Reconfiguring returns -EINVAL.
+ */
+static int vertex_graph_set_graph(struct vertex_graph *graph,
+		struct vs4l_graph *ginfo)
+{
+	int ret;
+
+	vertex_enter();
+	if (test_bit(VERTEX_GRAPH_STATE_CONFIG, &graph->state)) {
+		ret = -EINVAL;
+		vertex_err("graph(%u) is already configured (%lu)\n",
+				graph->idx, graph->state);
+		goto p_err;
+	}
+
+	if (ginfo->priority > VERTEX_GRAPH_MAX_PRIORITY) {
+		vertex_warn("graph(%u) priority is over (%u/%u)\n",
+				graph->idx, ginfo->priority,
+				VERTEX_GRAPH_MAX_PRIORITY);
+		ginfo->priority = VERTEX_GRAPH_MAX_PRIORITY;
+	}
+
+	graph->uid = ginfo->id;
+	graph->flags = ginfo->flags;
+	graph->priority = ginfo->priority;
+
+	set_bit(VERTEX_GRAPH_STATE_CONFIG, &graph->state);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Copy the userspace format list into the graph's input or output
+ * format list, reallocating only when the element count changed.
+ * On allocation failure or bad direction, both lists are freed and
+ * reset. Returns 0 or a negative error code.
+ * NOTE(review): when the new count equals the existing count the
+ * format entries are NOT refreshed — presumably intentional caching;
+ * confirm against callers.
+ */
+static int vertex_graph_set_format(struct vertex_graph *graph,
+		struct vs4l_format_list *flist)
+{
+	int ret;
+	struct vertex_format_list *in_flist;
+	struct vertex_format_list *ot_flist;
+	unsigned int cnt;
+
+	vertex_enter();
+	in_flist = &graph->inflist;
+	ot_flist = &graph->otflist;
+
+	if (flist->direction == VS4L_DIRECTION_IN) {
+		if (in_flist->count != flist->count) {
+			kfree(in_flist->formats);
+
+			in_flist->count = flist->count;
+			in_flist->formats = kcalloc(in_flist->count,
+					sizeof(*in_flist->formats), GFP_KERNEL);
+			if (!in_flist->formats) {
+				ret = -ENOMEM;
+				vertex_err("Failed to alloc in_flist formats\n");
+				goto p_err;
+			}
+
+			for (cnt = 0; cnt < in_flist->count; ++cnt) {
+				in_flist->formats[cnt].format =
+					flist->formats[cnt].format;
+				in_flist->formats[cnt].plane =
+					flist->formats[cnt].plane;
+				in_flist->formats[cnt].width =
+					flist->formats[cnt].width;
+				in_flist->formats[cnt].height =
+					flist->formats[cnt].height;
+			}
+		}
+	} else if (flist->direction == VS4L_DIRECTION_OT) {
+		if (ot_flist->count != flist->count) {
+			kfree(ot_flist->formats);
+
+			ot_flist->count = flist->count;
+			ot_flist->formats = kcalloc(ot_flist->count,
+					sizeof(*ot_flist->formats), GFP_KERNEL);
+			if (!ot_flist->formats) {
+				ret = -ENOMEM;
+				vertex_err("Failed to alloc ot_flist formats\n");
+				goto p_err;
+			}
+
+			for (cnt = 0; cnt < ot_flist->count; ++cnt) {
+				ot_flist->formats[cnt].format =
+					flist->formats[cnt].format;
+				ot_flist->formats[cnt].plane =
+					flist->formats[cnt].plane;
+				ot_flist->formats[cnt].width =
+					flist->formats[cnt].width;
+				ot_flist->formats[cnt].height =
+					flist->formats[cnt].height;
+			}
+		}
+	} else {
+		ret = -EINVAL;
+		vertex_err("invalid direction (%d)\n", flist->direction);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	/* On failure both lists are reset to a clean empty state. */
+	kfree(in_flist->formats);
+	in_flist->formats = NULL;
+	in_flist->count = 0;
+
+	kfree(ot_flist->formats);
+	ot_flist->formats = NULL;
+	ot_flist->count = 0;
+	return ret;
+}
+
+/* Mark the graph's parameters dirty; applied later by the manager. */
+static int vertex_graph_set_param(struct vertex_graph *graph,
+		struct vs4l_param_list *plist)
+{
+	vertex_enter();
+	set_bit(VERTEX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags);
+	vertex_leave();
+	return 0;
+}
+
+/* Control hook: currently only dumps the graph state to the log. */
+static int vertex_graph_set_ctrl(struct vertex_graph *graph,
+		struct vs4l_ctrl *ctrl)
+{
+	vertex_enter();
+	vertex_graph_print(graph);
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Queue an input/output container pair: pick a free task under the task
+ * manager lock, record it in the in/out hash tables, stamp it with the
+ * container ids, optionally timestamp it, and hand it to the graph
+ * manager. Requires the graph to be started.
+ */
+static int vertex_graph_queue(struct vertex_graph *graph,
+		struct vertex_container_list *incl,
+		struct vertex_container_list *otcl)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	struct vertex_task *task;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+
+	if (!test_bit(VERTEX_GRAPH_STATE_START, &graph->state)) {
+		ret = -EINVAL;
+		vertex_err("graph(%u) is not started (%lu)\n",
+				graph->idx, graph->state);
+		goto p_err;
+	}
+
+	/* In/out buffers must refer to the same request id; in wins. */
+	if (incl->id != otcl->id) {
+		vertex_warn("buffer id is incoincidence (%u/%u)\n",
+				incl->id, otcl->id);
+		otcl->id = incl->id;
+	}
+
+	taskmgr_e_barrier_irqs(tmgr, 0, flags);
+	task = vertex_task_pick_fre_to_req(tmgr);
+	taskmgr_x_barrier_irqr(tmgr, 0, flags);
+	if (!task) {
+		ret = -ENOMEM;
+		vertex_err("free task is not remained (%u)\n", graph->idx);
+		vertex_task_print_all(tmgr);
+		goto p_err;
+	}
+
+	/* Remember which task serves each container index for deque. */
+	graph->inhash[incl->index] = task->index;
+	graph->othash[otcl->index] = task->index;
+	graph->input_cnt++;
+
+	task->id = incl->id;
+	task->incl = incl;
+	task->otcl = otcl;
+	task->message = VERTEX_TASK_REQUEST;
+	task->param0 = 0;
+	task->param1 = 0;
+	task->param2 = 0;
+	task->param3 = 0;
+	clear_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags);
+
+	if ((incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP)) ||
+			(otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))) {
+		set_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags);
+		vertex_get_timestamp(&task->time[VERTEX_TIME_QUEUE]);
+	}
+
+	vertex_graphmgr_queue(graph->owner, task);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Dequeue a completed container list: look up its task via the in/out
+ * hash, validate state and pointer identity, detach the container, and
+ * once BOTH sides (incl and otcl) are detached move the task back to
+ * the free pool. Requires the graph to be started.
+ */
+static int vertex_graph_deque(struct vertex_graph *graph,
+		struct vertex_container_list *clist)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	unsigned int tidx;
+	struct vertex_task *task;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+
+	if (!test_bit(VERTEX_GRAPH_STATE_START, &graph->state)) {
+		ret = -EINVAL;
+		vertex_err("graph(%u) is not started (%lu)\n",
+				graph->idx, graph->state);
+		goto p_err;
+	}
+
+	if (clist->direction == VS4L_DIRECTION_IN)
+		tidx = graph->inhash[clist->index];
+	else
+		tidx = graph->othash[clist->index];
+
+	/* VERTEX_MAX_TASK in the hash means "no task attached". */
+	if (tidx >= VERTEX_MAX_TASK) {
+		ret = -EINVAL;
+		vertex_err("task index(%u) invalid (%u)\n", tidx, graph->idx);
+		goto p_err;
+	}
+
+	task = &tmgr->task[tidx];
+	if (task->state != VERTEX_TASK_STATE_COMPLETE) {
+		ret = -EINVAL;
+		vertex_err("task(%u) state(%d) is invalid (%u)\n",
+				tidx, task->state, graph->idx);
+		goto p_err;
+	}
+
+	if (clist->direction == VS4L_DIRECTION_IN) {
+		if (task->incl != clist) {
+			ret = -EINVAL;
+			vertex_err("incl ptr is invalid (%u)\n", graph->idx);
+			goto p_err;
+		}
+
+		graph->inhash[clist->index] = VERTEX_MAX_TASK;
+		task->incl = NULL;
+	} else {
+		if (task->otcl != clist) {
+			ret = -EINVAL;
+			vertex_err("otcl ptr is invalid (%u)\n", graph->idx);
+			goto p_err;
+		}
+
+		graph->othash[clist->index] = VERTEX_MAX_TASK;
+		task->otcl = NULL;
+	}
+
+	/* Other side still attached: keep the task until it dequeues too. */
+	if (task->incl || task->otcl)
+		return 0;
+
+	taskmgr_e_barrier_irqs(tmgr, 0, flags);
+	vertex_task_trans_com_to_fre(tmgr, task);
+	taskmgr_x_barrier_irqr(tmgr, 0, flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Queue-ops start hook: wrapper over __vertex_graph_start(). */
+static int vertex_graph_start(struct vertex_graph *graph)
+{
+	int ret;
+
+	vertex_enter();
+	ret = __vertex_graph_start(graph);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Queue-ops stop hook: wrapper over __vertex_graph_stop(). */
+static int vertex_graph_stop(struct vertex_graph *graph)
+{
+	vertex_enter();
+
+	__vertex_graph_stop(graph);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Graph-side implementation of the vertex queue operations. */
+const struct vertex_queue_gops vertex_queue_gops = {
+	.set_graph	= vertex_graph_set_graph,
+	.set_format	= vertex_graph_set_format,
+	.set_param	= vertex_graph_set_param,
+	.set_ctrl	= vertex_graph_set_ctrl,
+	.queue		= vertex_graph_queue,
+	.deque		= vertex_graph_deque,
+	.start		= vertex_graph_start,
+	.stop		= vertex_graph_stop
+};
+
+/*
+ * Handle a control task for this graph.  Only VERTEX_CTRL_STOP is
+ * supported: it is acknowledged by posting VERTEX_CTRL_STOP_DONE to
+ * graph->control and waking any waiter on control_wq.
+ * Returns 0 on success, -EINVAL for an unknown message (after dumping
+ * the task manager state for debugging).
+ */
+static int vertex_graph_control(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+
+	switch (task->message) {
+	case VERTEX_CTRL_STOP:
+		graph->control.message = VERTEX_CTRL_STOP_DONE;
+		wake_up(&graph->control_wq);
+		break;
+	default:
+		ret = -EINVAL;
+		vertex_err("invalid task message(%u) of graph(%u)\n",
+				task->message, graph->idx);
+		vertex_task_print_all(tmgr);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Graph-ops .request handler: move the task from REQUEST to PREPARE
+ * under the task manager lock and optionally record the REQUEST
+ * timestamp.
+ * Returns 0 on success, -EINVAL if the task is not in REQUEST state.
+ */
+static int vertex_graph_request(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+	if (task->state != VERTEX_TASK_STATE_REQUEST) {
+		ret = -EINVAL;
+		vertex_err("task state(%u) is not REQUEST (graph:%u)\n",
+				task->state, graph->idx);
+		goto p_err;
+	}
+
+	/* use the named lock index, consistent with process/cancel/done */
+	taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+	vertex_task_trans_req_to_pre(tmgr, task);
+	taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
+
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags))
+		vertex_get_timestamp(&task->time[VERTEX_TIME_REQUEST]);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Graph-ops .process handler: move the task from PREPARE to PROCESS
+ * under the task manager lock and optionally record the PROCESS
+ * timestamp.
+ * Returns 0 on success, -EINVAL if the task is not in PREPARE state.
+ */
+static int vertex_graph_process(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+	if (task->state != VERTEX_TASK_STATE_PREPARE) {
+		ret = -EINVAL;
+		vertex_err("task state(%u) is not PREPARE (graph:%u)\n",
+				task->state, graph->idx);
+		goto p_err;
+	}
+
+	taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+	vertex_task_trans_pre_to_pro(tmgr, task);
+	taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
+
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags))
+		vertex_get_timestamp(&task->time[VERTEX_TIME_PROCESS]);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Graph-ops .cancel handler: complete a PROCESS-state task with the
+ * INVALID flag set, so the queue layer reports it back as failed.
+ * Returns 0 on success, -EINVAL if the task is not in PROCESS state.
+ *
+ * NOTE(review): this increments done_cnt rather than cancel_cnt even
+ * though the graph keeps a separate cancel_cnt counter - confirm
+ * whether that is intended.
+ */
+static int vertex_graph_cancel(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	struct vertex_queue_list *qlist;
+	struct vertex_container_list *incl, *otcl;
+	unsigned long result;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+	qlist = &graph->vctx->queue_list;
+	incl = task->incl;
+	otcl = task->otcl;
+
+	if (task->state != VERTEX_TASK_STATE_PROCESS) {
+		ret = -EINVAL;
+		vertex_err("task state(%u) is not PROCESS (graph:%u)\n",
+				task->state, graph->idx);
+		goto p_err;
+	}
+
+	/* assumes incl/otcl are non-NULL whenever the task carries the
+	 * TIMESTAMP flag - TODO confirm against the queue path
+	 */
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags)) {
+		vertex_get_timestamp(&task->time[VERTEX_TIME_DONE]);
+
+		if (incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
+			memcpy(incl->timestamp, task->time, sizeof(task->time));
+
+		if (otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
+			memcpy(otcl->timestamp, task->time, sizeof(task->time));
+	}
+
+	taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+	vertex_task_trans_pro_to_com(tmgr, task);
+	taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
+
+	graph->recent = task->id;
+	graph->done_cnt++;
+
+	/* cancelled tasks are reported as DONE but INVALID */
+	result = 0;
+	set_bit(VS4L_CL_FLAG_DONE, &result);
+	set_bit(VS4L_CL_FLAG_INVALID, &result);
+
+	vertex_queue_done(qlist, incl, otcl, result);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Graph-ops .done handler: complete a PROCESS-state task and report it
+ * to the queue layer.  task->param0 carries the hardware return status
+ * (non-zero means NDONE, see the interface thread); in that case the
+ * INVALID flag is added to the result.
+ * Returns 0 on success, -EINVAL if the task is not in PROCESS state.
+ */
+static int vertex_graph_done(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	int ret;
+	struct vertex_taskmgr *tmgr;
+	struct vertex_queue_list *qlist;
+	struct vertex_container_list *incl, *otcl;
+	unsigned long result;
+	unsigned long flags;
+
+	vertex_enter();
+	tmgr = &graph->taskmgr;
+	qlist = &graph->vctx->queue_list;
+	incl = task->incl;
+	otcl = task->otcl;
+
+	if (task->state != VERTEX_TASK_STATE_PROCESS) {
+		ret = -EINVAL;
+		vertex_err("task state(%u) is not PROCESS (graph:%u)\n",
+				task->state, graph->idx);
+		goto p_err;
+	}
+
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &task->flags)) {
+		vertex_get_timestamp(&task->time[VERTEX_TIME_DONE]);
+
+		if (incl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
+			memcpy(incl->timestamp, task->time, sizeof(task->time));
+
+		if (otcl->flags & (1 << VS4L_CL_FLAG_TIMESTAMP))
+			memcpy(otcl->timestamp, task->time, sizeof(task->time));
+	}
+
+	taskmgr_e_barrier_irqs(tmgr, TASKMGR_IDX_0, flags);
+	vertex_task_trans_pro_to_com(tmgr, task);
+	taskmgr_x_barrier_irqr(tmgr, TASKMGR_IDX_0, flags);
+
+	graph->recent = task->id;
+	graph->done_cnt++;
+
+	/* DONE is always reported; INVALID only when the HW returned NDONE
+	 * (both branches of the original if/else set DONE)
+	 */
+	result = 0;
+	set_bit(VS4L_CL_FLAG_DONE, &result);
+	if (task->param0)
+		set_bit(VS4L_CL_FLAG_INVALID, &result);
+
+	vertex_queue_done(qlist, incl, otcl, result);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Graph-ops .update_param handler: currently a no-op stub that always
+ * returns 0 (invoked by the graph manager when the UPDATE_PARAM flag
+ * is set on the graph).
+ */
+static int vertex_graph_update_param(struct vertex_graph *graph,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	vertex_leave();
+	return 0;
+}
+
+/* Per-graph callbacks invoked by the graph manager threads. */
+static const struct vertex_graph_ops vertex_graph_ops = {
+	.control = vertex_graph_control,
+	.request = vertex_graph_request,
+	.process = vertex_graph_process,
+	.cancel = vertex_graph_cancel,
+	.done = vertex_graph_done,
+	.update_param = vertex_graph_update_param
+};
+
+/* Debug hook: currently an empty stub. */
+void vertex_graph_print(struct vertex_graph *graph)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Allocate and initialize a vertex_graph for the given context and
+ * register it with the graph manager.
+ * Returns the new graph on success, or an ERR_PTR() code on failure.
+ */
+struct vertex_graph *vertex_graph_create(struct vertex_context *vctx,
+		void *graphmgr)
+{
+	int ret;
+	struct vertex_graph *graph;
+	struct vertex_taskmgr *tmgr;
+	unsigned int idx;
+
+	vertex_enter();
+	graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+	if (!graph) {
+		ret = -ENOMEM;
+		vertex_err("Failed to allocate graph\n");
+		goto p_err_kzalloc;
+	}
+
+	/* registration assigns graph->idx */
+	ret = vertex_graphmgr_grp_register(graphmgr, graph);
+	if (ret)
+		goto p_err_grp_register;
+
+	graph->owner = graphmgr;
+	graph->gops = &vertex_graph_ops;
+	mutex_init(&graph->local_lock);
+	graph->control.owner = graph;
+	graph->control.message = VERTEX_CTRL_NONE;
+	init_waitqueue_head(&graph->control_wq);
+
+	tmgr = &graph->taskmgr;
+	spin_lock_init(&tmgr->slock);
+	ret = vertex_task_init(tmgr, graph->idx, graph);
+	if (ret)
+		goto p_err_task_init;
+
+	/* VERTEX_MAX_TASK marks an unused hash slot */
+	for (idx = 0; idx < VERTEX_MAX_TASK; ++idx) {
+		graph->inhash[idx] = VERTEX_MAX_TASK;
+		graph->othash[idx] = VERTEX_MAX_TASK;
+	}
+
+	INIT_LIST_HEAD(&graph->gmodel_list);
+	graph->gmodel_count = 0;
+
+	graph->vctx = vctx;
+
+	vertex_leave();
+	return graph;
+p_err_task_init:
+	/* undo the registration so the manager does not keep a pointer to
+	 * the freed graph (and its active count stays balanced)
+	 */
+	vertex_graphmgr_grp_unregister(graphmgr, graph);
+p_err_grp_register:
+	kfree(graph);
+p_err_kzalloc:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Stop the graph, unregister it from its graph manager, release the
+ * format lists and free the graph object.  Always returns 0.
+ */
+int vertex_graph_destroy(struct vertex_graph *graph)
+{
+	vertex_enter();
+	__vertex_graph_stop(graph);
+	vertex_graphmgr_grp_unregister(graph->owner, graph);
+
+	kfree(graph->inflist.formats);
+	graph->inflist.formats = NULL;
+	graph->inflist.count = 0;
+
+	kfree(graph->otflist.formats);
+	graph->otflist.formats = NULL;
+	graph->otflist.count = 0;
+
+	kfree(graph);
+	vertex_leave();
+	return 0;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_GRAPH_H__
+#define __VERTEX_GRAPH_H__
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
+#include "vertex-config.h"
+#include "vs4l.h"
+#include "vertex-taskmgr.h"
+#include "vertex-common-type.h"
+
+/* fixed typo: was "struct vips_context;", but this header references
+ * struct vertex_context (see struct vertex_graph and
+ * vertex_graph_create() below)
+ */
+struct vertex_context;
+struct vertex_graph;
+
+enum vertex_graph_state {
+	VERTEX_GRAPH_STATE_CONFIG,
+	VERTEX_GRAPH_STATE_HENROLL,
+	VERTEX_GRAPH_STATE_HMAPPED,
+	VERTEX_GRAPH_STATE_MMAPPED,
+	VERTEX_GRAPH_STATE_START,
+};
+
+enum vertex_graph_flag {
+	VERTEX_GRAPH_FLAG_UPDATE_PARAM = VS4L_GRAPH_FLAG_END
+};
+
+struct vertex_format {
+	unsigned int	format;
+	unsigned int	plane;
+	unsigned int	width;
+	unsigned int	height;
+};
+
+struct vertex_format_list {
+	unsigned int		count;
+	struct vertex_format	*formats;
+};
+
+/* callbacks provided by vertex-graph.c, invoked by the graph manager */
+struct vertex_graph_ops {
+	int (*control)(struct vertex_graph *graph, struct vertex_task *task);
+	int (*request)(struct vertex_graph *graph, struct vertex_task *task);
+	int (*process)(struct vertex_graph *graph, struct vertex_task *task);
+	int (*cancel)(struct vertex_graph *graph, struct vertex_task *task);
+	int (*done)(struct vertex_graph *graph, struct vertex_task *task);
+	int (*update_param)(struct vertex_graph *graph,
+			struct vertex_task *task);
+};
+
+struct vertex_graph_model {
+	unsigned int			id;
+	struct vertex_common_graph_info	common_ginfo;
+	struct vertex_buffer		*graph;
+	struct vertex_buffer		*temp_buf;
+	struct vertex_buffer		*weight;
+	struct vertex_buffer		*bias;
+
+	struct list_head		list;
+};
+
+struct vertex_graph {
+	unsigned int			idx;
+	unsigned long			state;
+	struct mutex			*global_lock;
+
+	void				*owner;
+	const struct vertex_graph_ops	*gops;
+	struct mutex			local_lock;
+	struct vertex_task		control;
+	wait_queue_head_t		control_wq;
+	struct vertex_taskmgr		taskmgr;
+
+	unsigned int			uid;
+	unsigned long			flags;
+	unsigned int			priority;
+
+	struct vertex_format_list	inflist;
+	struct vertex_format_list	otflist;
+
+	unsigned int			inhash[VERTEX_MAX_TASK];
+	unsigned int			othash[VERTEX_MAX_TASK];
+
+	struct list_head		gmodel_list;
+	unsigned int			gmodel_count;
+
+	/* for debugging */
+	unsigned int			input_cnt;
+	unsigned int			cancel_cnt;
+	unsigned int			done_cnt;
+	unsigned int			recent;
+
+	struct vertex_context		*vctx;
+};
+
+extern const struct vertex_queue_gops vertex_queue_gops;
+extern const struct vertex_vctx_gops vertex_vctx_gops;
+
+void vertex_graph_print(struct vertex_graph *graph);
+
+struct vertex_graph *vertex_graph_create(struct vertex_context *vctx,
+		void *graphmgr);
+int vertex_graph_destroy(struct vertex_graph *graph);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched/types.h>
+
+#include "vertex-log.h"
+#include "vertex-device.h"
+#include "vertex-graphmgr.h"
+
+/* Put a taskdesc on the FREE list and bump the FREE count. */
+static void __vertex_taskdesc_set_free(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_FREE;
+	list_add_tail(&taskdesc->list, &gmgr->tdfre_list);
+	gmgr->tdfre_cnt++;
+	vertex_leave();
+}
+
+/*
+ * Peek at the first FREE taskdesc without removing it from the list.
+ * Returns NULL when the FREE list is empty.
+ */
+static struct vertex_taskdesc *__vertex_taskdesc_get_first_free(
+		struct vertex_graphmgr *gmgr)
+{
+	struct vertex_taskdesc *taskdesc = NULL;
+
+	vertex_enter();
+	if (gmgr->tdfre_cnt)
+		taskdesc = list_first_entry(&gmgr->tdfre_list,
+				struct vertex_taskdesc, list);
+	vertex_leave();
+	return taskdesc;
+}
+
+/* Put a taskdesc on the READY list and bump the READY count. */
+static void __vertex_taskdesc_set_ready(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_READY;
+	list_add_tail(&taskdesc->list, &gmgr->tdrdy_list);
+	gmgr->tdrdy_cnt++;
+	vertex_leave();
+}
+
+/*
+ * Remove and return the first taskdesc on the READY list, or NULL when
+ * the list is empty.  The caller decides its next state.
+ */
+static struct vertex_taskdesc *__vertex_taskdesc_pop_ready(
+		struct vertex_graphmgr *gmgr)
+{
+	struct vertex_taskdesc *taskdesc = NULL;
+
+	vertex_enter();
+	if (gmgr->tdrdy_cnt) {
+		taskdesc = container_of(gmgr->tdrdy_list.next,
+				struct vertex_taskdesc, list);
+
+		list_del(&taskdesc->list);
+		gmgr->tdrdy_cnt--;
+	}
+	vertex_leave();
+	return taskdesc;
+}
+
+/* Append a taskdesc to the tail of the REQUEST list. */
+static void __vertex_taskdesc_set_request(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_REQUEST;
+	list_add_tail(&taskdesc->list, &gmgr->tdreq_list);
+	gmgr->tdreq_cnt++;
+	vertex_leave();
+}
+
+/*
+ * Insert a taskdesc into the REQUEST list immediately before `next`
+ * (list_add_tail on a member node inserts before it), used by the
+ * scheduler to order by priority.
+ */
+static void __vertex_taskdesc_set_request_sched(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc, struct vertex_taskdesc *next)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_REQUEST;
+	list_add_tail(&taskdesc->list, &next->list);
+	gmgr->tdreq_cnt++;
+	vertex_leave();
+}
+
+/* Append a taskdesc to the PROCESS list. */
+static void __vertex_taskdesc_set_process(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_PROCESS;
+	list_add_tail(&taskdesc->list, &gmgr->tdpro_list);
+	gmgr->tdpro_cnt++;
+	vertex_leave();
+}
+
+/* Append a taskdesc to the COMPLETE list. */
+static void __vertex_taskdesc_set_complete(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	taskdesc->state = VERTEX_TASKDESC_STATE_COMPLETE;
+	list_add_tail(&taskdesc->list, &gmgr->tdcom_list);
+	gmgr->tdcom_cnt++;
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc FREE -> READY, warning (and doing nothing) if the
+ * FREE count or the taskdesc state is inconsistent.
+ * (Also fixed the "invlid" typo in the warning message.)
+ */
+static void __vertex_taskdesc_trans_fre_to_rdy(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdfre_cnt) {
+		vertex_warn("tdfre_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_FREE) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdfre_cnt--;
+	__vertex_taskdesc_set_ready(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc READY -> FREE, warning (and doing nothing) on an
+ * inconsistent count or state.  Fixed "invlid" typo in the message.
+ */
+static void __vertex_taskdesc_trans_rdy_to_fre(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdrdy_cnt) {
+		vertex_warn("tdrdy_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_READY) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdrdy_cnt--;
+	__vertex_taskdesc_set_free(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc REQUEST -> PROCESS, warning (and doing nothing) on
+ * an inconsistent count or state.  Fixed "invlid" typo in the message.
+ */
+static void __vertex_taskdesc_trans_req_to_pro(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdreq_cnt) {
+		vertex_warn("tdreq_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_REQUEST) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdreq_cnt--;
+	__vertex_taskdesc_set_process(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc REQUEST -> FREE (cancellation path), warning (and
+ * doing nothing) on an inconsistent count or state.  Fixed "invlid"
+ * typo in the message.
+ */
+static void __vertex_taskdesc_trans_req_to_fre(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdreq_cnt) {
+		vertex_warn("tdreq_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_REQUEST) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdreq_cnt--;
+	__vertex_taskdesc_set_free(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc ALLOC -> PROCESS, warning (and doing nothing) on an
+ * inconsistent count or state.  Fixed "invlid" typo in the message.
+ */
+static void __vertex_taskdesc_trans_alc_to_pro(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdalc_cnt) {
+		vertex_warn("tdalc_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_ALLOC) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdalc_cnt--;
+	__vertex_taskdesc_set_process(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc ALLOC -> FREE (allocation-failure path), warning
+ * (and doing nothing) on an inconsistent count or state.  Fixed
+ * "invlid" typo in the message.
+ */
+static void __vertex_taskdesc_trans_alc_to_fre(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdalc_cnt) {
+		vertex_warn("tdalc_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_ALLOC) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdalc_cnt--;
+	__vertex_taskdesc_set_free(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc PROCESS -> COMPLETE, warning (and doing nothing) on
+ * an inconsistent count or state.  Fixed "invlid" typo in the message.
+ */
+static void __vertex_taskdesc_trans_pro_to_com(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdpro_cnt) {
+		vertex_warn("tdpro_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_PROCESS) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdpro_cnt--;
+	__vertex_taskdesc_set_complete(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc PROCESS -> FREE (process-failure path), warning (and
+ * doing nothing) on an inconsistent count or state.  Fixed "invlid"
+ * typo in the message.
+ */
+static void __vertex_taskdesc_trans_pro_to_fre(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdpro_cnt) {
+		vertex_warn("tdpro_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_PROCESS) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdpro_cnt--;
+	__vertex_taskdesc_set_free(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Move a taskdesc COMPLETE -> FREE (recycle after done), warning (and
+ * doing nothing) on an inconsistent count or state.  Fixed "invlid"
+ * typo in the message.
+ */
+static void __vertex_taskdesc_trans_com_to_fre(struct vertex_graphmgr *gmgr,
+		struct vertex_taskdesc *taskdesc)
+{
+	vertex_enter();
+	if (!gmgr->tdcom_cnt) {
+		vertex_warn("tdcom_cnt is zero\n");
+		return;
+	}
+
+	if (taskdesc->state != VERTEX_TASKDESC_STATE_COMPLETE) {
+		vertex_warn("state(%x) is invalid\n", taskdesc->state);
+		return;
+	}
+
+	list_del(&taskdesc->list);
+	gmgr->tdcom_cnt--;
+	__vertex_taskdesc_set_free(gmgr, taskdesc);
+	vertex_leave();
+}
+
+/*
+ * Send a VERTEX_TASK_INIT command to the hardware interface using an
+ * interface task picked from the interface task manager's free list.
+ * Returns 0 on success or a negative error.
+ *
+ * NOTE(review): on failure the picked itask is not returned to the
+ * free list - possible interface-task slot leak; confirm against the
+ * taskmgr API.
+ */
+static int __vertex_graphmgr_itf_init(struct vertex_interface *itf,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err_pick;
+	}
+
+	itask->id = 0;
+	itask->lock = &graph->local_lock;
+	itask->findex = VERTEX_MAX_TASK;
+	itask->tdindex = VERTEX_MAX_TASKDESC;
+	itask->message = VERTEX_TASK_INIT;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = 0;
+	itask->param3 = 0;
+
+	ret = vertex_hw_init(itf, itask);
+	if (ret)
+		goto p_err_init;
+
+	vertex_leave();
+	return 0;
+p_err_init:
+p_err_pick:
+	return ret;
+}
+
+/*
+ * Send a VERTEX_TASK_DEINIT command to the hardware interface.
+ * Mirrors __vertex_graphmgr_itf_init(); returns 0 or a negative error.
+ *
+ * NOTE(review): the picked itask is not released on failure - see the
+ * same remark on the init path.
+ */
+static int __vertex_graphmgr_itf_deinit(struct vertex_interface *itf,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err_pick;
+	}
+
+	itask->id = 0;
+	itask->lock = &graph->local_lock;
+	itask->findex = VERTEX_MAX_TASK;
+	itask->tdindex = VERTEX_MAX_TASKDESC;
+	itask->message = VERTEX_TASK_DEINIT;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = 0;
+	itask->param3 = 0;
+
+	ret = vertex_hw_deinit(itf, itask);
+	if (ret)
+		goto p_err_deinit;
+
+	vertex_leave();
+	return 0;
+p_err_deinit:
+p_err_pick:
+	return ret;
+}
+
+/*
+ * Send a VERTEX_TASK_CREATE command to the hardware interface.
+ *
+ * Currently DISABLED: the early "return 0" below (marked TODO) skips
+ * the whole body, so the locals after it are dead code and will draw
+ * unused-variable warnings - confirm whether this stub is still
+ * intended.
+ */
+static int __vertex_graphmgr_itf_create(struct vertex_interface *itf,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	/* TODO check */
+	return 0;
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err;
+	}
+
+	itask->id = 0;
+	itask->lock = &graph->local_lock;
+	itask->findex = VERTEX_MAX_TASK;
+	itask->tdindex = VERTEX_MAX_TASKDESC;
+	itask->message = VERTEX_TASK_CREATE;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = 0;
+	itask->param3 = 0;
+
+	ret = vertex_hw_create(itf, itask);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Send a VERTEX_TASK_DESTROY command to the hardware interface.
+ * Returns 0 on success or a negative error.
+ */
+static int __vertex_graphmgr_itf_destroy(struct vertex_interface *itf,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err;
+	}
+
+	itask->id = 0;
+	itask->lock = &graph->local_lock;
+	itask->findex = VERTEX_MAX_TASK;
+	itask->tdindex = VERTEX_MAX_TASKDESC;
+	itask->message = VERTEX_TASK_DESTROY;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = 0;
+	itask->param3 = 0;
+
+	ret = vertex_hw_destroy(itf, itask);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Send a VERTEX_TASK_ALLOCATE command to the hardware interface,
+ * passing the graph's input/output format lists through param2/param3.
+ * Returns 0 on success or a negative error.
+ */
+static int __vertex_graphmgr_itf_config(struct vertex_interface *itf,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err;
+	}
+
+	itask->id = 0;
+	itask->lock = &graph->local_lock;
+	itask->findex = VERTEX_MAX_TASK;
+	itask->tdindex = VERTEX_MAX_TASKDESC;
+	itask->message = VERTEX_TASK_ALLOCATE;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = (unsigned long)&graph->inflist;
+	itask->param3 = (unsigned long)&graph->otflist;
+
+	ret = vertex_hw_config(itf, itask);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Submit one graph task to the hardware: pick an interface task, run a
+ * pending parameter update if the graph flag is set, copy the task's
+ * identity/buffers into the itask, advance the graph task to PROCESS
+ * via gops->process, then hand the itask to vertex_hw_process().
+ * Returns 0 on success or a negative error.
+ *
+ * NOTE(review): the picked itask is not returned to the free list on
+ * the error paths - confirm whether the hw layer reclaims it.
+ */
+static int __vertex_graphmgr_itf_process(struct vertex_interface *itf,
+		struct vertex_graph *graph, struct vertex_task *task)
+{
+	int ret;
+	unsigned long flags;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_task *itask;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	itask = vertex_task_pick_fre_to_req(itaskmgr);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	if (!itask) {
+		ret = -ENOMEM;
+		vertex_err("itask is NULL\n");
+		goto p_err_pick;
+	}
+
+	if (test_bit(VERTEX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags)) {
+		ret = graph->gops->update_param(graph, task);
+		if (ret)
+			goto p_err_update_param;
+
+		clear_bit(VERTEX_GRAPH_FLAG_UPDATE_PARAM, &graph->flags);
+	}
+
+	itask->id = task->id;
+	itask->lock = &graph->local_lock;
+	itask->findex = task->index;
+	itask->tdindex = task->tdindex;
+	itask->message = VERTEX_TASK_PROCESS;
+	itask->param0 = graph->uid;
+	itask->param1 = graph->idx;
+	itask->param2 = 0; /* return : DONE or NDONE */
+	itask->param3 = 0; /* return : error code if param2 is NDONE */
+	itask->flags = task->flags;
+	itask->incl = task->incl;
+	itask->otcl = task->otcl;
+
+	ret = graph->gops->process(graph, task);
+	if (ret)
+		goto p_err_process;
+
+	ret = vertex_hw_process(itf, itask);
+	if (ret)
+		goto p_err_hw_process;
+
+	vertex_leave();
+	return 0;
+p_err_hw_process:
+p_err_process:
+p_err_update_param:
+p_err_pick:
+	return ret;
+}
+
+/*
+ * Scheduler core: drain the READY list into the REQUEST list in
+ * priority order (higher priority inserted earlier), promote every
+ * REQUEST taskdesc to PROCESS (all under tdlock), then submit the
+ * PROCESS list to the hardware.  A taskdesc whose submission fails is
+ * cancelled and returned to the FREE list; successful ones move to
+ * COMPLETE and are finished later by the interface thread.
+ */
+static void __vertex_graphmgr_sched(struct vertex_graphmgr *gmgr)
+{
+	int ret;
+	struct vertex_taskdesc *ready_td, *list_td, *temp;
+	struct vertex_graph *graph;
+	struct vertex_task *task;
+
+	vertex_enter();
+
+	mutex_lock(&gmgr->tdlock);
+
+	while (1) {
+		ready_td = __vertex_taskdesc_pop_ready(gmgr);
+		if (!ready_td)
+			break;
+
+		if (!gmgr->tdreq_cnt) {
+			__vertex_taskdesc_set_request(gmgr, ready_td);
+			continue;
+		}
+
+		/* insert before the first lower-priority request */
+		list_for_each_entry_safe(list_td, temp, &gmgr->tdreq_list,
+				list) {
+			if (ready_td->priority > list_td->priority) {
+				__vertex_taskdesc_set_request_sched(gmgr,
+						ready_td, list_td);
+				break;
+			}
+		}
+
+		/* still READY means no insertion point was found: append */
+		if (ready_td->state == VERTEX_TASKDESC_STATE_READY)
+			__vertex_taskdesc_set_request(gmgr, ready_td);
+	}
+
+	list_for_each_entry_safe(list_td, temp, &gmgr->tdreq_list, list) {
+		__vertex_taskdesc_trans_req_to_pro(gmgr, list_td);
+	}
+
+	mutex_unlock(&gmgr->tdlock);
+
+	/* submission happens outside tdlock */
+	list_for_each_entry_safe(list_td, temp, &gmgr->tdpro_list, list) {
+		graph = list_td->graph;
+		task = list_td->task;
+
+		ret = __vertex_graphmgr_itf_process(gmgr->interface,
+				graph, task);
+		if (ret) {
+			ret = graph->gops->cancel(graph, task);
+			if (ret)
+				return;
+
+			list_td->graph = NULL;
+			list_td->task = NULL;
+			__vertex_taskdesc_trans_pro_to_fre(gmgr, list_td);
+			continue;
+		}
+
+		__vertex_taskdesc_trans_pro_to_com(gmgr, list_td);
+	}
+
+	gmgr->sched_cnt++;
+	vertex_leave();
+}
+
+/*
+ * kthread-worker entry for graph-side tasks.
+ *
+ * VERTEX_TASK_REQUEST: validate the task, attach it to a free taskdesc
+ * and move the taskdesc FREE -> READY, then run the scheduler.
+ *
+ * VERTEX_CTRL_STOP: cancel every READY and REQUEST taskdesc belonging
+ * to this graph (REQUEST list under tdlock), acknowledge the stop via
+ * gops->control, and return without scheduling (see the TODO below).
+ */
+static void vertex_graph_thread(struct kthread_work *work)
+{
+	int ret;
+	struct vertex_task *task;
+	struct vertex_graph *graph;
+	struct vertex_graphmgr *gmgr;
+	struct vertex_taskdesc *taskdesc, *temp;
+
+	vertex_enter();
+	task = container_of(work, struct vertex_task, work);
+	graph = task->owner;
+	gmgr = graph->owner;
+
+	switch (task->message) {
+	case VERTEX_TASK_REQUEST:
+		ret = graph->gops->request(graph, task);
+		if (ret)
+			return;
+
+		taskdesc = __vertex_taskdesc_get_first_free(gmgr);
+		if (!taskdesc) {
+			vertex_err("taskdesc is NULL\n");
+			return;
+		}
+
+		task->tdindex = taskdesc->index;
+		taskdesc->graph = graph;
+		taskdesc->task = task;
+		taskdesc->priority = graph->priority;
+		__vertex_taskdesc_trans_fre_to_rdy(gmgr, taskdesc);
+		break;
+	case VERTEX_CTRL_STOP:
+		list_for_each_entry_safe(taskdesc, temp,
+				&gmgr->tdrdy_list, list) {
+			if (taskdesc->graph->idx != graph->idx)
+				continue;
+
+			ret = graph->gops->cancel(graph, taskdesc->task);
+			if (ret)
+				return;
+
+			taskdesc->graph = NULL;
+			taskdesc->task = NULL;
+			__vertex_taskdesc_trans_rdy_to_fre(gmgr, taskdesc);
+		}
+
+		mutex_lock(&gmgr->tdlock);
+
+		list_for_each_entry_safe(taskdesc, temp,
+				&gmgr->tdreq_list, list) {
+			if (taskdesc->graph->idx != graph->idx)
+				continue;
+
+			ret = graph->gops->cancel(graph, taskdesc->task);
+			if (ret)
+				return;
+
+			taskdesc->graph = NULL;
+			taskdesc->task = NULL;
+			__vertex_taskdesc_trans_req_to_fre(gmgr, taskdesc);
+		}
+
+		mutex_unlock(&gmgr->tdlock);
+
+		ret = graph->gops->control(graph, task);
+		if (ret)
+			return;
+		/* TODO check return/break */
+		return;
+	default:
+		vertex_err("message of task is invalid (%d)\n", task->message);
+		return;
+	}
+
+	__vertex_graphmgr_sched(gmgr);
+	vertex_leave();
+}
+
+/*
+ * kthread-worker entry for hardware-interface completions.  The itask
+ * carries the taskdesc index plus the hardware result in param2/param3.
+ *
+ * VERTEX_TASK_ALLOCATE: propagate the allocation result to the graph
+ * task; on failure cancel it and free the taskdesc, otherwise promote
+ * the taskdesc ALLOC -> PROCESS.
+ *
+ * VERTEX_TASK_PROCESS: propagate the result and finish the task via
+ * gops->done, then recycle both the taskdesc and the itask.
+ *
+ * Both paths end by re-running the scheduler.
+ */
+static void vertex_interface_thread(struct kthread_work *work)
+{
+	int ret;
+	struct vertex_task *task, *itask;
+	struct vertex_interface *itf;
+	struct vertex_graphmgr *gmgr;
+	unsigned int taskdesc_index;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_graph *graph;
+	struct vertex_taskdesc *taskdesc;
+	unsigned long flags;
+
+	vertex_enter();
+	itask = container_of(work, struct vertex_task, work);
+	itf = itask->owner;
+	itaskmgr = &itf->taskmgr;
+	gmgr = itf->cookie;
+
+	if (itask->findex >= VERTEX_MAX_TASK) {
+		vertex_err("task index(%u) is invalid (%u)\n",
+				itask->findex, VERTEX_MAX_TASK);
+		return;
+	}
+
+	switch (itask->message) {
+	case VERTEX_TASK_ALLOCATE:
+		taskdesc_index = itask->tdindex;
+		if (taskdesc_index >= VERTEX_MAX_TASKDESC) {
+			vertex_err("taskdesc index(%u) is invalid (%u)\n",
+					taskdesc_index, VERTEX_MAX_TASKDESC);
+			return;
+		}
+
+		taskdesc = &gmgr->taskdesc[taskdesc_index];
+		if (taskdesc->state != VERTEX_TASKDESC_STATE_ALLOC) {
+			vertex_err("taskdesc state(%u) is not ALLOC (%u)\n",
+					taskdesc->state, taskdesc_index);
+			return;
+		}
+
+		graph = taskdesc->graph;
+		if (!graph) {
+			vertex_err("graph is NULL (%u)\n", taskdesc_index);
+			return;
+		}
+
+		task = taskdesc->task;
+		if (!task) {
+			vertex_err("task is NULL (%u)\n", taskdesc_index);
+			return;
+		}
+
+		task->message = itask->message;
+		task->param0 = itask->param2;
+		task->param1 = itask->param3;
+
+		/* return status check */
+		if (task->param0) {
+			vertex_err("task allocation failed (%lu, %lu)\n",
+					task->param0, task->param1);
+
+			ret = graph->gops->cancel(graph, task);
+			if (ret)
+				return;
+
+			/* taskdesc cleanup */
+			mutex_lock(&gmgr->tdlock);
+			taskdesc->graph = NULL;
+			taskdesc->task = NULL;
+			__vertex_taskdesc_trans_alc_to_fre(gmgr, taskdesc);
+			mutex_unlock(&gmgr->tdlock);
+		} else {
+			/* taskdesc transition */
+			mutex_lock(&gmgr->tdlock);
+			__vertex_taskdesc_trans_alc_to_pro(gmgr, taskdesc);
+			mutex_unlock(&gmgr->tdlock);
+		}
+
+		/* itask cleanup */
+		taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+		vertex_task_trans_com_to_fre(itaskmgr, itask);
+		taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+		break;
+	case VERTEX_TASK_PROCESS:
+		taskdesc_index = itask->tdindex;
+		if (taskdesc_index >= VERTEX_MAX_TASKDESC) {
+			vertex_err("taskdesc index(%u) is invalid (%u)\n",
+					taskdesc_index, VERTEX_MAX_TASKDESC);
+			return;
+		}
+
+		taskdesc = &gmgr->taskdesc[taskdesc_index];
+		if (taskdesc->state == VERTEX_TASKDESC_STATE_FREE) {
+			vertex_err("taskdesc(%u) state is FREE\n",
+					taskdesc_index);
+
+			/* itask cleanup */
+			taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+			vertex_task_trans_com_to_fre(itaskmgr, itask);
+			taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+			break;
+		}
+
+		if (taskdesc->state != VERTEX_TASKDESC_STATE_COMPLETE) {
+			vertex_err("taskdesc state is not COMPLETE (%u)\n",
+					taskdesc->state);
+			return;
+		}
+
+		graph = taskdesc->graph;
+		if (!graph) {
+			vertex_err("graph is NULL (%u)\n", taskdesc_index);
+			return;
+		}
+
+		task = taskdesc->task;
+		if (!task) {
+			vertex_err("task is NULL (%u)\n", taskdesc_index);
+			return;
+		}
+
+		task->message = itask->message;
+		task->tdindex = VERTEX_MAX_TASKDESC;
+		task->param0 = itask->param2;
+		task->param1 = itask->param3;
+
+		ret = graph->gops->done(graph, task);
+		if (ret)
+			return;
+
+		/* taskdesc cleanup
+		 * NOTE(review): done without holding tdlock, unlike the
+		 * ALLOC paths above - confirm this is safe
+		 */
+		taskdesc->graph = NULL;
+		taskdesc->task = NULL;
+		__vertex_taskdesc_trans_com_to_fre(gmgr, taskdesc);
+
+		/* itask cleanup */
+		taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+		vertex_task_trans_com_to_fre(itaskmgr, itask);
+		taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+		break;
+	default:
+		vertex_err("message of task is invalid (%d)\n", itask->message);
+		return;
+	}
+
+	__vertex_graphmgr_sched(gmgr);
+	vertex_leave();
+}
+
+/* Debug hook: currently an empty stub. */
+void vertex_taskdesc_print(struct vertex_graphmgr *gmgr)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Register a graph in the first free slot of the manager's graph table
+ * (assigning graph->idx), wire its control/work items to
+ * vertex_graph_thread, and bump the active count.
+ * Returns 0 on success, -EINVAL when all VERTEX_MAX_GRAPH slots are
+ * taken.  Note: `index` is reused for the work-init loop after the
+ * slot search.
+ */
+int vertex_graphmgr_grp_register(struct vertex_graphmgr *gmgr,
+		struct vertex_graph *graph)
+{
+	int ret;
+	unsigned int index;
+
+	vertex_enter();
+	mutex_lock(&gmgr->mlock);
+	for (index = 0; index < VERTEX_MAX_GRAPH; ++index) {
+		if (!gmgr->graph[index]) {
+			gmgr->graph[index] = graph;
+			graph->idx = index;
+			break;
+		}
+	}
+	mutex_unlock(&gmgr->mlock);
+
+	if (index >= VERTEX_MAX_GRAPH) {
+		ret = -EINVAL;
+		vertex_err("graph slot is lack\n");
+		goto p_err;
+	}
+
+	kthread_init_work(&graph->control.work, vertex_graph_thread);
+	for (index = 0; index < VERTEX_MAX_TASK; ++index)
+		kthread_init_work(&graph->taskmgr.task[index].work,
+				vertex_graph_thread);
+
+	graph->global_lock = &gmgr->mlock;
+	atomic_inc(&gmgr->active_cnt);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Remove a graph from the manager's table and drop the active count.
+ * Always returns 0.
+ */
+int vertex_graphmgr_grp_unregister(struct vertex_graphmgr *gmgr,
+		struct vertex_graph *graph)
+{
+	vertex_enter();
+	mutex_lock(&gmgr->mlock);
+	gmgr->graph[graph->idx] = NULL;
+	mutex_unlock(&gmgr->mlock);
+	atomic_dec(&gmgr->active_cnt);
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Start a graph on the hardware: run the one-time interface init
+ * (guarded by the ENUM bit under mlock), then the per-graph create and
+ * config commands.  Returns 0 or the first failing step's error.
+ */
+int vertex_graphmgr_grp_start(struct vertex_graphmgr *gmgr,
+		struct vertex_graph *graph)
+{
+	int ret;
+
+	vertex_enter();
+	mutex_lock(&gmgr->mlock);
+	if (!test_bit(VERTEX_GRAPHMGR_ENUM, &gmgr->state)) {
+		ret = __vertex_graphmgr_itf_init(gmgr->interface, graph);
+		if (ret) {
+			mutex_unlock(&gmgr->mlock);
+			goto p_err_init;
+		}
+		set_bit(VERTEX_GRAPHMGR_ENUM, &gmgr->state);
+	}
+	mutex_unlock(&gmgr->mlock);
+
+	ret = __vertex_graphmgr_itf_create(gmgr->interface, graph);
+	if (ret)
+		goto p_err_create;
+
+	ret = __vertex_graphmgr_itf_config(gmgr->interface, graph);
+	if (ret)
+		goto p_err_config;
+
+	vertex_leave();
+	return 0;
+p_err_config:
+p_err_create:
+p_err_init:
+	return ret;
+}
+
+/*
+ * Stop a graph on the hardware: send the destroy command, and when
+ * this is the last active graph (active_cnt == 1) also deinit the
+ * interface and clear the ENUM bit (under mlock).
+ * Returns 0 or the first failing step's error.
+ */
+int vertex_graphmgr_grp_stop(struct vertex_graphmgr *gmgr,
+		struct vertex_graph *graph)
+{
+	int ret;
+
+	vertex_enter();
+	ret = __vertex_graphmgr_itf_destroy(gmgr->interface, graph);
+	if (ret)
+		goto p_err_destroy;
+
+	mutex_lock(&gmgr->mlock);
+	if (test_bit(VERTEX_GRAPHMGR_ENUM, &gmgr->state) &&
+			atomic_read(&gmgr->active_cnt) == 1) {
+		ret = __vertex_graphmgr_itf_deinit(gmgr->interface, graph);
+		if (ret) {
+			mutex_unlock(&gmgr->mlock);
+			goto p_err_deinit;
+		}
+		clear_bit(VERTEX_GRAPHMGR_ENUM, &gmgr->state);
+	}
+	mutex_unlock(&gmgr->mlock);
+
+	vertex_leave();
+	return 0;
+p_err_deinit:
+p_err_destroy:
+	return ret;
+}
+
+/* Queue a graph task's work item on the manager's kthread worker. */
+void vertex_graphmgr_queue(struct vertex_graphmgr *gmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	kthread_queue_work(&gmgr->worker, &task->work);
+	vertex_leave();
+}
+
+/*
+ * Open the graph manager: spawn the "vertex_graph" worker kthread, promote
+ * it to SCHED_FIFO (highest RT priority) and mark the manager open.
+ * Returns 0 on success or a negative errno.
+ */
+int vertex_graphmgr_open(struct vertex_graphmgr *gmgr)
+{
+	int ret;
+	char name[30];
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+	vertex_enter();
+	kthread_init_worker(&gmgr->worker);
+	snprintf(name, sizeof(name), "vertex_graph");
+	gmgr->graph_task = kthread_run(kthread_worker_fn,
+			&gmgr->worker, name);
+	if (IS_ERR(gmgr->graph_task)) {
+		ret = PTR_ERR(gmgr->graph_task);
+		vertex_err("kthread_run is fail\n");
+		goto p_err_run;
+	}
+
+	/* RT priority so graph scheduling is not starved by normal tasks */
+	ret = sched_setscheduler_nocheck(gmgr->graph_task,
+			SCHED_FIFO, &param);
+	if (ret) {
+		vertex_err("sched_setscheduler_nocheck is fail(%d)\n", ret);
+		goto p_err_sched;
+	}
+
+	gmgr->current_model = NULL;
+	set_bit(VERTEX_GRAPHMGR_OPEN, &gmgr->state);
+	vertex_leave();
+	return 0;
+p_err_sched:
+	kthread_stop(gmgr->graph_task);
+p_err_run:
+	return ret;
+}
+
+/* Close the graph manager: stop the worker kthread and clear state bits. */
+int vertex_graphmgr_close(struct vertex_graphmgr *gmgr)
+{
+	vertex_enter();
+	kthread_stop(gmgr->graph_task);
+	clear_bit(VERTEX_GRAPHMGR_OPEN, &gmgr->state);
+	clear_bit(VERTEX_GRAPHMGR_ENUM, &gmgr->state);
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * One-time probe: wire the manager to the system interface, init locks and
+ * the per-state taskdesc lists, and prepare the interface task works.
+ */
+int vertex_graphmgr_probe(struct vertex_system *sys)
+{
+	struct vertex_graphmgr *gmgr;
+	int index;
+
+	vertex_enter();
+	gmgr = &sys->graphmgr;
+	gmgr->interface = &sys->interface;
+
+	atomic_set(&gmgr->active_cnt, 0);
+	mutex_init(&gmgr->mlock);
+	mutex_init(&gmgr->tdlock);
+
+	/* one list head per taskdesc state (free/ready/request/...) */
+	INIT_LIST_HEAD(&gmgr->tdfre_list);
+	INIT_LIST_HEAD(&gmgr->tdrdy_list);
+	INIT_LIST_HEAD(&gmgr->tdreq_list);
+	INIT_LIST_HEAD(&gmgr->tdalc_list);
+	INIT_LIST_HEAD(&gmgr->tdpro_list);
+	INIT_LIST_HEAD(&gmgr->tdcom_list);
+
+	for (index = 0; index < VERTEX_MAX_TASKDESC; ++index) {
+		gmgr->taskdesc[index].index = index;
+		__vertex_taskdesc_set_free(gmgr, &gmgr->taskdesc[index]);
+	}
+
+	for (index = 0; index < VERTEX_MAX_TASK; ++index)
+		kthread_init_work(&gmgr->interface->taskmgr.task[index].work,
+				vertex_interface_thread);
+
+	return 0;
+}
+
+/* Counterpart of probe: release the manager's mutexes. */
+void vertex_graphmgr_remove(struct vertex_graphmgr *gmgr)
+{
+	mutex_destroy(&gmgr->tdlock);
+	mutex_destroy(&gmgr->mlock);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_GRAPHMGR_H___
+#define __VERTEX_GRAPHMGR_H___
+
+#include <linux/kthread.h>
+#include <linux/types.h>
+
+#include "vertex-config.h"
+#include "vertex-taskmgr.h"
+#include "vertex-graph.h"
+#include "vertex-interface.h"
+
+struct vertex_system;
+
+/* Life-cycle states of a task descriptor. */
+enum vertex_taskdesc_state {
+	VERTEX_TASKDESC_STATE_FREE,
+	VERTEX_TASKDESC_STATE_READY,
+	VERTEX_TASKDESC_STATE_REQUEST,
+	VERTEX_TASKDESC_STATE_ALLOC,
+	VERTEX_TASKDESC_STATE_PROCESS,
+	VERTEX_TASKDESC_STATE_COMPLETE
+};
+
+/* Scheduling descriptor binding a task to its owning graph. */
+struct vertex_taskdesc {
+	struct list_head list;
+	unsigned int index;
+	unsigned int priority;
+	struct vertex_graph *graph;
+	struct vertex_task *task;
+	unsigned int state;	/* enum vertex_taskdesc_state */
+};
+
+enum vertex_graphmgr_state {
+	VERTEX_GRAPHMGR_OPEN,
+	VERTEX_GRAPHMGR_ENUM
+};
+
+enum vertex_graphmgr_client {
+	VERTEX_GRAPHMGR_CLIENT_GRAPH = 1,
+	VERTEX_GRAPHMGR_CLIENT_INTERFACE
+};
+
+/* Central manager that schedules registered graphs onto the interface. */
+struct vertex_graphmgr {
+	/* registered graphs, indexed by graph->idx; protected by mlock */
+	struct vertex_graph *graph[VERTEX_MAX_GRAPH];
+	atomic_t active_cnt;
+	unsigned long state;	/* enum vertex_graphmgr_state bits */
+	struct mutex mlock;
+
+	unsigned int tick_cnt;
+	unsigned int tick_pos;
+	unsigned int sched_cnt;
+	unsigned int sched_pos;
+	struct kthread_worker worker;	/* graph scheduling worker */
+	struct task_struct *graph_task;	/* kthread driving the worker */
+
+	struct vertex_interface *interface;
+
+	/* tdlock protects the taskdesc lists and counters below */
+	struct mutex tdlock;
+	struct vertex_taskdesc taskdesc[VERTEX_MAX_TASKDESC];
+	struct list_head tdfre_list;
+	struct list_head tdrdy_list;
+	struct list_head tdreq_list;
+	struct list_head tdalc_list;
+	struct list_head tdpro_list;
+	struct list_head tdcom_list;
+	unsigned int tdfre_cnt;
+	unsigned int tdrdy_cnt;
+	unsigned int tdreq_cnt;
+	unsigned int tdalc_cnt;
+	unsigned int tdpro_cnt;
+	unsigned int tdcom_cnt;
+
+	struct vertex_graph_model *current_model;
+};
+
+void vertex_taskdesc_print(struct vertex_graphmgr *gmgr);
+int vertex_graphmgr_grp_register(struct vertex_graphmgr *gmgr,
+ struct vertex_graph *graph);
+int vertex_graphmgr_grp_unregister(struct vertex_graphmgr *gmgr,
+ struct vertex_graph *graph);
+int vertex_graphmgr_grp_start(struct vertex_graphmgr *gmgr,
+ struct vertex_graph *graph);
+int vertex_graphmgr_grp_stop(struct vertex_graphmgr *gmgr,
+ struct vertex_graph *graph);
+void vertex_graphmgr_queue(struct vertex_graphmgr *gmgr,
+ struct vertex_task *task);
+
+int vertex_graphmgr_open(struct vertex_graphmgr *gmgr);
+int vertex_graphmgr_close(struct vertex_graphmgr *gmgr);
+
+int vertex_graphmgr_probe(struct vertex_system *sys);
+void vertex_graphmgr_remove(struct vertex_graphmgr *gmgr);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "vertex-log.h"
+#include "vertex-device.h"
+#include "vertex-binary.h"
+#include "vertex-queue.h"
+#include "vertex-graphmgr.h"
+#include "vertex-memory.h"
+#include "vertex-mailbox.h"
+#include "vertex-message.h"
+#include "vertex-system.h"
+#include "vertex-interface.h"
+
+/*
+ * request barrier: serializes commands per graph (sleeps, mutex);
+ * process barrier: protects the single in-flight mailbox command
+ * (itf->process) against the ISR (spinlock, irq-disabled).
+ */
+#define enter_request_barrier(task)	mutex_lock(task->lock)
+#define exit_request_barrier(task)	mutex_unlock(task->lock)
+#define enter_process_barrier(itf)	spin_lock_irq(&itf->process_barrier)
+#define exit_process_barrier(itf)	spin_unlock_irq(&itf->process_barrier)
+
+/* TODO check */
+//#define NON_BLOCK_INVOKE
+
+/* Mark graph gidx's reply as valid and wake any waiter in wait_reply(). */
+static inline void __vertex_interface_set_reply(struct vertex_interface *itf,
+		unsigned int gidx)
+{
+	vertex_enter();
+	itf->reply[gidx].valid = 1;
+	wake_up(&itf->reply_wq);
+	vertex_leave();
+}
+
+/* Consume (invalidate) graph gidx's reply slot. */
+static inline void __vertex_interface_clear_reply(struct vertex_interface *itf,
+		unsigned int gidx)
+{
+	vertex_enter();
+	itf->reply[gidx].valid = 0;
+	vertex_leave();
+}
+
+/*
+ * Block until the firmware reply for graph gidx arrives or the command
+ * timeout expires. On timeout, dumps debug state and returns -ETIMEDOUT.
+ */
+static int __vertex_interface_wait_reply(struct vertex_interface *itf,
+		unsigned int gidx)
+{
+	int ret;
+	unsigned int cmd;
+	unsigned long type;
+	long wait_time, timeout;
+
+	vertex_enter();
+	cmd = itf->request[gidx]->message;
+	type = itf->request[gidx]->param3;
+
+#ifdef NON_BLOCK_INVOKE
+	wait_time = VERTEX_COMMAND_TIMEOUT;
+#else
+	/* blocking invoke shares this path, so allow double the budget */
+	wait_time = VERTEX_COMMAND_TIMEOUT * 2;
+#endif
+	timeout = wait_event_timeout(itf->reply_wq, itf->reply[gidx].valid,
+			wait_time);
+	if (!timeout) {
+		vertex_debug_write_log_binary();
+		vertex_debug_dump_debug_regs();
+		ret = -ETIMEDOUT;
+		vertex_err("wait time(%u ms) is expired (%u/%u/%lu)\n",
+				jiffies_to_msecs(wait_time), gidx, cmd, type);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Raise the host-to-firmware doorbell interrupt for the message type of the
+ * task currently in itf->process. Waits (bounded) for the previous doorbell
+ * to be acknowledged before ringing again. No-op when the mailbox emulator
+ * is in use.
+ */
+static void __vertex_interface_send_interrupt(struct vertex_interface *itf)
+{
+#ifndef VERTEX_MBOX_EMULATOR
+	unsigned long type;
+	unsigned int offset;
+	int try_count = 100;
+	unsigned int val;
+
+	vertex_enter();
+	type = itf->process->param3;
+	offset = (type == VERTEX_MTYPE_H2F_NORMAL) ? 0x10 : 0x0C;
+
+	/*
+	 * Check interrupt clear. Decrement try_count each pass so the loop is
+	 * bounded; the original loop never decremented and could spin forever
+	 * if the firmware failed to clear the doorbell.
+	 */
+	while (try_count--) {
+		val = readl(itf->regs + offset);
+		if (!val)
+			break;
+
+		vertex_warn("interrupt(0x%x) is not yet clear (%u)\n",
+				val, try_count);
+		udelay(1);
+	}
+
+	/* Raise interrupt */
+	if (!val)
+		writel(0x100, itf->regs + offset);
+	else
+		vertex_err("failed to send interrupt(0x%x)\n", val);
+
+	vertex_leave();
+#endif
+}
+
+/*
+ * Blocking command path: publish the task as the in-flight request for its
+ * graph, write the message to the mailbox, ring the doorbell and wait for
+ * the firmware reply. Barrier ordering is critical: the request barrier
+ * serializes per-graph commands, the process barrier protects itf->process
+ * against the ISR while the mailbox write is in flight.
+ */
+static int __vertex_interface_send_command(struct vertex_interface *itf,
+		struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_mailbox_ctrl *mctrl;
+	unsigned int cmd;
+	void *msg;
+	unsigned long gidx, size, type;
+	unsigned int cid;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+	mctrl = itf->mctrl;
+
+	/* task params carry the marshalled command (set by the hw_* callers) */
+	cmd = itask->message;
+	msg = (void *)itask->param0;
+	gidx = itask->param1;
+	size = itask->param2;
+	type = itask->param3;
+	cid = itask->index;
+
+	enter_request_barrier(itask);
+	itf->request[gidx] = itask;
+
+	enter_process_barrier(itf);
+	itf->process = itask;
+
+	ret = vertex_mbox_check_full(mctrl, type, VERTEX_MAILBOX_WAIT);
+	if (ret) {
+		vertex_err("mbox is full (%lu/%u/%u)\n", gidx, cmd, cid);
+		goto p_err_process;
+	}
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_req_to_pro(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	ret = vertex_mbox_write(mctrl, type, msg, size);
+	if (ret) {
+		vertex_err("writing to mbox is failed (%lu/%u/%u)\n",
+				gidx, cmd, cid);
+		goto p_err_process;
+	}
+
+	__vertex_interface_send_interrupt(itf);
+	itf->process = NULL;
+	exit_process_barrier(itf);
+
+	/* sleep until the ISR/work handler fills itf->reply[gidx] */
+	ret = __vertex_interface_wait_reply(itf, gidx);
+	if (ret) {
+		vertex_err("No reply (%lu/%u/%u)\n", gidx, cmd, cid);
+		goto p_err_request;
+	}
+
+	__vertex_interface_clear_reply(itf, gidx);
+
+	/* param1 of the reply carries the firmware error code */
+	if (itf->reply[gidx].param1) {
+		ret = itf->reply[gidx].param1;
+		vertex_err("Reply(%d) is error (%lu/%u/%u)\n",
+				ret, gidx, cmd, cid);
+		goto p_err_request;
+	}
+
+	itf->request[gidx] = NULL;
+	exit_request_barrier(itask);
+
+	vertex_leave();
+	return 0;
+p_err_process:
+	itf->process = NULL;
+	exit_process_barrier(itf);
+p_err_request:
+	itf->request[gidx] = NULL;
+	exit_request_barrier(itask);
+	return ret;
+}
+
+#ifdef NON_BLOCK_INVOKE
+/*
+ * Non-blocking variant of send_command: identical mailbox write and doorbell
+ * sequence, but skips the request barrier and does not wait for a reply —
+ * completion is delivered later via the reply work queue.
+ */
+static int __vertex_interface_send_command_nonblocking(
+		struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_mailbox_ctrl *mctrl;
+	unsigned int cmd;
+	void *msg;
+	unsigned long gidx, size, type;
+	unsigned int cid;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+	mctrl = itf->mctrl;
+
+	cmd = itask->message;
+	msg = (void *)itask->param0;
+	gidx = itask->param1;
+	size = itask->param2;
+	type = itask->param3;
+	cid = itask->index;
+
+	enter_process_barrier(itf);
+	itf->process = itask;
+
+	ret = vertex_mbox_check_full(mctrl, type, VERTEX_MAILBOX_WAIT);
+	if (ret) {
+		vertex_err("mbox is full (%lu/%u/%u)\n", gidx, cmd, cid);
+		goto p_err_process;
+	}
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_req_to_pro(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	ret = vertex_mbox_write(mctrl, type, msg, size);
+	if (ret) {
+		vertex_err("writing to mbox is failed (%lu/%u/%u)\n",
+				gidx, cmd, cid);
+		goto p_err_process;
+	}
+
+	__vertex_interface_send_interrupt(itf);
+	itf->process = NULL;
+	exit_process_barrier(itf);
+
+	vertex_leave();
+	return 0;
+p_err_process:
+	itf->process = NULL;
+	exit_process_barrier(itf);
+	return ret;
+}
+#endif
+
+/*
+ * Poll for the CM7 firmware BOOTUP flag (set by the ISR on BOOTUP_RSP).
+ * Busy-waits up to 1000 * 100us (~100 ms); -ETIMEDOUT on failure.
+ */
+int vertex_hw_wait_bootup(struct vertex_interface *itf)
+{
+	int ret;
+	int try_cnt = 1000;
+
+	vertex_enter();
+	while (!test_bit(VERTEX_ITF_STATE_BOOTUP, &itf->state)) {
+		if (!try_cnt)
+			break;
+
+		udelay(100);
+		try_cnt--;
+	}
+
+	if (!test_bit(VERTEX_ITF_STATE_BOOTUP, &itf->state)) {
+		vertex_debug_write_log_binary();
+		vertex_debug_dump_debug_regs();
+		ret = -ETIMEDOUT;
+		vertex_err("Failed to boot CM7 (%d)\n", ret);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Compute the per-plane width/height for a VS4L image format. For planar
+ * YUV formats, chroma planes are subsampled relative to the luma plane.
+ * Returns 0 on success, -EINVAL for an unknown format or invalid plane
+ * index; *width/*height are only written on success.
+ */
+static int __vertex_interface_get_size_format(struct vertex_format *format,
+		unsigned int plane, unsigned int *width, unsigned int *height)
+{
+	int ret;
+
+	vertex_enter();
+	switch (format->format) {
+	case VS4L_DF_IMAGE_U8:
+	case VS4L_DF_IMAGE_U16:
+	case VS4L_DF_IMAGE_RGB:
+	case VS4L_DF_IMAGE_U32:
+		*width = format->width;
+		*height = format->height;
+		break;
+	case VS4L_DF_IMAGE_NV21:
+		if (plane == 0) {
+			*width = format->width;
+			*height = format->height;
+		} else if (plane == 1) {
+			/* interleaved UV plane: full width, half height */
+			*width = format->width;
+			*height = format->height / 2;
+		} else {
+			vertex_err("Invalid plane(%d) for NV21\n", plane);
+			ret = -EINVAL;
+			goto p_err;
+		}
+		break;
+	case VS4L_DF_IMAGE_YV12:
+	case VS4L_DF_IMAGE_I420:
+		if (plane == 0) {
+			*width = format->width;
+			*height = format->height;
+		} else if ((plane == 1) || (plane == 2)) {
+			/* 4:2:0 chroma planes: half width, half height */
+			*width = format->width / 2;
+			*height = format->height / 2;
+		} else {
+			vertex_err("Invalid plane(%d) for YV12/I420\n", plane);
+			ret = -EINVAL;
+			goto p_err;
+		}
+		break;
+	case VS4L_DF_IMAGE_I422:
+		if (plane == 0) {
+			*width = format->width;
+			*height = format->height;
+		} else if ((plane == 1) || (plane == 2)) {
+			/* 4:2:2 chroma planes: half width, full height */
+			*width = format->width / 2;
+			*height = format->height;
+		} else {
+			vertex_err("Invalid plane(%d) for I422\n", plane);
+			ret = -EINVAL;
+			/* fix: error was previously dropped (missing goto) */
+			goto p_err;
+		}
+		break;
+	default:
+		vertex_err("invalid format (%u)\n", format->format);
+		ret = -EINVAL;
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Build and send a SET_GRAPH_REQ: allocates the firmware heap sized by
+ * scenario, flattens the in/out format lists into per-plane width/height
+ * arrays, and issues the command via the blocking path. On failure the heap
+ * is released and the task is returned to the free state.
+ */
+int vertex_hw_config(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_format_list *incl;
+	struct vertex_format_list *otcl;
+	struct vertex_message msg;
+	struct vertex_set_graph_req *req;
+	unsigned int lcnt, fcnt;
+	unsigned int width, height;
+	int num_inputs, num_outputs;
+	size_t size;
+	dma_addr_t heap;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+	incl = (struct vertex_format_list *)itask->param2;
+	otcl = (struct vertex_format_list *)itask->param3;
+
+	msg.trans_id = itask->index;
+	msg.type = SET_GRAPH_REQ;
+
+	req = &msg.payload.set_graph_req;
+	req->graph_id = itask->param0;
+
+	/* Allocate and assign heap (size depends on the scenario id) */
+	if (req->graph_id < SCENARIO_DE_CAPTURE) {
+		size = VERTEX_CM7_HEAP_SIZE_PREVIEW;
+	} else if (req->graph_id == SCENARIO_ENF ||
+			req->graph_id == SCENARIO_ENF_UV ||
+			req->graph_id == SCENARIO_ENF_YUV) {
+		size = VERTEX_CM7_HEAP_SIZE_ENF;
+	} else {
+		size = VERTEX_CM7_HEAP_SIZE_CAPTURE;
+	}
+	heap = vertex_memory_allocate_heap(&itf->system->memory, req->graph_id,
+			size);
+	if (IS_ERR_VALUE(heap)) {
+		ret = (int)heap;
+		goto p_err_heap;
+	}
+
+	req->p_temp = heap;
+	req->sz_temp = size;
+
+	/* TODO : How to update depth field */
+	for (lcnt = 0, num_inputs = 0; lcnt < incl->count; ++lcnt) {
+		for (fcnt = 0; fcnt < incl->formats[lcnt].plane; ++fcnt) {
+			ret = __vertex_interface_get_size_format(
+					&incl->formats[lcnt], fcnt,
+					&width, &height);
+			if (ret)
+				goto p_err_incl;
+
+			req->input_width[num_inputs] = width;
+			req->input_height[num_inputs] = height;
+			req->input_depth[num_inputs] = 0;
+			num_inputs++;
+		}
+	}
+	req->num_inputs = num_inputs;
+
+	for (lcnt = 0, num_outputs = 0; lcnt < otcl->count; ++lcnt) {
+		for (fcnt = 0; fcnt < otcl->formats[lcnt].plane; ++fcnt) {
+			ret = __vertex_interface_get_size_format(
+					&otcl->formats[lcnt], fcnt,
+					&width, &height);
+			if (ret)
+				goto p_err_otcl;
+
+			req->output_width[num_outputs] = width;
+			req->output_height[num_outputs] = height;
+			req->output_depth[num_outputs] = 0;
+			num_outputs++;
+		}
+	}
+	req->num_outputs = num_outputs;
+
+	/* repoint the task params at the marshalled message for send_command */
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_NORMAL;
+
+	ret = __vertex_interface_send_command(itf, itask);
+	if (ret) {
+		vertex_err("Failed to send command (%d)\n", ret);
+		goto p_err_send;
+	}
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_pro_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_leave();
+	return 0;
+p_err_send:
+p_err_otcl:
+p_err_incl:
+	vertex_memory_free_heap(&itf->system->memory);
+p_err_heap:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	return ret;
+}
+
+/*
+ * Build and send an INVOKE_GRAPH_REQ: flattens the in/out container lists
+ * into device-address arrays plus user params, then issues the command
+ * (non-blocking when NON_BLOCK_INVOKE is enabled).
+ */
+int vertex_hw_process(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_container_list *incl;
+	struct vertex_container_list *otcl;
+	struct vertex_message msg;
+	struct vertex_invoke_graph_req *req;
+	unsigned int lcnt, fcnt, count;
+	int num_inputs, num_outputs;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+	incl = itask->incl;
+	otcl = itask->otcl;
+
+	msg.trans_id = itask->index;
+	msg.type = INVOKE_GRAPH_REQ;
+
+	req = &msg.payload.invoke_graph_req;
+	req->graph_id = itask->param0;
+
+	/* TODO : We need to consider to support multi plain buffer */
+	for (lcnt = 0, num_inputs = 0; lcnt < incl->count; ++lcnt) {
+		for (fcnt = 0; fcnt < incl->containers[lcnt].count; ++fcnt) {
+			req->p_input[num_inputs] =
+				incl->containers[lcnt].buffers[fcnt].dvaddr;
+			num_inputs++;
+		}
+	}
+	req->num_inputs = num_inputs;
+
+	for (lcnt = 0, num_outputs = 0; lcnt < otcl->count; ++lcnt) {
+		for (fcnt = 0; fcnt < otcl->containers[lcnt].count; ++fcnt) {
+			req->p_output[num_outputs] =
+				otcl->containers[lcnt].buffers[fcnt].dvaddr;
+			num_outputs++;
+		}
+	}
+	req->num_outputs = num_outputs;
+
+	for (count = 0; count < MAX_NUM_OF_USER_PARAMS; ++count)
+		req->user_params[count] = incl->user_params[count];
+
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_NORMAL;
+
+#ifdef NON_BLOCK_INVOKE
+	ret = __vertex_interface_send_command_nonblocking(itf, itask);
+#else
+	ret = __vertex_interface_send_command(itf, itask);
+#endif
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	return ret;
+}
+
+/* Send a CREATE_GRAPH_REQ (no graph payload yet) via the blocking path. */
+int vertex_hw_create(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_message msg;
+	struct vertex_create_graph_req *req;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	msg.trans_id = itask->index;
+	msg.type = CREATE_GRAPH_REQ;
+
+	req = &msg.payload.create_graph_req;
+	req->p_graph = 0;
+	req->sz_graph = 0;
+
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_NORMAL;
+
+	ret = __vertex_interface_send_command(itf, itask);
+	if (ret)
+		goto p_err;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_pro_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	return ret;
+}
+
+/*
+ * Send a DESTROY_GRAPH_REQ and release the firmware heap allocated at
+ * config time (the heap is freed on both success and failure paths).
+ */
+int vertex_hw_destroy(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_message msg;
+	struct vertex_destroy_graph_req *req;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	msg.trans_id = itask->index;
+	msg.type = DESTROY_GRAPH_REQ;
+
+	req = &msg.payload.destroy_graph_req;
+	req->graph_id = itask->param0;
+
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_NORMAL;
+
+	ret = __vertex_interface_send_command(itf, itask);
+	if (ret)
+		goto p_err;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_pro_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_memory_free_heap(&itf->system->memory);
+
+	vertex_leave();
+	return 0;
+p_err:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_memory_free_heap(&itf->system->memory);
+	return ret;
+}
+
+/* Send an INIT_REQ on the urgent mailbox channel via the blocking path. */
+int vertex_hw_init(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_message msg;
+	struct vertex_init_req *req;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	msg.trans_id = itask->index;
+	msg.type = INIT_REQ;
+
+	req = &msg.payload.init_req;
+	req->p_cc_heap = 0;
+	req->sz_cc_heap = 0;
+
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_URGENT;
+
+	ret = __vertex_interface_send_command(itf, itask);
+	if (ret)
+		goto p_err;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_pro_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	return ret;
+}
+
+/* Send a POWER_DOWN_REQ on the urgent mailbox channel (blocking). */
+int vertex_hw_deinit(struct vertex_interface *itf, struct vertex_task *itask)
+{
+	int ret;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_message msg;
+	struct vertex_powerdown_req *req;
+	unsigned long flags;
+
+	vertex_enter();
+	itaskmgr = &itf->taskmgr;
+
+	msg.trans_id = itask->index;
+	msg.type = POWER_DOWN_REQ;
+
+	req = &msg.payload.powerdown_req;
+	req->valid = 1;
+
+	itask->param0 = (unsigned long)&msg;
+	itask->param2 = sizeof(msg);
+	itask->param3 = VERTEX_MTYPE_H2F_URGENT;
+
+	ret = __vertex_interface_send_command(itf, itask);
+	if (ret)
+		goto p_err;
+
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_pro_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+	vertex_task_trans_any_to_fre(itaskmgr, itask);
+	taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+	return ret;
+}
+
+/* Return a work item to the free list (irq-safe). */
+static void __vertex_interface_work_list_queue_free(
+		struct vertex_work_list *work_list, struct vertex_work *work)
+{
+	unsigned long flags;
+
+	vertex_enter();
+	spin_lock_irqsave(&work_list->slock, flags);
+	list_add_tail(&work->list, &work_list->free_head);
+	work_list->free_cnt++;
+	spin_unlock_irqrestore(&work_list->slock, flags);
+	vertex_leave();
+}
+
+/* Take a work item from the free list; NULL (with an error log) if empty. */
+static struct vertex_work *__vertex_interface_work_list_dequeue_free(
+		struct vertex_work_list *work_list)
+{
+	unsigned long flags;
+	struct vertex_work *work;
+
+	vertex_enter();
+	spin_lock_irqsave(&work_list->slock, flags);
+	if (work_list->free_cnt) {
+		work = list_first_entry(&work_list->free_head,
+				struct vertex_work, list);
+		list_del(&work->list);
+		work_list->free_cnt--;
+	} else {
+		work = NULL;
+		vertex_err("free work list empty\n");
+	}
+	spin_unlock_irqrestore(&work_list->slock, flags);
+
+	vertex_leave();
+	return work;
+}
+
+/* Append a filled-in work item to the reply list (irq-safe). */
+static void __vertex_interface_work_list_queue_reply(
+		struct vertex_work_list *work_list, struct vertex_work *work)
+{
+	unsigned long flags;
+
+	vertex_enter();
+	spin_lock_irqsave(&work_list->slock, flags);
+	list_add_tail(&work->list, &work_list->reply_head);
+	work_list->reply_cnt++;
+	spin_unlock_irqrestore(&work_list->slock, flags);
+	vertex_leave();
+}
+
+/* Pop the next pending reply work item; NULL when the list is drained. */
+static struct vertex_work *__vertex_interface_work_list_dequeue_reply(
+		struct vertex_work_list *work_list)
+{
+	unsigned long flags;
+	struct vertex_work *work;
+
+	vertex_enter();
+	spin_lock_irqsave(&work_list->slock, flags);
+	if (work_list->reply_cnt) {
+		work = list_first_entry(&work_list->reply_head,
+				struct vertex_work, list);
+		list_del(&work->list);
+		work_list->reply_cnt--;
+	} else {
+		work = NULL;
+	}
+	spin_unlock_irqrestore(&work_list->slock, flags);
+
+	vertex_leave();
+	return work;
+}
+
+/* Initialize the lock and lists, seeding all `count` items onto free. */
+static void __vertex_interface_work_list_init(
+		struct vertex_work_list *work_list, unsigned int count)
+{
+	unsigned int idx;
+
+	vertex_enter();
+	spin_lock_init(&work_list->slock);
+	INIT_LIST_HEAD(&work_list->free_head);
+	work_list->free_cnt = 0;
+	INIT_LIST_HEAD(&work_list->reply_head);
+	work_list->reply_cnt = 0;
+
+	for (idx = 0; idx < count; ++idx)
+		__vertex_interface_work_list_queue_free(work_list,
+				&work_list->work[idx]);
+
+	vertex_leave();
+}
+
+/*
+ * Shared ISR body: drains the urgent then normal firmware-to-host mailbox.
+ * BOOTUP_RSP sets the BOOTUP state bit; TEST_RSP completes the in-flight
+ * process task directly; everything else is handed to the reply work queue
+ * for completion in process context.
+ */
+static void __interface_isr(void *data)
+{
+	struct vertex_interface *itf;
+	struct vertex_mailbox_ctrl *mctrl;
+	struct vertex_work_list *work_list;
+	struct work_struct *work_queue;
+	struct vertex_work *work;
+	struct vertex_message rsp;
+
+	vertex_enter();
+	itf = (struct vertex_interface *)data;
+	mctrl = itf->mctrl;
+	work_list = &itf->work_list;
+	work_queue = &itf->work_queue;
+
+	while (!vertex_mbox_check_reply(mctrl, VERTEX_MTYPE_F2H_URGENT, 0)) {
+		vertex_mbox_read(mctrl, VERTEX_MTYPE_F2H_URGENT, &rsp);
+
+		if (rsp.type == BOOTUP_RSP) {
+			if (rsp.payload.bootup_rsp.error == 0) {
+				vertex_dbg("CM7 bootup complete (%u)\n",
+						rsp.trans_id);
+				set_bit(VERTEX_ITF_STATE_BOOTUP, &itf->state);
+			} else {
+				/* TODO process boot fail */
+				vertex_err("CM7 bootup failed (%u,%d)\n",
+						rsp.trans_id,
+						rsp.payload.bootup_rsp.error);
+			}
+			break;
+		}
+
+		work = __vertex_interface_work_list_dequeue_free(work_list);
+		if (work) {
+			/* TODO */
+			work->id = rsp.trans_id;
+			work->message = 0;
+			work->param0 = 0;
+			work->param1 = 0;
+			work->param2 = 0;
+			work->param3 = 0;
+
+			__vertex_interface_work_list_queue_reply(work_list,
+					work);
+
+			if (!work_pending(work_queue))
+				schedule_work(work_queue);
+		}
+	}
+
+	while (!vertex_mbox_check_reply(mctrl, VERTEX_MTYPE_F2H_NORMAL, 0)) {
+		vertex_mbox_read(mctrl, VERTEX_MTYPE_F2H_NORMAL, &rsp);
+
+		if (rsp.type == TEST_RSP) {
+			enter_process_barrier(itf);
+			if (!itf->process) {
+				vertex_err("process task is empty\n");
+				exit_process_barrier(itf);
+				continue;
+			}
+
+			vertex_dbg("test response %x\n",
+					rsp.payload.test_rsp.error);
+			itf->process->message = VERTEX_TASK_DONE;
+			wake_up(&itf->reply_wq);
+			exit_process_barrier(itf);
+			continue;
+		}
+
+		work = __vertex_interface_work_list_dequeue_free(work_list);
+		if (work) {
+			/* TODO */
+			work->id = rsp.trans_id;
+			work->message = 0;
+			work->param0 = 0;
+			work->param1 = 0;
+			work->param2 = 0;
+			work->param3 = 0;
+
+			__vertex_interface_work_list_queue_reply(work_list,
+					work);
+
+			if (!work_pending(work_queue))
+				schedule_work(work_queue);
+		}
+	}
+
+	vertex_leave();
+}
+
+/* IRQ handler for mailbox line 0; delegates to the shared ISR body. */
+irqreturn_t vertex_interface_isr0(int irq, void *data)
+{
+	vertex_enter();
+	__interface_isr(data);
+	vertex_leave();
+	return IRQ_HANDLED;
+}
+
+/* IRQ handler for mailbox line 1; delegates to the shared ISR body. */
+irqreturn_t vertex_interface_isr1(int irq, void *data)
+{
+	vertex_enter();
+	__interface_isr(data);
+	vertex_leave();
+	return IRQ_HANDLED;
+}
+
+#ifdef VERTEX_MBOX_EMULATOR
+#define MBOX_TURN_AROUND_TIME		(HZ / 20)
+
+extern int vertex_mbox_emul_handler(struct vertex_mailbox_ctrl *mctrl);
+
+/*
+ * Emulator timer: periodically runs the software mailbox handler and, if it
+ * produced a reply, fakes an interrupt. Re-arms itself with a randomized
+ * delay below MBOX_TURN_AROUND_TIME.
+ */
+static void __mbox_timer(unsigned long data)
+{
+	struct vertex_interface *itf;
+	struct vertex_mailbox_ctrl *mctrl;
+	int ret;
+	unsigned int random;
+
+	vertex_enter();
+	itf = (struct vertex_interface *)data;
+	mctrl = itf->private_data;
+
+	if (!test_bit(VERTEX_ITF_STATE_BOOTUP, &itf->state))
+		goto exit;
+
+	/* fix: call the declared symbol (was vertex_emul_mbox_handler) */
+	ret = vertex_mbox_emul_handler(mctrl);
+	if (ret)
+		__interface_isr(itf);
+
+exit:
+	get_random_bytes(&random, sizeof(random));
+	random = random % MBOX_TURN_AROUND_TIME;
+	mod_timer(&itf->timer, jiffies + random);
+	vertex_leave();
+}
+#endif
+
+/* Mark the interface started; with the emulator, also arm its poll timer. */
+int vertex_interface_start(struct vertex_interface *itf)
+{
+	vertex_enter();
+#ifdef VERTEX_MBOX_EMULATOR
+	init_timer(&itf->timer);
+	itf->timer.expires = jiffies + MBOX_TURN_AROUND_TIME;
+	itf->timer.data = (unsigned long)itf;
+	itf->timer.function = __mbox_timer;
+	add_timer(&itf->timer);
+#endif
+	set_bit(VERTEX_ITF_STATE_START, &itf->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Force any tasks still in req/pro/com state back to free at stop time,
+ * logging the leak. Index 0 of the task array is skipped (reserved).
+ */
+static void __vertex_interface_cleanup(struct vertex_interface *itf)
+{
+	struct vertex_taskmgr *taskmgr;
+	int task_count;
+
+	unsigned long flags;
+	unsigned int idx;
+	struct vertex_task *task;
+
+	vertex_enter();
+	taskmgr = &itf->taskmgr;
+
+	task_count = taskmgr->req_cnt + taskmgr->pro_cnt + taskmgr->com_cnt;
+	if (task_count) {
+		vertex_warn("count of task manager is not zero (%u/%u/%u)\n",
+				taskmgr->req_cnt, taskmgr->pro_cnt,
+				taskmgr->com_cnt);
+		/* TODO remove debug log */
+		vertex_task_print_all(&itf->taskmgr);
+
+		taskmgr_e_barrier_irqs(taskmgr, 0, flags);
+
+		for (idx = 1; idx < taskmgr->tot_cnt; ++idx) {
+			task = &taskmgr->task[idx];
+			if (task->state == VERTEX_TASK_STATE_FREE)
+				continue;
+
+			vertex_task_trans_any_to_fre(taskmgr, task);
+			task_count--;
+		}
+
+		taskmgr_x_barrier_irqr(taskmgr, 0, flags);
+	}
+}
+
+/* Reclaim leaked tasks, stop the emulator timer and clear run-state bits. */
+int vertex_interface_stop(struct vertex_interface *itf)
+{
+	vertex_enter();
+	__vertex_interface_cleanup(itf);
+#ifdef VERTEX_MBOX_EMULATOR
+	del_timer_sync(&itf->timer);
+#endif
+	clear_bit(VERTEX_ITF_STATE_BOOTUP, &itf->state);
+	clear_bit(VERTEX_ITF_STATE_START, &itf->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Attach the mailbox control block and reset per-graph request/reply state. */
+int vertex_interface_open(struct vertex_interface *itf, void *mbox)
+{
+	int idx;
+
+	vertex_enter();
+	itf->mctrl = mbox;
+
+	itf->process = NULL;
+	for (idx = 0; idx < VERTEX_MAX_GRAPH; ++idx) {
+		itf->request[idx] = NULL;
+		itf->reply[idx].valid = 0;
+	}
+
+	itf->done_cnt = 0;
+	itf->state = 0;
+	set_bit(VERTEX_ITF_STATE_OPEN, &itf->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Detach the mailbox control block and clear the OPEN state bit. */
+int vertex_interface_close(struct vertex_interface *itf)
+{
+	vertex_enter();
+	itf->mctrl = NULL;
+	clear_bit(VERTEX_ITF_STATE_OPEN, &itf->state);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Process-context half of the ISR: drains the reply list, matches each work
+ * item to its in-flight task by trans id, wakes the blocked sender and, for
+ * PROCESS tasks, completes the task back to the graph manager.
+ */
+static void vertex_interface_work_reply_func(struct work_struct *data)
+{
+	struct vertex_interface *itf;
+	struct vertex_taskmgr *itaskmgr;
+	struct vertex_work_list *work_list;
+	struct vertex_work *work;
+	struct vertex_task *itask;
+	unsigned int gidx;
+	unsigned long flags;
+
+	vertex_enter();
+	itf = container_of(data, struct vertex_interface, work_queue);
+	itaskmgr = &itf->taskmgr;
+	work_list = &itf->work_list;
+
+	while (true) {
+		work = __vertex_interface_work_list_dequeue_reply(work_list);
+		if (!work)
+			break;
+
+		if (work->id >= VERTEX_MAX_TASK) {
+			vertex_err("work id is invalid (%d)\n", work->id);
+			goto p_end;
+		}
+
+		itask = &itaskmgr->task[work->id];
+
+		if (itask->state != VERTEX_TASK_STATE_PROCESS) {
+			vertex_err("task(%u/%u/%lu) state(%u) is invalid\n",
+					itask->message, itask->index,
+					itask->param1, itask->state);
+			vertex_err("work(%u/%u/%u)\n", work->id,
+					work->param0, work->param1);
+			vertex_task_print_all(&itf->taskmgr);
+			goto p_end;
+		}
+
+		switch (itask->message) {
+		case VERTEX_TASK_INIT:
+		case VERTEX_TASK_DEINIT:
+		case VERTEX_TASK_CREATE:
+		case VERTEX_TASK_DESTROY:
+		case VERTEX_TASK_ALLOCATE:
+		case VERTEX_TASK_REQUEST:
+			/* copy the reply and wake the blocked sender */
+			gidx = itask->param1;
+			itf->reply[gidx] = *work;
+			__vertex_interface_set_reply(itf, gidx);
+			break;
+		case VERTEX_TASK_PROCESS:
+#ifndef NON_BLOCK_INVOKE
+			gidx = itask->param1;
+			itf->reply[gidx] = *work;
+			__vertex_interface_set_reply(itf, gidx);
+#endif
+			taskmgr_e_barrier_irqs(itaskmgr, 0, flags);
+			vertex_task_trans_pro_to_com(itaskmgr, itask);
+			taskmgr_x_barrier_irqr(itaskmgr, 0, flags);
+
+			itask->param2 = 0;
+			itask->param3 = 0;
+			vertex_graphmgr_queue(itf->cookie, itask);
+			itf->done_cnt++;
+			break;
+		default:
+			vertex_err("unresolved task message(%d) have arrived\n",
+					work->message);
+			break;
+		}
+p_end:
+		__vertex_interface_work_list_queue_free(work_list, work);
+	};
+	vertex_leave();
+}
+
+/* TODO temp code */
+void *vertex_interface_data;
+
+/*
+ * One-time probe: binds the interface to the system, initializes the slab
+ * allocator, task manager, wait queue, process barrier and reply work list.
+ */
+int vertex_interface_probe(struct vertex_system *sys)
+{
+	int ret;
+	struct platform_device *pdev;
+	struct vertex_interface *itf;
+	struct device *dev;
+	struct vertex_taskmgr *taskmgr;
+
+	vertex_enter();
+	pdev = to_platform_device(sys->dev);
+	itf = &sys->interface;
+	itf->system = sys;
+	dev = &pdev->dev;
+
+	vertex_interface_data = itf;
+	itf->regs = sys->reg_ss[VERTEX_REG_SS1];
+
+	ret = vertex_slab_init(&itf->slab);
+	if (ret)
+		goto p_err_slab;
+
+	taskmgr = &itf->taskmgr;
+	spin_lock_init(&taskmgr->slock);
+	ret = vertex_task_init(taskmgr, VERTEX_MAX_GRAPH, itf);
+	if (ret)
+		goto p_err_taskmgr;
+
+	itf->cookie = &sys->graphmgr;
+	itf->state = 0;
+
+	init_waitqueue_head(&itf->reply_wq);
+	spin_lock_init(&itf->process_barrier);
+
+	INIT_WORK(&itf->work_queue, vertex_interface_work_reply_func);
+	__vertex_interface_work_list_init(&itf->work_list,
+			VERTEX_WORK_MAX_COUNT);
+
+	vertex_leave();
+	return 0;
+p_err_taskmgr:
+	vertex_slab_deinit(&itf->slab);
+p_err_slab:
+	return ret;
+}
+
+/* Counterpart of probe: tear down the task manager and slab allocator. */
+void vertex_interface_remove(struct vertex_interface *itf)
+{
+	vertex_task_deinit(&itf->taskmgr);
+	vertex_slab_deinit(&itf->slab);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_INTERFACE_H__
+#define __VERTEX_INTERFACE_H__
+
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+
+#include "vertex-config.h"
+#include "platform/vertex-ctrl.h"
+#include "vertex-slab.h"
+#include "vertex-taskmgr.h"
+
+/* Depth of the interface work pool and payload bytes per work item */
+#define VERTEX_WORK_MAX_COUNT		(20)
+#define VERTEX_WORK_MAX_DATA		(24)
+/* Timeout for a blocking host->firmware command, in jiffies */
+#define VERTEX_COMMAND_TIMEOUT		(3 * HZ)
+
+struct vertex_system;
+
+/* Whether a work item belongs to a blocking or a non-blocking command */
+enum vertex_work_message {
+	VERTEX_WORK_MTYPE_BLK,
+	VERTEX_WORK_MTYPE_NBLK,
+};
+
+/* Bit positions used in vertex_interface.state */
+enum vertex_interface_state {
+	VERTEX_ITF_STATE_OPEN,
+	VERTEX_ITF_STATE_BOOTUP,
+	VERTEX_ITF_STATE_START
+};
+
+/* One unit of reply data passed from the ISR to the reply worker */
+struct vertex_work {
+	struct list_head list;
+	unsigned int valid;
+	unsigned int id;
+	enum vertex_work_message message;
+	unsigned int param0;
+	unsigned int param1;
+	unsigned int param2;
+	unsigned int param3;
+	unsigned char data[VERTEX_WORK_MAX_DATA];
+};
+
+/* Fixed pool of vertex_work items split into a free and a reply list,
+ * both protected by slock.
+ */
+struct vertex_work_list {
+	struct vertex_work work[VERTEX_WORK_MAX_COUNT];
+	spinlock_t slock;
+	struct list_head free_head;
+	unsigned int free_cnt;
+	struct list_head reply_head;
+	unsigned int reply_cnt;
+};
+
+/* Per-system host<->firmware interface state */
+struct vertex_interface {
+	int irq[VERTEX_REG_MAX];
+	void __iomem *regs;
+	struct vertex_slab_allocator slab;
+	struct vertex_taskmgr taskmgr;
+	/* opaque pointer handed to the graph manager queueing path */
+	void *cookie;
+	unsigned long state;
+
+	/* waiters for command replies, and serialization of the single
+	 * in-flight "process" task
+	 */
+	wait_queue_head_t reply_wq;
+	spinlock_t process_barrier;
+	struct vertex_work_list work_list;
+	struct work_struct work_queue;
+
+	/* per-graph outstanding request and last reply storage */
+	struct vertex_task *request[VERTEX_MAX_GRAPH];
+	struct vertex_task *process;
+	struct vertex_work reply[VERTEX_MAX_GRAPH];
+	unsigned int done_cnt;
+
+	struct vertex_mailbox_ctrl *mctrl;
+#ifdef VERTEX_MBOX_EMULATOR
+	/* periodic timer that drives the loop-back mailbox emulator */
+	struct timer_list timer;
+#endif
+
+	struct vertex_system *system;
+};
+
+int vertex_hw_wait_bootup(struct vertex_interface *itf);
+
+int vertex_hw_config(struct vertex_interface *itf, struct vertex_task *itask);
+int vertex_hw_process(struct vertex_interface *itf, struct vertex_task *itask);
+int vertex_hw_create(struct vertex_interface *itf, struct vertex_task *itask);
+int vertex_hw_destroy(struct vertex_interface *itf, struct vertex_task *itask);
+int vertex_hw_init(struct vertex_interface *itf, struct vertex_task *itask);
+int vertex_hw_deinit(struct vertex_interface *itf, struct vertex_task *itask);
+
+int vertex_interface_start(struct vertex_interface *itf);
+int vertex_interface_stop(struct vertex_interface *itf);
+int vertex_interface_open(struct vertex_interface *itf, void *mbox);
+int vertex_interface_close(struct vertex_interface *itf);
+
+int vertex_interface_probe(struct vertex_system *sys);
+void vertex_interface_remove(struct vertex_interface *itf);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-debug.h"
+#include "vertex-io.h"
+
+/*
+ * Copy @size bytes from normal memory @src into device (mailbox) memory
+ * @dst using byte-wide MMIO writes, so unaligned offsets and lengths are
+ * always safe. Returns @dst.
+ */
+void *vertex_io_copy_mem2io(void *dst, void *src, size_t size)
+{
+	unsigned char *src8;
+	unsigned char *dst8;
+
+#ifdef DEBUG_LOG_MAILBOX_DUMP
+	vertex_debug_memdump("mem2io", src, size);
+#endif
+
+	src8 = (unsigned char *)(src);
+	dst8 = (unsigned char *)(dst);
+
+	/* plain byte-by-byte copy via IOW8; no wider accesses are
+	 * attempted, so no alignment handling is needed
+	 */
+	while (size--) {
+		IOW8(*dst8, *src8);
+		dst8++;
+		src8++;
+	}
+
+	return dst;
+}
+
+/*
+ * Copy @size bytes from device (mailbox) memory @src into normal memory
+ * @dst using byte-wide MMIO reads. Returns @dst.
+ */
+void *vertex_io_copy_io2mem(void *dst, void *src, size_t size)
+{
+	unsigned char *src8;
+	unsigned char *dst8;
+
+#ifdef DEBUG_LOG_MAILBOX_DUMP
+	vertex_debug_memdump("io2mem", src, size);
+#endif
+
+	src8 = (unsigned char *)(src);
+	dst8 = (unsigned char *)(dst);
+
+	while (size--) {
+		*dst8 = IOR8(*src8);
+		dst8++;
+		src8++;
+	}
+
+	return dst;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_IO_H__
+#define __VERTEX_IO_H__
+
+#include <linux/io.h>
+
+/* MMIO read accessors: @port is an lvalue in device memory; its address
+ * is taken and handed to the kernel readX() helpers.
+ */
+#define IOR8(port)		readb((const void *)&port)
+#define IOR16(port)		readw((const void *)&port)
+#define IOR32(port)		readl((const void *)&port)
+#define IOR64(port)		readq((const void *)&port)
+
+/* MMIO write accessors; with DEBUG_LOG_IO_WRITE each store is logged
+ * (address and value) before being issued.
+ */
+#ifdef DEBUG_LOG_IO_WRITE
+#define IOW8(port, val) \
+	do { \
+		vertex_dbg("ADDR: %p, VAL: 0x%02x\r\n", &port, val); \
+		writeb(val, &port); \
+	} while (0)
+#define IOW16(port, val) \
+	do { \
+		vertex_dbg("ADDR: %p, VAL: 0x%04x\r\n", &port, val); \
+		writew(val, &port); \
+	} while (0)
+#define IOW32(port, val) \
+	do { \
+		vertex_dbg("ADDR: %p, VAL: 0x%08x\r\n", &port, val); \
+		writel(val, &port); \
+	} while (0)
+#define IOW64(port, val) \
+	do { \
+		vertex_dbg("ADDR: %p, VAL: 0x%016llx\r\n", &port, val); \
+		writeq(val, &port); \
+	} while (0)
+#else
+#define IOW8(port, val)		writeb(val, &port)
+#define IOW16(port, val)	writew(val, &port)
+#define IOW32(port, val)	writel(val, &port)
+#define IOW64(port, val)	writeq(val, &port)
+#endif
+
+void *vertex_io_copy_mem2io(void *dst, void *src, size_t size);
+void *vertex_io_copy_io2mem(void *dst, void *src, size_t size);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "vertex-log.h"
+#include "vertex-core.h"
+#include "vertex-context.h"
+#include "vertex-ioctl.h"
+
+/*
+ * Copy the VS4L_VERTEXIOC_S_GRAPH argument from user space into @karg.
+ * Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_graph(struct vs4l_graph *karg,
+		struct vs4l_graph __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns the number of bytes NOT copied,
+		 * not an errno; translate to -EFAULT so the ioctl path
+		 * reports a proper negative error to user space.
+		 */
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_GRAPH] (%d)\n", ret);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Nothing to write back or free for S_GRAPH; kept for call symmetry. */
+static void __vertex_ioctl_put_graph(struct vs4l_graph *karg,
+		struct vs4l_graph __user *uarg)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Copy the VS4L_VERTEXIOC_S_FORMAT argument and its format array from
+ * user space. On success karg->formats points at a kernel copy that the
+ * caller must release via __vertex_ioctl_put_format(). Returns 0,
+ * -EFAULT or -ENOMEM.
+ */
+static int __vertex_ioctl_get_format(struct vs4l_format_list *karg,
+		struct vs4l_format_list __user *uarg)
+{
+	int ret;
+	size_t size;
+	struct vs4l_format __user *uformat;
+	struct vs4l_format *kformat;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_FORMAT] (%d)\n", ret);
+		goto p_err;
+	}
+
+	uformat = karg->formats;
+
+	/* karg->count is user-controlled: kcalloc() checks the
+	 * count * size multiplication for overflow.
+	 */
+	kformat = kcalloc(karg->count, sizeof(*kformat), GFP_KERNEL);
+	if (!kformat) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kformat (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	/* safe: kcalloc() succeeded, so the product cannot have wrapped */
+	size = karg->count * sizeof(*kformat);
+	if (copy_from_user(kformat, uformat, size)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_FORMAT] (%d)\n", ret);
+		goto p_err_free;
+	}
+
+	karg->formats = kformat;
+
+	vertex_leave();
+	return 0;
+p_err_free:
+	kfree(kformat);
+p_err:
+	return ret;
+}
+
+/* Release the kernel copy of the format array made by the get helper. */
+static void __vertex_ioctl_put_format(struct vs4l_format_list *karg,
+		struct vs4l_format_list __user *uarg)
+{
+	vertex_enter();
+	kfree(karg->formats);
+	vertex_leave();
+}
+
+/*
+ * Copy the VS4L_VERTEXIOC_S_PARAM argument and its param array from user
+ * space. On success karg->params points at a kernel copy released by
+ * __vertex_ioctl_put_param(). Returns 0, -EFAULT or -ENOMEM.
+ */
+static int __vertex_ioctl_get_param(struct vs4l_param_list *karg,
+		struct vs4l_param_list __user *uarg)
+{
+	int ret;
+	size_t size;
+	struct vs4l_param __user *uparam;
+	struct vs4l_param *kparam;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_PARAM] (%d)\n", ret);
+		goto p_err;
+	}
+
+	uparam = karg->params;
+
+	/* karg->count is user-controlled: kcalloc() checks the
+	 * count * size multiplication for overflow.
+	 */
+	kparam = kcalloc(karg->count, sizeof(*kparam), GFP_KERNEL);
+	if (!kparam) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kparam (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	size = karg->count * sizeof(*kparam);
+	if (copy_from_user(kparam, uparam, size)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_PARAM] (%d)\n", ret);
+		goto p_err_free;
+	}
+
+	karg->params = kparam;
+	vertex_leave();
+	return 0;
+p_err_free:
+	kfree(kparam);
+p_err:
+	return ret;
+}
+
+/* Release the kernel copy of the param array made by the get helper. */
+static void __vertex_ioctl_put_param(struct vs4l_param_list *karg,
+		struct vs4l_param_list __user *uarg)
+{
+	vertex_enter();
+	kfree(karg->params);
+	vertex_leave();
+}
+
+/*
+ * Copy the VS4L_VERTEXIOC_S_CTRL argument from user space into @karg.
+ * Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_ctrl(struct vs4l_ctrl *karg,
+		struct vs4l_ctrl __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [S_CTRL] (%d)\n", ret);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Nothing to write back or free for S_CTRL; kept for call symmetry. */
+static void __vertex_ioctl_put_ctrl(struct vs4l_ctrl *karg,
+		struct vs4l_ctrl __user *uarg)
+{
+	vertex_enter();
+	vertex_leave();
+}
+
+/*
+ * Copy a container list and each container's buffer array from user
+ * space (QBUF/DQBUF path). On success karg->containers points at a fully
+ * kernel-resident copy released by __vertex_ioctl_put_container().
+ * Returns 0, -EFAULT or -ENOMEM.
+ *
+ * Note on cleanup: after the bulk copy, each kcon[i].buffers still holds
+ * a *user-space* pointer value until it is replaced with a kernel
+ * allocation. The error path must therefore only ever free entries that
+ * were actually replaced — kfree() on a user pointer corrupts the heap.
+ */
+static int __vertex_ioctl_get_container(struct vs4l_container_list *karg,
+		struct vs4l_container_list __user *uarg)
+{
+	int ret;
+	size_t size;
+	unsigned int idx = 0;
+	struct vs4l_container *ucon;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer *ubuf;
+	struct vs4l_buffer *kbuf;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [CONTAINER] (%d)\n", ret);
+		goto p_err;
+	}
+
+	ucon = karg->containers;
+
+	/* user-controlled count: kcalloc() checks for mul overflow */
+	kcon = kcalloc(karg->count, sizeof(*kcon), GFP_KERNEL);
+	if (!kcon) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kcon (%u)\n", karg->count);
+		goto p_err;
+	}
+
+	karg->containers = kcon;
+
+	size = karg->count * sizeof(*kcon);
+	if (copy_from_user(kcon, ucon, size)) {
+		ret = -EFAULT;
+		vertex_err("Copy failed [CONTAINER] (%d)\n", ret);
+		goto p_err_free;
+	}
+
+	for (idx = 0; idx < karg->count; ++idx) {
+		ubuf = kcon[idx].buffers;
+		/* drop the copied user pointer before allocating, so the
+		 * unwind below never sees it
+		 */
+		kcon[idx].buffers = NULL;
+
+		kbuf = kcalloc(kcon[idx].count, sizeof(*kbuf), GFP_KERNEL);
+		if (!kbuf) {
+			ret = -ENOMEM;
+			vertex_err("Failed to alloc kbuf (%u)\n",
+					kcon[idx].count);
+			goto p_err_free;
+		}
+
+		kcon[idx].buffers = kbuf;
+
+		size = kcon[idx].count * sizeof(*kbuf);
+		if (copy_from_user(kbuf, ubuf, size)) {
+			ret = -EFAULT;
+			vertex_err("Copy failed [CONTAINER] (%d)\n", ret);
+			/* kbuf is owned by kcon[idx]; include it in unwind */
+			idx++;
+			goto p_err_free;
+		}
+	}
+
+	vertex_leave();
+	return 0;
+p_err_free:
+	/* free only the buffer arrays that were actually allocated */
+	while (idx-- > 0)
+		kfree(kcon[idx].buffers);
+
+	kfree(kcon);
+	karg->containers = NULL;
+p_err:
+	return ret;
+}
+
+/*
+ * Write the updated list header (id/index/flags/timestamps/user params)
+ * back to user space, then free the kernel copies of every container's
+ * buffer array and the container array itself. Copy-back failures are
+ * only logged (void return).
+ */
+static void __vertex_ioctl_put_container(struct vs4l_container_list *karg,
+		struct vs4l_container_list __user *uarg)
+{
+	unsigned int idx;
+
+	vertex_enter();
+	if (put_user(karg->id, &uarg->id) ||
+			put_user(karg->index, &uarg->index) ||
+			put_user(karg->flags, &uarg->flags) ||
+			copy_to_user(uarg->timestamp, karg->timestamp,
+				sizeof(karg->timestamp)) ||
+			copy_to_user(uarg->user_params, karg->user_params,
+				sizeof(karg->user_params))) {
+		vertex_err("Copy failed to user [CONTAINER]\n");
+	}
+
+	for (idx = 0; idx < karg->count; ++idx)
+		kfree(karg->containers[idx].buffers);
+
+	kfree(karg->containers);
+	vertex_leave();
+}
+
+/*
+ * Copy the LOAD_KERNEL_BINARY argument from user space and zero the
+ * output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_load_kernel_binary(
+		struct vertex_ioc_load_kernel_binary *karg,
+		struct vertex_ioc_load_kernel_binary __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [Load kernel binary] (%d)\n", ret);
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Write the full result struct back to user space; failure is logged. */
+static void __vertex_ioctl_put_load_kernel_binary(
+		struct vertex_ioc_load_kernel_binary *karg,
+		struct vertex_ioc_load_kernel_binary __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	ret = copy_to_user(uarg, karg, sizeof(*karg));
+	if (ret)
+		vertex_err("Copy failed to user [Load kernel binary]\n");
+
+	vertex_leave();
+}
+
+/*
+ * Copy the LOAD_GRAPH_INFO argument from user space and zero the
+ * output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_load_graph_info(
+		struct vertex_ioc_load_graph_info *karg,
+		struct vertex_ioc_load_graph_info __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [Load graph info] (%d)\n", ret);
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Write the full result struct back to user space; failure is logged. */
+static void __vertex_ioctl_put_load_graph_info(
+		struct vertex_ioc_load_graph_info *karg,
+		struct vertex_ioc_load_graph_info __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	ret = copy_to_user(uarg, karg, sizeof(*karg));
+	if (ret)
+		vertex_err("Copy failed to user [Load graph info]\n");
+
+	vertex_leave();
+}
+
+/*
+ * Copy the UNLOAD_GRAPH_INFO argument from user space and zero the
+ * output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_unload_graph_info(
+		struct vertex_ioc_unload_graph_info *karg,
+		struct vertex_ioc_unload_graph_info __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [Unload graph info] (%d)\n", ret);
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Write the full result struct back to user space; failure is logged. */
+static void __vertex_ioctl_put_unload_graph_info(
+		struct vertex_ioc_unload_graph_info *karg,
+		struct vertex_ioc_unload_graph_info __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	ret = copy_to_user(uarg, karg, sizeof(*karg));
+	if (ret)
+		vertex_err("Copy failed to user [Unload graph info]\n");
+
+	vertex_leave();
+}
+
+/*
+ * Copy the EXECUTE_SUBMODEL argument from user space and zero the
+ * output-only fields. Returns 0 or -EFAULT.
+ */
+static int __vertex_ioctl_get_execute_submodel(
+		struct vertex_ioc_execute_submodel *karg,
+		struct vertex_ioc_execute_submodel __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	if (copy_from_user(karg, uarg, sizeof(*karg))) {
+		/* copy_from_user() returns bytes-not-copied, not an errno */
+		ret = -EFAULT;
+		vertex_err("Copy failed [Execute submodel] (%d)\n", ret);
+		goto p_err;
+	}
+
+	memset(karg->timestamp, 0, sizeof(karg->timestamp));
+	memset(karg->reserved, 0, sizeof(karg->reserved));
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Write the full result struct back to user space; failure is logged. */
+static void __vertex_ioctl_put_execute_submodel(
+		struct vertex_ioc_execute_submodel *karg,
+		struct vertex_ioc_execute_submodel __user *uarg)
+{
+	int ret;
+
+	vertex_enter();
+	ret = copy_to_user(uarg, karg, sizeof(*karg));
+	if (ret)
+		vertex_err("Copy failed to user [Execute submodel]\n");
+
+	vertex_leave();
+}
+
+/*
+ * Main ioctl dispatcher for the vertex character device. Each command
+ * follows the same pattern: copy the argument into a stack-resident
+ * union via a get helper, invoke the context's registered op, then let
+ * the matching put helper write results back and free kernel copies.
+ * Returns the op's result, or the get helper's error.
+ */
+long vertex_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct vertex_context *vctx;
+	const struct vertex_ioctl_ops *ops;
+	union vertex_ioc_arg karg;
+	void __user *uarg;
+
+	vertex_enter();
+	vctx = file->private_data;
+	ops = vctx->core->ioc_ops;
+	uarg = (void __user *)arg;
+
+	switch (cmd) {
+	case VS4L_VERTEXIOC_S_GRAPH:
+		ret = __vertex_ioctl_get_graph(&karg.graph, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_graph(vctx, &karg.graph);
+		__vertex_ioctl_put_graph(&karg.graph, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_FORMAT:
+		ret = __vertex_ioctl_get_format(&karg.flist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_format(vctx, &karg.flist);
+		__vertex_ioctl_put_format(&karg.flist, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_PARAM:
+		ret = __vertex_ioctl_get_param(&karg.plist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_param(vctx, &karg.plist);
+		__vertex_ioctl_put_param(&karg.plist, uarg);
+		break;
+	case VS4L_VERTEXIOC_S_CTRL:
+		ret = __vertex_ioctl_get_ctrl(&karg.ctrl, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->set_ctrl(vctx, &karg.ctrl);
+		__vertex_ioctl_put_ctrl(&karg.ctrl, uarg);
+		break;
+	case VS4L_VERTEXIOC_STREAM_ON:
+		ret = ops->streamon(vctx);
+		break;
+	case VS4L_VERTEXIOC_STREAM_OFF:
+		ret = ops->streamoff(vctx);
+		break;
+	case VS4L_VERTEXIOC_QBUF:
+		ret = __vertex_ioctl_get_container(&karg.clist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->qbuf(vctx, &karg.clist);
+		__vertex_ioctl_put_container(&karg.clist, uarg);
+		break;
+	case VS4L_VERTEXIOC_DQBUF:
+		ret = __vertex_ioctl_get_container(&karg.clist, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->dqbuf(vctx, &karg.clist);
+		__vertex_ioctl_put_container(&karg.clist, uarg);
+		break;
+	case VERTEX_IOC_LOAD_KERNEL_BINARY:
+		ret = __vertex_ioctl_get_load_kernel_binary(&karg.kernel_bin,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->load_kernel_binary(vctx, &karg.kernel_bin);
+		__vertex_ioctl_put_load_kernel_binary(&karg.kernel_bin, uarg);
+		break;
+	case VERTEX_IOC_LOAD_GRAPH_INFO:
+		ret = __vertex_ioctl_get_load_graph_info(&karg.load_ginfo,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->load_graph_info(vctx, &karg.load_ginfo);
+		__vertex_ioctl_put_load_graph_info(&karg.load_ginfo, uarg);
+		break;
+	case VERTEX_IOC_UNLOAD_GRAPH_INFO:
+		ret = __vertex_ioctl_get_unload_graph_info(&karg.unload_ginfo,
+				uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->unload_graph_info(vctx, &karg.unload_ginfo);
+		__vertex_ioctl_put_unload_graph_info(&karg.unload_ginfo, uarg);
+		break;
+	case VERTEX_IOC_EXECUTE_SUBMODEL:
+		ret = __vertex_ioctl_get_execute_submodel(&karg.exec, uarg);
+		if (ret)
+			goto p_err;
+
+		ret = ops->execute_submodel(vctx, &karg.exec);
+		__vertex_ioctl_put_execute_submodel(&karg.exec, uarg);
+		break;
+
+	default:
+		ret = -EINVAL;
+		vertex_err("ioc command(%x) is not supported\n", cmd);
+		goto p_err;
+	}
+
+	vertex_leave();
+p_err:
+	return ret;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_IOCTL_H__
+#define __VERTEX_IOCTL_H__
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "vs4l.h"
+#include "vertex-common-type.h"
+
+/* Vendor-specific control IDs used with VS4L_VERTEXIOC_S_CTRL */
+#define VERTEX_CTRL_VERTEX_BASE		(0x00010000)
+#define VERTEX_CTRL_DUMP		(VERTEX_CTRL_VERTEX_BASE + 1)
+#define VERTEX_CTRL_MODE		(VERTEX_CTRL_VERTEX_BASE + 2)
+#define VERTEX_CTRL_TEST		(VERTEX_CTRL_VERTEX_BASE + 3)
+
+struct vertex_context;
+
+/* User<->kernel argument for VERTEX_IOC_LOAD_KERNEL_BINARY.
+ * timestamp/reserved are output-only and zeroed on entry.
+ */
+struct vertex_ioc_load_kernel_binary {
+	unsigned int size;
+	unsigned int global_id;
+	int kernel_fd;
+	unsigned int kernel_size;
+	int ret;
+	struct timespec timestamp[4];
+	int reserved[2];
+};
+
+/* Argument for VERTEX_IOC_LOAD_GRAPH_INFO */
+struct vertex_ioc_load_graph_info {
+	unsigned int size;
+	struct vertex_common_graph_info graph_info;
+	int ret;
+	struct timespec timestamp[4];
+	int reserved[2];
+};
+
+/* Argument for VERTEX_IOC_UNLOAD_GRAPH_INFO */
+struct vertex_ioc_unload_graph_info {
+	unsigned int size;
+	struct vertex_common_graph_info graph_info;
+	int ret;
+	struct timespec timestamp[4];
+	int reserved[2];
+};
+
+/* Argument for VERTEX_IOC_EXECUTE_SUBMODEL */
+struct vertex_ioc_execute_submodel {
+	unsigned int size;
+	struct vertex_common_execute_info execute_info;
+	int ret;
+	struct timespec timestamp[4];
+	int reserved[2];
+};
+
+/* Driver-private ioctl numbers ('V' magic, codes 0-3) */
+#define VERTEX_IOC_LOAD_KERNEL_BINARY		\
+	_IOWR('V', 0, struct vertex_ioc_load_kernel_binary)
+#define VERTEX_IOC_LOAD_GRAPH_INFO		\
+	_IOWR('V', 1, struct vertex_ioc_load_graph_info)
+#define VERTEX_IOC_UNLOAD_GRAPH_INFO		\
+	_IOWR('V', 2, struct vertex_ioc_unload_graph_info)
+#define VERTEX_IOC_EXECUTE_SUBMODEL		\
+	_IOWR('V', 3, struct vertex_ioc_execute_submodel)
+
+/* Stack-resident scratch area shared by all ioctl handlers */
+union vertex_ioc_arg {
+	struct vs4l_graph graph;
+	struct vs4l_format_list flist;
+	struct vs4l_param_list plist;
+	struct vs4l_ctrl ctrl;
+	struct vs4l_container_list clist;
+
+	struct vertex_ioc_load_kernel_binary kernel_bin;
+	struct vertex_ioc_load_graph_info load_ginfo;
+	struct vertex_ioc_unload_graph_info unload_ginfo;
+	struct vertex_ioc_execute_submodel exec;
+};
+
+/* Per-core ioctl callback table; every op takes kernel-resident args */
+struct vertex_ioctl_ops {
+	/* TODO: vs4l(temp) */
+	int (*set_graph)(struct vertex_context *vctx,
+			struct vs4l_graph *graph);
+	int (*set_format)(struct vertex_context *vctx,
+			struct vs4l_format_list *flist);
+	int (*set_param)(struct vertex_context *vctx,
+			struct vs4l_param_list *plist);
+	int (*set_ctrl)(struct vertex_context *vctx,
+			struct vs4l_ctrl *ctrl);
+	int (*qbuf)(struct vertex_context *vctx,
+			struct vs4l_container_list *clist);
+	int (*dqbuf)(struct vertex_context *vctx,
+			struct vs4l_container_list *clist);
+	int (*streamon)(struct vertex_context *vctx);
+	int (*streamoff)(struct vertex_context *vctx);
+	/* dal */
+	int (*load_kernel_binary)(struct vertex_context *vctx,
+			struct vertex_ioc_load_kernel_binary *args);
+	int (*load_graph_info)(struct vertex_context *vctx,
+			struct vertex_ioc_load_graph_info *args);
+	int (*unload_graph_info)(struct vertex_context *vctx,
+			struct vertex_ioc_unload_graph_info *args);
+	int (*execute_submodel)(struct vertex_context *vctx,
+			struct vertex_ioc_execute_submodel *args);
+};
+
+long vertex_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#if defined(CONFIG_COMPAT)
+long vertex_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg);
+#else
+static inline long vertex_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg) { return 0; };
+#endif
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-kernel-binary.h"
+
+/*
+ * Allocate a kernel-binary descriptor for dma-buf @fd, map it through
+ * the system memory ops and register it on @vctx's binary list.
+ * Returns 0, -ENOMEM or the mapping error.
+ */
+int vertex_kernel_binary_add(struct vertex_context *vctx, unsigned int id,
+		int fd, unsigned int size)
+{
+	int ret;
+	struct vertex_kernel_binary *kbin;
+	struct vertex_memory *mem;
+	unsigned long flags;
+
+	vertex_enter();
+	kbin = kzalloc(sizeof(*kbin), GFP_KERNEL);
+	if (!kbin) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc kernel binary\n");
+		goto p_err;
+	}
+	kbin->global_id = id;
+	kbin->buffer.m.fd = fd;
+	kbin->buffer.size = size;
+
+	mem = &vctx->core->system->memory;
+	ret = mem->mops->map_dmabuf(mem, &kbin->buffer);
+	if (ret)
+		goto p_err_map;
+
+	/* Fully initialize the node (including the back-pointer) before
+	 * publishing it on the context list, so a concurrent walker never
+	 * observes a half-initialized entry.
+	 */
+	kbin->vctx = vctx;
+
+	spin_lock_irqsave(&vctx->binary_slock, flags);
+	vctx->binary_count++;
+	list_add_tail(&kbin->list, &vctx->binary_list);
+	spin_unlock_irqrestore(&vctx->binary_slock, flags);
+
+	vertex_leave();
+	return 0;
+p_err_map:
+	kfree(kbin);
+p_err:
+	return ret;
+}
+
+/*
+ * Unlink @kbin from its context's binary list (under the list lock),
+ * unmap the dma-buf and free the descriptor.
+ */
+void vertex_kernel_binary_remove(struct vertex_kernel_binary *kbin)
+{
+	struct vertex_context *vctx;
+	unsigned long flags;
+	struct vertex_memory *mem;
+
+	vertex_enter();
+	vctx = kbin->vctx;
+
+	spin_lock_irqsave(&vctx->binary_slock, flags);
+	list_del(&kbin->list);
+	vctx->binary_count--;
+	spin_unlock_irqrestore(&vctx->binary_slock, flags);
+
+	mem = &vctx->core->system->memory;
+	mem->mops->unmap_dmabuf(mem, &kbin->buffer);
+
+	kfree(kbin);
+
+	vertex_leave();
+}
+
+/* Remove every registered kernel binary of @vctx (safe iteration, since
+ * each remove deletes the current node from the list).
+ */
+void vertex_kernel_binary_all_remove(struct vertex_context *vctx)
+{
+	struct vertex_kernel_binary *kbin, *temp;
+
+	vertex_enter();
+	list_for_each_entry_safe(kbin, temp, &vctx->binary_list, list) {
+		vertex_kernel_binary_remove(kbin);
+	}
+	vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_KERNEL_BINARY_H__
+#define __VERTEX_KERNEL_BINARY_H__
+
+#include "vertex-context.h"
+#include "vertex-memory.h"
+
+struct vertex_kernel_binary {
+ unsigned int global_id;
+ struct vertex_buffer buffer;
+ struct list_head list;
+
+ struct vertex_context *vctx;
+};
+
+int vertex_kernel_binary_add(struct vertex_context *vctx, unsigned int id,
+ int fd, unsigned int size);
+void vertex_kernel_binary_remove(struct vertex_kernel_binary *kbin);
+void vertex_kernel_binary_all_remove(struct vertex_context *vctx);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_LOG_H__
+#define __VERTEX_LOG_H__
+
+#include <linux/kernel.h>
+
+#include "vertex-config.h"
+
+/* Driver log helpers; error/warn/debug include the call-site line number */
+#define vertex_err(fmt, args...) \
+	pr_err("[VIPx-VERTEX][ERR](%d):" fmt, __LINE__, ##args)
+
+#define vertex_warn(fmt, args...) \
+	pr_warn("[VIPx-VERTEX][WRN](%d):" fmt, __LINE__, ##args)
+
+#define vertex_info(fmt, args...) \
+	pr_info("[VIPx-VERTEX]:" fmt, ##args)
+
+#define vertex_dbg(fmt, args...) \
+	pr_info("[VIPx-VERTEX][DBG](%d):" fmt, __LINE__, ##args)
+
+/* Function-trace hooks; compiled out unless DEBUG_LOG_CALL_TREE is set */
+#if defined(DEBUG_LOG_CALL_TREE)
+#define vertex_enter()		vertex_info("[%s] enter\n", __func__)
+#define vertex_leave()		vertex_info("[%s] leave\n", __func__)
+#define vertex_check()		vertex_info("[%s] check\n", __func__)
+#else
+#define vertex_enter()
+#define vertex_leave()
+#define vertex_check()
+#endif
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+
+#include "vertex-log.h"
+#include "vertex-io.h"
+#include "vertex-mailbox.h"
+
+/* Free slots left in an h2f mailbox; negative means corrupted indices. */
+static inline ssize_t __vertex_mbox_get_remain_size(
+		struct vertex_mailbox_h2f *mbox)
+{
+	vertex_check();
+	return MAX_MESSAGE_CNT - (mbox->wmsg_idx - mbox->rmsg_idx);
+}
+
+/*
+ * Ensure the selected host->firmware mailbox has room for one message.
+ * With @wait set, polls up to 1000 times with 10us delays (~10ms total).
+ * Returns 0 when space is available, -EBUSY when still full, -EFAULT on
+ * corrupted indices, -EINVAL on a bad @type.
+ */
+int vertex_mbox_check_full(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		int wait)
+{
+	int ret;
+	struct vertex_mailbox_h2f *mbox;
+	int try_count = 1000;
+	ssize_t remain_size;
+
+	vertex_enter();
+	if (type == VERTEX_MTYPE_H2F_NORMAL) {
+		mbox = &mctrl->stack.h2f;
+	} else if (type == VERTEX_MTYPE_H2F_URGENT) {
+		mbox = &mctrl->urgent_stack.h2f;
+	} else {
+		ret = -EINVAL;
+		vertex_err("invalid mbox type(%u)\n", type);
+		goto p_err;
+	}
+
+	while (try_count) {
+		remain_size = __vertex_mbox_get_remain_size(mbox);
+		if (remain_size > 0) {
+			break;
+		} else if (remain_size < 0) {
+			ret = -EFAULT;
+			vertex_err("index of mbox(%u) is invalid (%u/%u)\n",
+					type, mbox->wmsg_idx, mbox->rmsg_idx);
+			goto p_err;
+		} else if (wait) {
+			vertex_warn("mbox(%u) has no remain space (%u/%u/%d)\n",
+					type, mbox->wmsg_idx, mbox->rmsg_idx,
+					try_count);
+			udelay(10);
+			try_count--;
+		} else {
+			break;
+		}
+	}
+
+	if (!remain_size) {
+		ret = -EBUSY;
+		vertex_err("mbox(%u) has no remain space (%u/%u)\n",
+				type, mbox->wmsg_idx, mbox->rmsg_idx);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Copy one message of @size bytes into the selected host->firmware
+ * mailbox slot and advance the write index. Caller must have checked
+ * for space with vertex_mbox_check_full(). Returns 0 or -EINVAL.
+ */
+int vertex_mbox_write(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		void *msg, size_t size)
+{
+	int ret;
+	struct vertex_mailbox_h2f *mbox;
+	void *wptr;
+	struct vertex_message *debug;
+
+	vertex_enter();
+	if (size > sizeof(struct vertex_message)) {
+		ret = -EINVAL;
+		vertex_err("message size(%zu) for mbox(%u) is invalid\n",
+				size, type);
+		goto p_err;
+	}
+
+	if (type == VERTEX_MTYPE_H2F_NORMAL) {
+		mbox = &mctrl->stack.h2f;
+	} else if (type == VERTEX_MTYPE_H2F_URGENT) {
+		mbox = &mctrl->urgent_stack.h2f;
+	} else {
+		ret = -EINVAL;
+		vertex_err("invalid mbox type(%u)\n", type);
+		goto p_err;
+	}
+
+	/* ring buffer: slot index wraps at MAX_MESSAGE_CNT */
+	wptr = &mbox->msg[mbox->wmsg_idx % MAX_MESSAGE_CNT];
+	vertex_io_copy_mem2io(wptr, msg, size);
+	mbox->wmsg_idx++;
+
+	debug = msg;
+	vertex_dbg("[MBOX-W] cmd:%u/type:%u/wmsg_idx:%u/rmsg_idx:%u\n",
+			debug->type, type, mbox->wmsg_idx, mbox->rmsg_idx);
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* True when the f2h mailbox holds no unread messages. */
+static inline int __vertex_mbox_is_empty(struct vertex_mailbox_f2h *mbox)
+{
+	vertex_check();
+	return mbox->wmsg_idx == mbox->rmsg_idx;
+}
+
+/*
+ * Check whether the selected firmware->host mailbox has a pending
+ * message. With @wait set, polls up to 1000 times with 10us delays.
+ * Returns 0 when a message is available, -EFAULT when (still) empty,
+ * -EINVAL on a bad @type.
+ */
+int vertex_mbox_check_reply(struct vertex_mailbox_ctrl *mctrl,
+		unsigned int type, int wait)
+{
+	int ret;
+	struct vertex_mailbox_f2h *mbox;
+	int try_count = 1000;
+
+	vertex_enter();
+	if (type == VERTEX_MTYPE_F2H_NORMAL) {
+		mbox = &mctrl->stack.f2h;
+	} else if (type == VERTEX_MTYPE_F2H_URGENT) {
+		mbox = &mctrl->urgent_stack.f2h;
+	} else {
+		ret = -EINVAL;
+		vertex_err("invalid mbox type (%u)\n", type);
+		goto p_err;
+	}
+
+	while (try_count) {
+		ret = __vertex_mbox_is_empty(mbox);
+		if (ret) {
+			if (!wait)
+				break;
+
+			udelay(10);
+			try_count--;
+		} else {
+			break;
+		}
+	}
+
+	if (ret) {
+		/* still empty after polling (or !wait): report -EFAULT */
+		ret = -EFAULT;
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Read one full vertex_message from the selected firmware->host mailbox
+ * into @msg and advance the read index. Caller must have checked for a
+ * pending message with vertex_mbox_check_reply(). Returns 0 or -EINVAL.
+ */
+int vertex_mbox_read(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		void *msg)
+{
+	int ret;
+	struct vertex_mailbox_f2h *mbox;
+	void *rptr;
+	struct vertex_message *debug;
+
+	vertex_enter();
+	if (type == VERTEX_MTYPE_F2H_NORMAL) {
+		mbox = &mctrl->stack.f2h;
+	} else if (type == VERTEX_MTYPE_F2H_URGENT) {
+		mbox = &mctrl->urgent_stack.f2h;
+	} else {
+		ret = -EINVAL;
+		vertex_err("invalid mbox type(%u)\n", type);
+		goto p_err;
+	}
+
+	rptr = &mbox->msg[mbox->rmsg_idx % MAX_MESSAGE_CNT];
+	vertex_io_copy_io2mem(msg, rptr, sizeof(struct vertex_message));
+	mbox->rmsg_idx++;
+
+	debug = rptr;
+	vertex_dbg("[MBOX-R] cmd:%u/type:%u/wmsg_idx:%u/rmsg_idx:%u\n",
+			debug->type, type, mbox->wmsg_idx, mbox->rmsg_idx);
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+#ifdef VERTEX_MBOX_EMULATOR
+static inline ssize_t __vertex_mbox_emul_get_remain_size(
+ struct vertex_mailbox_f2h *mbox)
+{
+ vertex_check();
+ return MAX_MESSAGE_CNT - (mbox->wmsg_idx - mbox->rmsg_idx);
+}
+
+static int __vertex_mbox_emul_check_full(struct vertex_mailbox_ctrl *mctrl,
+ unsigned int type, int wait)
+{
+ int ret;
+ struct vertex_mailbox_f2h *mbox;
+ int try_count = 1000;
+ ssize_t remain_size;
+
+ vertex_enter();
+ if (type == VERTEX_MTYPE_F2H_NORMAL) {
+ mbox = &mctrl->stack.f2h;
+ } else if (type == VERTEX_MTYPE_F2H_URGENT) {
+ mbox = &mctrl->urgent_stack.f2h;
+ } else {
+ ret = -EINVAL;
+ vertex_err("invalid mbox type(%u)\n", type);
+ goto p_err;
+ }
+
+ while (try_count) {
+ remain_size = __vertex_mbox_emul_get_remain_size(mbox);
+ if (remain_size > 0) {
+ break;
+ } else if (remain_size < 0) {
+ ret = -EFAULT;
+ vertex_err("index of mbox(%u) is invalid (%u/%u)\n",
+ type, mbox->wmsg_idx, mbox->rmsg_idx);
+ goto p_err;
+ } else {
+ vertex_warn("mbox(%u) has no remain space (%u/%u/%d)\n",
+ type, mbox->wmsg_idx, mbox->rmsg_idx,
+ try_count);
+ udelay(10);
+ try_count--;
+ }
+ }
+
+ if (!remain_size) {
+ ret = -EBUSY;
+ vertex_err("mbox(%u) has no remain space (%u/%u)\n",
+ type, mbox->wmsg_idx, mbox->rmsg_idx);
+ goto p_err;
+ }
+
+ vertex_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static int __vertex_mbox_emul_write(struct vertex_mailbox_ctrl *mctrl,
+ unsigned int type, void *msg, size_t size)
+{
+ int ret;
+ struct vertex_mailbox_f2h *mbox;
+ void *wptr;
+
+ vertex_enter();
+ if (size > sizeof(struct vertex_message)) {
+ ret = -EINVAL;
+ vertex_err("message size(%zu) for mbox(%u) is invalid\n",
+ size, type);
+ goto p_err;
+ }
+
+ if (type == VERTEX_MTYPE_F2H_NORMAL) {
+ mbox = &mctrl->stack.f2h;
+ } else if (type == VERTEX_MTYPE_F2H_URGENT) {
+ mbox = &mctrl->urgent_stack.f2h;
+ } else {
+ ret = -EINVAL;
+ vertex_err("invalid mbox type(%u)\n", type);
+ goto p_err;
+ }
+
+ wptr = &mbox->msg[mbox->wmsg_idx % MAX_MESSAGE_CNT];
+ vertex_io_copy_mem2io(wptr, msg, size);
+ mbox->wmsg_idx++;
+
+ vertex_dbg("[MBOX-W] type:%u/wmsg_idx:%u/rmsg_idx:%u\n",
+ type, mbox->wmsg_idx, mbox->rmsg_idx);
+ vertex_leave();
+ return 0;
+p_err:
+ return ret;
+}
+
+static inline int __vertex_mbox_emul_is_empty(struct vertex_mailbox_h2f *mbox)
+{
+ vertex_check();
+ return mbox->wmsg_idx == mbox->rmsg_idx;
+}
+
+/*
+ * Check whether a host-to-firmware reply is pending; when 'wait' is set,
+ * poll (10us steps, bounded) until one arrives. Returns 0 when a message
+ * is available, -EFAULT when the ring stayed empty.
+ */
+static int __vertex_mbox_emul_check_reply(struct vertex_mailbox_ctrl *mctrl,
+		unsigned int type, int wait)
+{
+	struct vertex_mailbox_h2f *mbox;
+	int try_count = 1000;
+	int ret;
+
+	vertex_enter();
+	switch (type) {
+	case VERTEX_MTYPE_H2F_NORMAL:
+		mbox = &mctrl->stack.h2f;
+		break;
+	case VERTEX_MTYPE_H2F_URGENT:
+		mbox = &mctrl->urgent_stack.h2f;
+		break;
+	default:
+		ret = -EINVAL;
+		vertex_err("invalid mbox type (%u)\n", type);
+		goto p_err;
+	}
+
+	/* Poll until a reply shows up; bail out immediately when !wait */
+	while (try_count > 0) {
+		ret = __vertex_mbox_emul_is_empty(mbox);
+		if (!ret || !wait)
+			break;
+		udelay(10);
+		try_count--;
+	}
+
+	if (ret) {
+		/* Still empty: no reply arrived within the polling budget */
+		ret = -EFAULT;
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Pop the oldest message from the selected host-to-firmware ring. */
+static int __vertex_mbox_emul_read(struct vertex_mailbox_ctrl *mctrl,
+		unsigned int type, void *msg)
+{
+	struct vertex_mailbox_h2f *mbox;
+	void *rptr;
+	int ret;
+
+	vertex_enter();
+	switch (type) {
+	case VERTEX_MTYPE_H2F_NORMAL:
+		mbox = &mctrl->stack.h2f;
+		break;
+	case VERTEX_MTYPE_H2F_URGENT:
+		mbox = &mctrl->urgent_stack.h2f;
+		break;
+	default:
+		ret = -EINVAL;
+		vertex_err("invalid mbox type(%u)\n", type);
+		goto p_err;
+	}
+
+	/* Copy the oldest pending message out, then retire its slot */
+	rptr = &mbox->msg[mbox->rmsg_idx % MAX_MESSAGE_CNT];
+	vertex_io_copy_io2mem(msg, rptr, sizeof(struct vertex_message));
+	mbox->rmsg_idx++;
+
+	vertex_dbg("[MBOX-R] type:%u/wmsg_idx:%u/rmsg_idx:%u\n",
+			type, mbox->wmsg_idx, mbox->rmsg_idx);
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Emulation loopback: pop one pending request from each host-to-firmware
+ * mailbox (urgent first, then normal) and echo it back on the matching
+ * firmware-to-host mailbox. A failure on the urgent path intentionally
+ * falls through to the normal path rather than aborting. Returns non-zero
+ * when at least one message was echoed, telling the caller to raise the
+ * emulated interrupt.
+ */
+int vertex_mbox_emul_handler(struct vertex_mailbox_ctrl *mctrl)
+{
+	int ret;
+	struct vertex_message msg;
+	int call_isr = 0;
+
+	vertex_enter();
+	/* Urgent channel: read one request and echo it back */
+	ret = __vertex_mbox_emul_check_reply(mctrl, VERTEX_MTYPE_H2F_URGENT, 0);
+	if (ret)
+		goto p_normal;
+
+	ret = __vertex_mbox_emul_read(mctrl, VERTEX_MTYPE_H2F_URGENT, &msg);
+	if (ret)
+		goto p_normal;
+
+	ret = __vertex_mbox_emul_check_full(mctrl, VERTEX_MTYPE_F2H_URGENT, 0);
+	if (ret)
+		goto p_normal;
+
+	ret = __vertex_mbox_emul_write(mctrl, VERTEX_MTYPE_F2H_URGENT, &msg,
+			sizeof(msg));
+	if (ret)
+		goto p_normal;
+
+	call_isr = 1;
+p_normal:
+	/* Normal channel: same read-and-echo sequence */
+	ret = __vertex_mbox_emul_check_reply(mctrl, VERTEX_MTYPE_H2F_NORMAL, 0);
+	if (ret)
+		goto p_err;
+
+	ret = __vertex_mbox_emul_read(mctrl, VERTEX_MTYPE_H2F_NORMAL, &msg);
+	if (ret)
+		goto p_err;
+
+	ret = __vertex_mbox_emul_check_full(mctrl, VERTEX_MTYPE_F2H_NORMAL, 0);
+	if (ret)
+		goto p_err;
+
+	ret = __vertex_mbox_emul_write(mctrl, VERTEX_MTYPE_F2H_NORMAL, &msg,
+			sizeof(msg));
+	if (ret)
+		goto p_err;
+
+	call_isr = 1;
+p_err:
+	vertex_leave();
+	/* Non-zero means "messages were echoed; fire the emulated ISR" */
+	return call_isr;
+}
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_MAILBOX_H__
+#define __VERTEX_MAILBOX_H__
+
+#include "vertex-message.h"
+
+/* Number of message slots in each mailbox ring */
+#define MAX_MESSAGE_CNT (8)
+/* Values for the 'wait' argument of the check helpers */
+#define VERTEX_MAILBOX_WAIT (true)
+#define VERTEX_MAILBOX_NOWAIT (false)
+
+/* Host-to-firmware mailbox selectors */
+enum vertex_mailbox_h2f_type {
+	VERTEX_MTYPE_H2F_NORMAL,
+	VERTEX_MTYPE_H2F_URGENT,
+};
+
+/* Firmware-to-host mailbox selectors */
+enum vertex_mailbox_f2h_type {
+	VERTEX_MTYPE_F2H_NORMAL,
+	VERTEX_MTYPE_F2H_URGENT,
+};
+
+/*
+ * Shared-memory message ring. Indices increase monotonically and are
+ * reduced modulo MAX_MESSAGE_CNT when indexing 'msg'.
+ */
+struct vertex_mailbox_h2f {
+	/* Host permission of wmsg is RW */
+	unsigned int wmsg_idx;
+	/* Host permission of rmsg is R_ONLY */
+	unsigned int rmsg_idx;
+	struct vertex_message msg[MAX_MESSAGE_CNT];
+};
+
+struct vertex_mailbox_f2h {
+	/* Host permission of wmsg is RW */
+	unsigned int wmsg_idx;
+	/* Host permission of rmsg is R_ONLY */
+	unsigned int rmsg_idx;
+	struct vertex_message msg[MAX_MESSAGE_CNT];
+};
+
+/* One bidirectional mailbox pair */
+struct vertex_mailbox_stack {
+	struct vertex_mailbox_h2f h2f;
+	struct vertex_mailbox_f2h f2h;
+};
+
+/* Overall shared-memory layout: normal and urgent mailbox pairs */
+struct vertex_mailbox_ctrl {
+	struct vertex_mailbox_stack stack;
+	struct vertex_mailbox_stack urgent_stack;
+};
+
+int vertex_mbox_check_full(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		int wait);
+int vertex_mbox_write(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		void *msg, size_t size);
+
+int vertex_mbox_check_reply(struct vertex_mailbox_ctrl *mctrl,
+		unsigned int type, int wait);
+int vertex_mbox_read(struct vertex_mailbox_ctrl *mctrl, unsigned int type,
+		void *msg);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <asm/cacheflush.h>
+#include <linux/ion_exynos.h>
+#include <linux/exynos_iovmm.h>
+#include <linux/of_reserved_mem.h>
+
+#include "vertex-log.h"
+#include "vertex-mailbox.h"
+#include "vertex-system.h"
+#include "vertex-memory.h"
+
+/*
+ * Flush CPU caches so the device sees up-to-date data in pmem.
+ * TODO(review): check whether this open-coded maintenance can use the
+ * DMA-mapping API instead of the arm64-internal __dma_map_area().
+ */
+void vertex_memory_sync_for_device(struct vertex_priv_mem *pmem,
+		off_t offset, size_t size, enum dma_data_direction dir)
+{
+	vertex_enter();
+	/* Only buffers with a kernel mapping can be maintained here */
+	if (pmem->kvaddr) {
+		/* Guard against a range outside the allocation (and overflow) */
+		WARN_ON((offset < 0) || (offset > pmem->size));
+		WARN_ON((offset + size) < size);
+		WARN_ON((size > pmem->size) || ((offset + size) > pmem->size));
+
+		__dma_map_area(pmem->kvaddr + offset, size, dir);
+	}
+	vertex_leave();
+}
+
+/*
+ * Invalidate CPU caches so the CPU sees data the device wrote to pmem.
+ * Counterpart of vertex_memory_sync_for_device().
+ */
+void vertex_memory_sync_for_cpu(struct vertex_priv_mem *pmem,
+		off_t offset, size_t size, enum dma_data_direction dir)
+{
+	vertex_enter();
+	/* Only buffers with a kernel mapping can be maintained here */
+	if (pmem->kvaddr) {
+		/* Guard against a range outside the allocation (and overflow) */
+		WARN_ON((offset < 0) || (offset > pmem->size));
+		WARN_ON((offset + size) < size);
+		WARN_ON((size > pmem->size) || ((offset + size) > pmem->size));
+
+		__dma_unmap_area(pmem->kvaddr + offset, size, dir);
+	}
+	vertex_leave();
+}
+
+/*
+ * Map a user-supplied dma-buf fd for device access: take a reference,
+ * attach to the device, map to an IOVA and create a kernel mapping.
+ * On success buf->{dbuf,attachment,sgt,dvaddr,kvaddr} are valid; on
+ * failure everything acquired so far is released in reverse order.
+ */
+static int vertex_memory_map_dmabuf(struct vertex_memory *mem,
+		struct vertex_buffer *buf)
+{
+	int ret;
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	dma_addr_t dvaddr;
+	void *kvaddr;
+
+	vertex_enter();
+	if (buf->m.fd <= 0) {
+		ret = -EINVAL;
+		vertex_err("fd(%d) is invalid\n", buf->m.fd);
+		goto p_err;
+	}
+
+	dbuf = dma_buf_get(buf->m.fd);
+	if (IS_ERR(dbuf)) {
+		ret = PTR_ERR(dbuf);
+		vertex_err("dma_buf is invalid (%d/%d)\n", buf->m.fd, ret);
+		goto p_err;
+	}
+	buf->dbuf = dbuf;
+
+	/* The requested (page-aligned) size may not exceed the dma-buf */
+	buf->aligned_size = PAGE_ALIGN(buf->size);
+	if (buf->aligned_size > dbuf->size) {
+		ret = -EINVAL;
+		vertex_err("size is invalid (%zu/%zu/%zu)\n",
+				buf->size, buf->aligned_size, dbuf->size);
+		goto p_err_size;
+	}
+
+	attachment = dma_buf_attach(dbuf, mem->dev);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		vertex_err("failed to attach dma-buf (%d)\n", ret);
+		goto p_err_attach;
+	}
+	buf->attachment = attachment;
+
+	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		vertex_err("failed to map attachment (%d)\n", ret);
+		goto p_err_map_attach;
+	}
+	buf->sgt = sgt;
+
+	dvaddr = ion_iovmm_map(attachment, 0, buf->aligned_size,
+			DMA_BIDIRECTIONAL, 0);
+	if (IS_ERR_VALUE(dvaddr)) {
+		ret = (int)dvaddr;
+		vertex_err("failed to map iova (%d)\n", ret);
+		goto p_err_iova;
+	}
+	buf->dvaddr = dvaddr;
+
+	/* TODO: check sync */
+	kvaddr = dma_buf_vmap(dbuf);
+	if (!kvaddr) {
+		/* Fix: dma_buf_vmap() returns NULL on failure, not ERR_PTR */
+		ret = -ENOMEM;
+		vertex_err("failed to map kvaddr (%d)\n", ret);
+		goto p_err_kva;
+	}
+	buf->kvaddr = kvaddr;
+
+	vertex_leave();
+	return 0;
+p_err_kva:
+	ion_iovmm_unmap(attachment, dvaddr);
+p_err_iova:
+	dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+p_err_map_attach:
+	dma_buf_detach(dbuf, attachment);
+p_err_attach:
+p_err_size:
+	dma_buf_put(dbuf);
+p_err:
+	return ret;
+}
+
+/* Undo vertex_memory_map_dmabuf() in exact reverse order. */
+static int vertex_memory_unmap_dmabuf(struct vertex_memory *mem,
+		struct vertex_buffer *buf)
+{
+	struct dma_buf *dbuf = buf->dbuf;
+	struct dma_buf_attachment *attach = buf->attachment;
+
+	vertex_enter();
+	dma_buf_vunmap(dbuf, buf->kvaddr);
+	ion_iovmm_unmap(attach, buf->dvaddr);
+	dma_buf_unmap_attachment(attach, buf->sgt, DMA_BIDIRECTIONAL);
+	dma_buf_detach(dbuf, attach);
+	dma_buf_put(dbuf);
+	vertex_leave();
+	return 0;
+}
+
+/* Userptr mapping is not supported by this driver; always fails. */
+static int vertex_memory_map_userptr(struct vertex_memory *mem,
+		struct vertex_buffer *buf)
+{
+	vertex_enter();
+	vertex_leave();
+	return -EINVAL;
+}
+
+/* Userptr unmapping is not supported by this driver; always fails. */
+static int vertex_memory_unmap_userptr(struct vertex_memory *mem,
+		struct vertex_buffer *buf)
+{
+	vertex_enter();
+	vertex_leave();
+	return -EINVAL;
+}
+
+/* Dispatch table used by the queue layer to (un)map user buffers */
+const struct vertex_memory_ops vertex_memory_ops = {
+	.map_dmabuf = vertex_memory_map_dmabuf,
+	.unmap_dmabuf = vertex_memory_unmap_dmabuf,
+	.map_userptr = vertex_memory_map_userptr,
+	.unmap_userptr = vertex_memory_unmap_userptr
+};
+
+/*
+ * Allocate one private ION buffer described by pmem (name/size/flags and
+ * direction are set by the caller), map it to device address space and
+ * optionally create a kernel mapping when pmem->kmap is set. All acquired
+ * resources are released on failure.
+ */
+static int __vertex_memory_alloc(struct vertex_memory *mem,
+		struct vertex_priv_mem *pmem)
+{
+	int ret;
+	const char *heap_name = "ion_system_heap";
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	dma_addr_t dvaddr;
+	void *kvaddr;
+
+	vertex_enter();
+	dbuf = ion_alloc_dmabuf(heap_name, pmem->size, pmem->flags);
+	if (IS_ERR(dbuf)) {
+		ret = PTR_ERR(dbuf);
+		vertex_err("Failed to allocate dma_buf (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_alloc;
+	}
+	pmem->dbuf = dbuf;
+
+	attachment = dma_buf_attach(dbuf, mem->dev);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		vertex_err("Failed to attach dma_buf (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_attach;
+	}
+	pmem->attachment = attachment;
+
+	sgt = dma_buf_map_attachment(attachment, pmem->direction);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		vertex_err("Failed to map attachment (%d) [%s]\n",
+				ret, pmem->name);
+		goto p_err_map_attachment;
+	}
+	pmem->sgt = sgt;
+
+	dvaddr = ion_iovmm_map(attachment, 0, pmem->size, pmem->direction, 0);
+	if (IS_ERR_VALUE(dvaddr)) {
+		ret = (int)dvaddr;
+		vertex_err("Failed to map dvaddr (%d) [%s]\n", ret, pmem->name);
+		goto p_err_map_dva;
+	}
+	pmem->dvaddr = dvaddr;
+
+	if (pmem->kmap) {
+		kvaddr = dma_buf_vmap(dbuf);
+		if (!kvaddr) {
+			/* Fix: dma_buf_vmap() returns NULL on failure */
+			ret = -ENOMEM;
+			vertex_err("Failed to map kvaddr (%d) [%s]\n",
+					ret, pmem->name);
+			goto p_err_kmap;
+		}
+		pmem->kvaddr = kvaddr;
+	}
+
+	vertex_leave();
+	return 0;
+p_err_kmap:
+	ion_iovmm_unmap(attachment, dvaddr);
+p_err_map_dva:
+	dma_buf_unmap_attachment(attachment, sgt, pmem->direction);
+p_err_map_attachment:
+	dma_buf_detach(dbuf, attachment);
+p_err_attach:
+	dma_buf_put(dbuf);
+p_err_alloc:
+	return ret;
+}
+
+/* Release everything acquired by __vertex_memory_alloc() in reverse order. */
+static void __vertex_memory_free(struct vertex_memory *mem,
+		struct vertex_priv_mem *pmem)
+{
+	vertex_enter();
+	if (pmem->kmap)
+		dma_buf_vunmap(pmem->dbuf, pmem->kvaddr);
+
+	ion_iovmm_unmap(pmem->attachment, pmem->dvaddr);
+	dma_buf_unmap_attachment(pmem->attachment, pmem->sgt, pmem->direction);
+	dma_buf_detach(pmem->dbuf, pmem->attachment);
+	/*
+	 * Fix: drop exactly one reference. The original called dma_buf_put()
+	 * twice, underflowing the refcount taken by the single
+	 * ion_alloc_dmabuf() in __vertex_memory_alloc().
+	 */
+	dma_buf_put(pmem->dbuf);
+	vertex_leave();
+}
+
+/*
+ * Allocate the firmware heap and return its device virtual address.
+ * Errors are returned in-band as a (negative) value cast to dma_addr_t,
+ * so callers must test the result with IS_ERR_VALUE().
+ */
+dma_addr_t vertex_memory_allocate_heap(struct vertex_memory *mem,
+		int id, size_t size)
+{
+	int ret;
+	struct vertex_priv_mem *heap;
+
+	vertex_enter();
+	heap = &mem->heap;
+
+	/* Fix: 'id' is a signed int, so format with %d rather than %u */
+	snprintf(heap->name, VERTEX_PRIV_MEM_NAME_LEN, "vertex_heap_%d", id);
+	heap->size = PAGE_ALIGN(size);
+	heap->flags = 0;
+	heap->direction = DMA_BIDIRECTIONAL;
+	/* Device-only heap: no CPU (kernel) mapping required */
+	heap->kmap = false;
+
+	ret = __vertex_memory_alloc(mem, heap);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return heap->dvaddr;
+p_err:
+	return ret;
+}
+
+/* Release the heap allocated by vertex_memory_allocate_heap(). */
+void vertex_memory_free_heap(struct vertex_memory *mem)
+{
+	vertex_enter();
+	__vertex_memory_free(mem, &mem->heap);
+	vertex_leave();
+}
+
+/*
+ * Map the reserved firmware region into the device's IOMMU domain and
+ * allocate the mailbox and debug buffers. Undone by vertex_memory_close().
+ */
+int vertex_memory_open(struct vertex_memory *mem)
+{
+	int ret;
+	struct vertex_priv_mem *fw;
+	struct vertex_priv_mem *mbox;
+	struct vertex_priv_mem *debug;
+
+	vertex_enter();
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	debug = &mem->debug;
+
+	/* Map the firmware carveout at its fixed device virtual address */
+	ret = iommu_map(mem->domain, fw->dvaddr, fw->paddr, fw->size, 0);
+	if (ret) {
+		vertex_err("iommu mapping is failed (0x%x/0x%x/%zu)(%d)\n",
+				(int)fw->dvaddr, (int)fw->paddr, fw->size, ret);
+		goto p_err_map;
+	}
+
+	/* TODO check: alternative coherent-allocation path kept for reference */
+	//fw->kvaddr = dmam_alloc_coherent(mem->dev, fw->size, &fw->paddr,
+	//		GFP_KERNEL);
+	//if (!fw->kvaddr) {
+	//	ret = -ENOMEM;
+	//	vertex_err("Failed to allocate coherent memory for CM7 DRAM\n");
+	//	goto p_err_cm7;
+	//}
+	//
+	//iommu_map(mem->domain, fw->dvaddr, fw->paddr, fw->size, 0);
+
+	ret = __vertex_memory_alloc(mem, mbox);
+	if (ret)
+		goto p_err_mbox;
+
+	ret = __vertex_memory_alloc(mem, debug);
+	if (ret)
+		goto p_err_debug;
+
+	vertex_leave();
+	return 0;
+p_err_debug:
+	__vertex_memory_free(mem, mbox);
+p_err_mbox:
+	iommu_unmap(mem->domain, fw->dvaddr, fw->size);
+p_err_map:
+	return ret;
+}
+
+/* Release everything set up by vertex_memory_open(), in reverse order. */
+int vertex_memory_close(struct vertex_memory *mem)
+{
+	struct vertex_priv_mem *fw;
+	struct vertex_priv_mem *mbox;
+	struct vertex_priv_mem *debug;
+
+	vertex_enter();
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	debug = &mem->debug;
+
+	__vertex_memory_free(mem, debug);
+	__vertex_memory_free(mem, mbox);
+
+	/* Unmap the firmware carveout from the device's IOMMU domain */
+	iommu_unmap(mem->domain, fw->dvaddr, fw->size);
+	/* TODO check */
+	//dmam_free_coherent(mem->dev, fw->size, fw->kvaddr, fw->paddr);
+
+	vertex_leave();
+	return 0;
+}
+
+extern struct reserved_mem *vipx_cm7_rmem;
+
+/*
+ * One-time setup of the memory bookkeeping: resolve the IOMMU domain and
+ * describe the firmware/mailbox/debug regions. No allocation happens here;
+ * buffers are allocated in vertex_memory_open().
+ */
+int vertex_memory_probe(struct vertex_system *sys)
+{
+	struct device *dev;
+	struct vertex_memory *mem;
+	struct vertex_priv_mem *fw;
+	struct vertex_priv_mem *mbox;
+	struct vertex_priv_mem *debug;
+
+	vertex_enter();
+	dev = sys->dev;
+	/* NOTE(review): return value of dma_set_mask() is ignored -- confirm */
+	dma_set_mask(dev, DMA_BIT_MASK(36));
+
+	mem = &sys->memory;
+	mem->dev = dev;
+	mem->domain = get_domain_from_dev(dev);
+	mem->mops = &vertex_memory_ops;
+
+	fw = &mem->fw;
+	mbox = &mem->mbox;
+	debug = &mem->debug;
+
+	/* Firmware image lives in the CM7 reserved-memory carveout */
+	snprintf(fw->name, VERTEX_PRIV_MEM_NAME_LEN, "vertex_cm7_dram_bin");
+	fw->size = vipx_cm7_rmem->size;
+	/* assumes the carveout is in the kernel linear map -- TODO confirm */
+	fw->kvaddr = phys_to_virt(vipx_cm7_rmem->base);
+	fw->paddr = vipx_cm7_rmem->base;
+	fw->dvaddr = VERTEX_CM7_DRAM_BIN_DVADDR;
+
+	/* Mailbox shared memory is CPU-mapped (kmap) for message copies */
+	snprintf(mbox->name, VERTEX_PRIV_MEM_NAME_LEN, "vertex_mbox");
+	mbox->size = PAGE_ALIGN(sizeof(struct vertex_mailbox_ctrl));
+	mbox->flags = 0;
+	mbox->direction = DMA_BIDIRECTIONAL;
+	mbox->kmap = true;
+
+	snprintf(debug->name, VERTEX_PRIV_MEM_NAME_LEN, "vertex_debug");
+	debug->size = PAGE_ALIGN(VERTEX_DEBUG_SIZE);
+	debug->flags = 0;
+	debug->direction = DMA_BIDIRECTIONAL;
+	/* TODO remove */
+	debug->kmap = true;
+
+	vertex_leave();
+	return 0;
+}
+
+/* Counterpart of vertex_memory_probe(); nothing to undo at the moment. */
+void vertex_memory_remove(struct vertex_memory *mem)
+{
+	vertex_enter();
+	vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos5 SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_MEMORY_H__
+#define __VERTEX_MEMORY_H__
+
+#include <linux/dma-buf.h>
+
+#include "vs4l.h"
+#include "vertex-common-type.h"
+
+/* TODO temp for test */
+#define VERTEX_CM7_DRAM_BIN_DVADDR (0xB8000000)
+#define VERTEX_DEBUG_SIZE (SZ_16M)
+
+#define VERTEX_PRIV_MEM_NAME_LEN (30)
+
+struct vertex_system;
+struct vertex_memory;
+
+/* One user-visible buffer (dma-buf fd or userptr) plus its live mappings */
+struct vertex_buffer {
+	unsigned char addr_type;
+	unsigned char mem_attr;
+	unsigned char mem_type;
+	unsigned char reserved;
+	size_t size;
+	/* size rounded up to page granularity for IOVA mapping */
+	size_t aligned_size;
+	union {
+		unsigned long userptr;
+		int fd;
+	} m;
+
+	/* Valid only while the buffer is mapped (see vertex_memory_ops) */
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	dma_addr_t dvaddr;
+	void *kvaddr;
+
+	struct vs4l_roi roi;
+};
+
+/* Buffer map/unmap hooks implemented in vertex-memory.c */
+struct vertex_memory_ops {
+	int (*map_dmabuf)(struct vertex_memory *mem,
+			struct vertex_buffer *buf);
+	int (*unmap_dmabuf)(struct vertex_memory *mem,
+			struct vertex_buffer *buf);
+	int (*map_userptr)(struct vertex_memory *mem,
+			struct vertex_buffer *buf);
+	int (*unmap_userptr)(struct vertex_memory *mem,
+			struct vertex_buffer *buf);
+};
+
+/* A driver-private allocation (firmware, mailbox, heap or debug region) */
+struct vertex_priv_mem {
+	char name[VERTEX_PRIV_MEM_NAME_LEN];
+	size_t size;
+	long flags;
+	enum dma_data_direction direction;
+	/* when set, a kernel mapping (kvaddr) is created at alloc time */
+	bool kmap;
+
+	void *kvaddr;
+	dma_addr_t paddr;
+	dma_addr_t dvaddr;
+
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+};
+
+/* Per-device memory context: IOMMU domain plus the private regions */
+struct vertex_memory {
+	struct device *dev;
+	struct iommu_domain *domain;
+	const struct vertex_memory_ops *mops;
+
+	struct vertex_priv_mem fw;
+	struct vertex_priv_mem mbox;
+	struct vertex_priv_mem heap;
+	struct vertex_priv_mem debug;
+};
+
+dma_addr_t vertex_memory_allocate_heap(struct vertex_memory *mem,
+ int id, size_t size);
+void vertex_memory_free_heap(struct vertex_memory *mem);
+
+int vertex_memory_open(struct vertex_memory *mem);
+int vertex_memory_close(struct vertex_memory *mem);
+int vertex_memory_probe(struct vertex_system *sys);
+void vertex_memory_remove(struct vertex_memory *mem);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Version history
+ * 2017.06.27 : Initial draft
+ * 2017.08.25 : Released
+ * 2017.10.12 : Add BOOTUP_RSP
+ */
+
+#ifndef __VERTEX_MESSAGE_H__
+#define __VERTEX_MESSAGE_H__
+
+/* temp for previous host ver */
+//#define AP_FOCAL_ZONE_SETTING (0)
+#define AP_FOCAL_ZONE_SETTING (1)
+
+//#define MAX_NUM_OF_INPUTS (16)
+#define MAX_NUM_OF_INPUTS (24)
+//#define MAX_NUM_OF_OUTPUTS (12)
+#define MAX_NUM_OF_OUTPUTS (8)
+//#define MAX_NUM_OF_USER_PARAMS (8)
+#define MAX_NUM_OF_USER_PARAMS (180)
+
+#define VERTEX_CM7_HEAP_SIZE_PREVIEW (SZ_16M)
+#define VERTEX_CM7_HEAP_SIZE_ENF (SZ_64M)
+#define VERTEX_CM7_HEAP_SIZE_CAPTURE (SZ_128M)
+
+/* Host<->firmware message types; REQ from host, RSP/ACK from firmware */
+enum {
+	BOOTUP_RSP = 1,
+	INIT_REQ,
+	INIT_RSP,
+	CREATE_GRAPH_REQ,
+	CREATE_GRAPH_RSP,
+	SET_GRAPH_REQ,
+	SET_GRAPH_RSP,
+	INVOKE_GRAPH_REQ,
+	INVOKE_GRAPH_ACK,
+	INVOKE_GRAPH_RSP,
+	DESTROY_GRAPH_REQ,
+	DESTROY_GRAPH_RSP,
+	ABORT_GRAPH_REQ,
+	ABORT_GRAPH_RSP,
+	POWER_DOWN_REQ,
+	POWER_DOWN_RSP,
+	TEST_REQ,
+	TEST_RSP,
+	MAX_MSG_TYPE,
+};
+
+/* Firmware error codes carried in the 'error' field of responses */
+enum {
+	SUCCESS = 0,
+	ERROR_INVALID_GRAPH_ID = -1,
+	ERROR_EXEC_PARAM_CORRUPTION = -2,
+	ERROR_GRAPH_DATA_CORRUPTION = -3,
+	ERROR_COMPILED_GRAPH_ENABLED = -4,
+	ERROR_INVALID_MSG_TYPE = -5,
+	ERROR_UNKNOWN = -6,
+	ERROR_INVALID = -7,
+	ERROR_MBX_BUSY = -8,
+};
+
+/* Use-case scenarios understood by the firmware */
+enum {
+	SCENARIO_DE = 1,
+	SCENARIO_SDOF_FULL,
+	SCENARIO_DE_SDOF,
+	// temp for previous host ver
+	// SCENARIO_ENF,
+	SCENARIO_DE_CAPTURE,
+	SCENARIO_SDOF_CAPTURE,
+	SCENARIO_DE_SDOF_CAPTURE,
+	SCENARIO_GDC,
+	SCENARIO_ENF,
+	SCENARIO_ENF_UV,
+	SCENARIO_ENF_YUV, /*10*/
+	SCENARIO_BLEND,
+	SCENARIO_MAX,
+};
+
+struct vertex_bootup_rsp {
+ int error;
+};
+
+/**
+ * struct vertex_init_req
+ * @p_cc_heap: pointer to CC heap region
+ * @sz_cc_heap: size of CC heap region
+ */
+struct vertex_init_req {
+ unsigned int p_cc_heap;
+ unsigned int sz_cc_heap;
+};
+
+struct vertex_init_rsp {
+ int error;
+};
+
+/**
+ * struct vertex_create_graph_req
+ * @p_graph: pointer to compiled graph
+ * @sz_graph: size of compiled graph
+ */
+struct vertex_create_graph_req {
+ unsigned int p_graph;
+ unsigned int sz_graph;
+};
+
+/**
+ * struct vertex_create_graph_rsp
+ * @graph_id: graph id that should be used for invocation
+ */
+struct vertex_create_graph_rsp {
+ int error;
+ unsigned int graph_id;
+};
+
+/**
+ * struct vertex_set_graph_req
+ * @graph_id: graph id that should be used for invocation
+ * @p_lll_bin: pointer to VIP binary in DRAM
+ * @sz_lll_bin: size of VIP binary in DRAM
+ * @num_inputs: number of inputs for the graph
+ * @num_outputs: number of outputs for the graph
+ * @input_width: array of input width sizes
+ * @input_height: array of input height sizes
+ * @input_depth: array of input depth sizes
+ * @output_width: array of output width sizes
+ * @output_height: array of output height sizes
+ * @output_depth: array of output depth sizes
+ * @p_temp: pointer to DRAM area for temporary buffers
+ * @sz_temp: size of temporary buffer area
+ */
+struct vertex_set_graph_req {
+ unsigned int graph_id;
+ unsigned int p_lll_bin;
+ unsigned int sz_lll_bin;
+ int num_inputs;
+ int num_outputs;
+
+ unsigned int input_width[MAX_NUM_OF_INPUTS];
+ unsigned int input_height[MAX_NUM_OF_INPUTS];
+ unsigned int input_depth[MAX_NUM_OF_INPUTS];
+ unsigned int output_width[MAX_NUM_OF_OUTPUTS];
+ unsigned int output_height[MAX_NUM_OF_OUTPUTS];
+ unsigned int output_depth[MAX_NUM_OF_OUTPUTS];
+
+ unsigned int p_temp;
+ unsigned int sz_temp;
+};
+
+struct vertex_set_graph_rsp {
+ int error;
+};
+
+/**
+ * struct vertex_invoke_graph_req
+ * @graph_id: graph id that should be used for invocation
+ * @num_inputs: number of inputs for the graph
+ * @num_outputs: number of outputs for the graph
+ * @p_input: array of input buffers
+ * @p_output: array of output buffers
+ * @user_params: array of user parameters
+ */
+struct vertex_invoke_graph_req {
+ unsigned int graph_id;
+ int num_inputs;
+ int num_outputs;
+ unsigned int p_input[MAX_NUM_OF_INPUTS];
+ unsigned int p_output[MAX_NUM_OF_OUTPUTS];
+#if AP_FOCAL_ZONE_SETTING
+ unsigned int user_params[MAX_NUM_OF_USER_PARAMS];
+#endif
+};
+
+struct vertex_invoke_graph_ack {
+ int error;
+ unsigned int job_id;
+};
+
+struct vertex_invoke_graph_rsp {
+ int error;
+ unsigned int job_id;
+};
+
+struct vertex_abort_graph_req {
+ unsigned int job_id;
+};
+
+struct vertex_abort_graph_rsp {
+ int error;
+};
+
+struct vertex_destroy_graph_req {
+ unsigned int graph_id;
+};
+
+struct vertex_destroy_graph_rsp {
+ int error;
+};
+
+struct vertex_powerdown_req {
+ unsigned int valid;
+};
+
+struct vertex_powerdown_rsp {
+ int error;
+};
+
+struct vertex_test_req {
+ unsigned int test;
+};
+
+struct vertex_test_rsp {
+ int error;
+};
+
+union vertex_message_payload {
+ struct vertex_bootup_rsp bootup_rsp;
+ struct vertex_init_req init_req;
+ struct vertex_init_rsp init_rsp;
+ struct vertex_create_graph_req create_graph_req;
+ struct vertex_create_graph_rsp create_graph_rsp;
+ struct vertex_set_graph_req set_graph_req;
+ struct vertex_set_graph_rsp set_graph_rsp;
+ struct vertex_invoke_graph_req invoke_graph_req;
+ struct vertex_invoke_graph_ack invoke_graph_ack;
+ struct vertex_invoke_graph_rsp invoke_graph_rsp;
+ struct vertex_destroy_graph_req destroy_graph_req;
+ struct vertex_destroy_graph_rsp destroy_graph_rsp;
+ struct vertex_abort_graph_req abort_graph_req;
+ struct vertex_abort_graph_rsp abort_graph_rsp;
+ struct vertex_powerdown_req powerdown_req;
+ struct vertex_powerdown_rsp powerdown_rsp;
+ struct vertex_test_req test_req;
+ struct vertex_test_rsp test_rsp;
+};
+
+/* Envelope exchanged through the mailbox rings */
+struct vertex_message {
+	/* TODO valid flag to be removed */
+	unsigned int valid;
+	/* id used to match a response to its originating request */
+	unsigned int trans_id;
+	/* one of the message-type enum values above */
+	unsigned int type;
+	union vertex_message_payload payload;
+};
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/ion_exynos.h>
+
+#include "vertex-log.h"
+#include "vertex-device.h"
+#include "vertex-core.h"
+#include "vertex-context.h"
+#include "vertex-queue.h"
+
+/*
+ * Supported vision formats. 'bitsperpixel' is per plane; searched by
+ * colorspace in __vertex_queue_find_format().
+ */
+static struct vertex_format_type vertex_fmts[] = {
+	{
+		.name = "RGB",
+		.colorspace = VS4L_DF_IMAGE_RGB,
+		.planes = 1,
+		.bitsperpixel = { 24 }
+	}, {
+		.name = "ARGB",
+		.colorspace = VS4L_DF_IMAGE_RGBX,
+		.planes = 1,
+		.bitsperpixel = { 32 }
+	}, {
+		.name = "YUV 4:2:0 planar, Y/CbCr",
+		.colorspace = VS4L_DF_IMAGE_NV12,
+		.planes = 2,
+		.bitsperpixel = { 8, 8 }
+	}, {
+		.name = "YUV 4:2:0 planar, Y/CrCb",
+		.colorspace = VS4L_DF_IMAGE_NV21,
+		.planes = 2,
+		.bitsperpixel = { 8, 8 }
+	}, {
+		.name = "YUV 4:2:0 planar, Y/Cr/Cb",
+		.colorspace = VS4L_DF_IMAGE_YV12,
+		.planes = 3,
+		.bitsperpixel = { 8, 4, 4 }
+	}, {
+		.name = "YUV 4:2:0 planar, Y/Cb/Cr",
+		.colorspace = VS4L_DF_IMAGE_I420,
+		.planes = 3,
+		.bitsperpixel = { 8, 4, 4 }
+	}, {
+		.name = "YUV 4:2:2 planar, Y/Cb/Cr",
+		.colorspace = VS4L_DF_IMAGE_I422,
+		.planes = 3,
+		.bitsperpixel = { 8, 4, 4 }
+	}, {
+		.name = "YUV 4:2:2 packed, YCbYCr",
+		.colorspace = VS4L_DF_IMAGE_YUYV,
+		.planes = 1,
+		.bitsperpixel = { 16 }
+	}, {
+		.name = "YUV 4:4:4 packed, YCbCr",
+		.colorspace = VS4L_DF_IMAGE_YUV4,
+		.planes = 1,
+		.bitsperpixel = { 24 }
+	}, {
+		.name = "VX unsigned 8 bit",
+		.colorspace = VS4L_DF_IMAGE_U8,
+		.planes = 1,
+		.bitsperpixel = { 8 }
+	}, {
+		.name = "VX unsigned 16 bit",
+		.colorspace = VS4L_DF_IMAGE_U16,
+		.planes = 1,
+		.bitsperpixel = { 16 }
+	}, {
+		.name = "VX unsigned 32 bit",
+		.colorspace = VS4L_DF_IMAGE_U32,
+		.planes = 1,
+		.bitsperpixel = { 32 }
+	}, {
+		.name = "VX signed 16 bit",
+		.colorspace = VS4L_DF_IMAGE_S16,
+		.planes = 1,
+		.bitsperpixel = { 16 }
+	}, {
+		.name = "VX signed 32 bit",
+		.colorspace = VS4L_DF_IMAGE_S32,
+		.planes = 1,
+		.bitsperpixel = { 32 }
+	}
+};
+
+/* Look up the format descriptor for a VS4L colorspace code. */
+static struct vertex_format_type *__vertex_queue_find_format(
+		unsigned int format)
+{
+	unsigned int idx;
+
+	vertex_enter();
+	for (idx = 0; idx < ARRAY_SIZE(vertex_fmts); ++idx) {
+		if (vertex_fmts[idx].colorspace == format) {
+			vertex_leave();
+			return &vertex_fmts[idx];
+		}
+	}
+
+	vertex_err("Vision format is invalid (%u)\n", format);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Compute the byte size of every plane from the format's width/height.
+ * NOTE(review): bitsperpixel/8 is applied to BOTH width and height, so a
+ * plane's size scales with (bpp/8)^2. That matches subsampled chroma
+ * planes (4 bpp -> w*h/4) but looks wrong for packed formats such as
+ * 24-bit RGB -- confirm against the firmware's expectations.
+ */
+static int __vertex_queue_plane_size(struct vertex_buffer_format *format)
+{
+	int ret;
+	unsigned int plane;
+	struct vertex_format_type *fmt;
+	unsigned int width, height;
+
+	vertex_enter();
+	fmt = format->fmt;
+	if (fmt->planes > VERTEX_MAX_PLANES) {
+		ret = -EINVAL;
+		vertex_err("planes(%u) is too big (%u)\n",
+				fmt->planes, VERTEX_MAX_PLANES);
+		goto p_err;
+	}
+
+	for (plane = 0; plane < fmt->planes; ++plane) {
+		width = format->width *
+			fmt->bitsperpixel[plane] / BITS_PER_BYTE;
+		height = format->height *
+			fmt->bitsperpixel[plane] / BITS_PER_BYTE;
+		format->size[plane] = width * height;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Wait (interruptibly, bounded) until the done_list has an entry or
+ * streaming stops. The queue lock is dropped while sleeping. Returns 0
+ * when a completed bundle is available, negative error otherwise.
+ */
+static int __vertex_queue_wait_for_done(struct vertex_queue *queue)
+{
+	int ret;
+	/* TODO: check wait time */
+	unsigned long wait_time = 10000;
+
+	vertex_enter();
+	if (!queue->streaming) {
+		ret = -EINVAL;
+		vertex_err("queue is not streaming status\n");
+		goto p_err;
+	}
+
+	/* Fast path: a completed bundle is already queued */
+	if (!list_empty(&queue->done_list))
+		return 0;
+
+	/* Drop the queue lock while sleeping so producers can make progress */
+	mutex_unlock(queue->lock);
+
+	ret = wait_event_interruptible_timeout(queue->done_wq,
+			!list_empty(&queue->done_list) || !queue->streaming,
+			msecs_to_jiffies(wait_time));
+	if (!ret) {
+		ret = -ETIMEDOUT;
+		vertex_err("Wait time(%lu ms) is over\n", wait_time);
+	} else if (ret > 0) {
+		ret = 0;
+	} else {
+		vertex_err("Waiting has ended abnormaly (%d)\n", ret);
+	}
+
+	mutex_lock(queue->lock);
+
+	vertex_leave();
+	/*
+	 * Fix: propagate the wait result. The original returned 0 even on
+	 * timeout/interrupt, letting the caller dequeue from an empty list.
+	 */
+	return ret;
+p_err:
+	return ret;
+}
+
+/*
+ * Dequeue the oldest completed bundle, blocking (interruptibly, with a
+ * timeout) until one is available. Called with the queue lock held;
+ * returns an ERR_PTR on failure.
+ */
+static struct vertex_bundle *__vertex_queue_get_done_bundle(
+		struct vertex_queue *queue,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+	struct vertex_bundle *bundle;
+	unsigned long flags;
+
+	vertex_enter();
+	/* Wait for at least one buffer to become available on the done_list */
+	ret = __vertex_queue_wait_for_done(queue);
+	if (ret)
+		goto p_err;
+
+	/*
+	 * Driver's lock has been held since we last verified that done_list
+	 * is not empty, so no need for another list_empty(done_list) check.
+	 */
+	spin_lock_irqsave(&queue->done_lock, flags);
+
+	bundle = list_first_entry(&queue->done_list, struct vertex_bundle,
+			done_entry);
+
+	list_del(&bundle->done_entry);
+	atomic_dec(&queue->done_count);
+
+	spin_unlock_irqrestore(&queue->done_lock, flags);
+
+	vertex_leave();
+	return bundle;
+p_err:
+	return ERR_PTR(ret);
+}
+
+/*
+ * Copy the completion status of a finished bundle back into the
+ * user-visible vs4l container list (flags, timestamp, index, id).
+ */
+static void __vertex_queue_fill_vs4l_buffer(struct vertex_bundle *bundle,
+		struct vs4l_container_list *kclist)
+{
+	struct vertex_container_list *vclist = &bundle->clist;
+
+	vertex_enter();
+	/* Start from a clean slate for the status flags we report */
+	kclist->flags &= ~((1 << VS4L_CL_FLAG_TIMESTAMP) |
+			(1 << VS4L_CL_FLAG_PREPARE) |
+			(1 << VS4L_CL_FLAG_INVALID) |
+			(1 << VS4L_CL_FLAG_DONE));
+
+	if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &vclist->flags)) {
+		if (sizeof(kclist->timestamp) == sizeof(vclist->timestamp)) {
+			kclist->flags |= (1 << VS4L_CL_FLAG_TIMESTAMP);
+			memcpy(kclist->timestamp, vclist->timestamp,
+					sizeof(vclist->timestamp));
+		} else {
+			vertex_warn("timestamp of clist is different (%zu/%zu)\n",
+					sizeof(kclist->timestamp),
+					sizeof(vclist->timestamp));
+		}
+	}
+
+	/* Propagate the per-bundle status bits back to user space */
+	if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_PREPARE);
+	if (test_bit(VS4L_CL_FLAG_INVALID, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_INVALID);
+	if (test_bit(VS4L_CL_FLAG_DONE, &bundle->flags))
+		kclist->flags |= (1 << VS4L_CL_FLAG_DONE);
+
+	kclist->index = vclist->index;
+	kclist->id = vclist->id;
+	vertex_leave();
+}
+
+/* Map one dma-buf buffer of 'size' bytes via the memory ops. */
+static int __vertex_queue_map_dmabuf(struct vertex_queue *queue,
+		struct vertex_buffer *buf, size_t size)
+{
+	struct vertex_memory *mem;
+	int ret;
+
+	vertex_enter();
+	/* Resolve the memory helpers through the owning core/system */
+	mem = &queue->qlist->vctx->core->system->memory;
+
+	buf->size = size;
+	ret = mem->mops->map_dmabuf(mem, buf);
+	if (ret)
+		return ret;
+
+	vertex_leave();
+	return 0;
+}
+
+/* Release the mappings created by __vertex_queue_map_dmabuf(). */
+static void __vertex_queue_unmap_dmabuf(struct vertex_queue *queue,
+		struct vertex_buffer *buf)
+{
+	struct vertex_core *core;
+	struct vertex_memory *mem;
+
+	vertex_enter();
+	core = queue->qlist->vctx->core;
+	mem = &core->system->memory;
+	mem->mops->unmap_dmabuf(mem, buf);
+	vertex_leave();
+}
+
+/*
+ * Map every dma-buf buffer referenced by the bundle's container list.
+ * Prepare is idempotent: an already-prepared bundle is left untouched.
+ * NOTE(review): buffers mapped before a failing container are not
+ * unmapped on the error path -- confirm callers unprepare on failure.
+ */
+static int __vertex_queue_bundle_prepare(struct vertex_queue *queue,
+		struct vertex_bundle *bundle)
+{
+	int ret;
+	struct vertex_container *con;
+	struct vertex_buffer *buf;
+	struct vertex_buffer_format *fmt;
+	unsigned int c_cnt, count, b_cnt;
+
+	vertex_enter();
+	if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
+		return 0;
+
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		switch (con[c_cnt].type) {
+		case VS4L_BUFFER_LIST:
+		case VS4L_BUFFER_ROI:
+		case VS4L_BUFFER_PYRAMID:
+			count = con[c_cnt].count;
+			break;
+		default:
+			ret = -EINVAL;
+			vertex_err("container type is invalid (%u)\n",
+					con[c_cnt].type);
+			goto p_err;
+		}
+
+		fmt = con[c_cnt].format;
+		switch (con[c_cnt].memory) {
+		case VS4L_MEMORY_DMABUF:
+			buf = con[c_cnt].buffers;
+			for (b_cnt = 0; b_cnt < count; ++b_cnt) {
+				ret = __vertex_queue_map_dmabuf(queue,
+						&buf[b_cnt],
+						fmt->size[b_cnt]);
+				if (ret)
+					goto p_err;
+			}
+			break;
+		default:
+			/* Fix: 'ret' was left uninitialized on this path */
+			ret = -EINVAL;
+			vertex_err("container memory type is invalid (%u)\n",
+					con[c_cnt].memory);
+			goto p_err;
+		}
+	}
+	set_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Unmap every dma-buf buffer referenced by the bundle's container list.
+ * Unprepare is idempotent: an unprepared bundle is a successful no-op.
+ */
+static int __vertex_queue_bundle_unprepare(struct vertex_queue *queue,
+		struct vertex_bundle *bundle)
+{
+	int ret;
+	struct vertex_container *con;
+	struct vertex_buffer *buf;
+	unsigned int c_cnt, count, b_cnt;
+
+	vertex_enter();
+	/*
+	 * Fix: 'ret' was returned uninitialized here. Treat an unprepared
+	 * bundle as "nothing to undo" and succeed.
+	 */
+	if (!test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
+		return 0;
+
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		switch (con[c_cnt].type) {
+		case VS4L_BUFFER_LIST:
+		case VS4L_BUFFER_ROI:
+		case VS4L_BUFFER_PYRAMID:
+			count = con[c_cnt].count;
+			break;
+		default:
+			ret = -EINVAL;
+			vertex_err("container type is invalid (%u)\n",
+					con[c_cnt].type);
+			goto p_err;
+		}
+
+		switch (con[c_cnt].memory) {
+		case VS4L_MEMORY_DMABUF:
+			buf = con[c_cnt].buffers;
+			for (b_cnt = 0; b_cnt < count; ++b_cnt)
+				__vertex_queue_unmap_dmabuf(queue, &buf[b_cnt]);
+			break;
+		default:
+			/* Fix: 'ret' was left uninitialized on this path */
+			ret = -EINVAL;
+			vertex_err("container memory type is invalid (%u)\n",
+					con[c_cnt].memory);
+			goto p_err;
+		}
+	}
+
+	clear_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Verify that a user-supplied container list matches the bundle that was
+ * registered earlier (direction/index/count/targets/fds must agree), and
+ * refresh the per-request fields (id, flags, ROIs) from user space.
+ */
+static int __vertex_queue_bundle_check(struct vertex_bundle *bundle,
+		struct vs4l_container_list *kclist)
+{
+	int ret;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer *kbuf;
+	struct vertex_container_list *vclist;
+	struct vertex_container *vcon;
+	struct vertex_buffer *vbuf;
+	unsigned int c_cnt, b_cnt;
+
+	vertex_enter();
+	vclist = &bundle->clist;
+
+	if (vclist->direction != kclist->direction) {
+		ret = -EINVAL;
+		vertex_err("direction of clist is different (%u/%u)\n",
+				vclist->direction, kclist->direction);
+		goto p_err;
+	}
+
+	if (vclist->index != kclist->index) {
+		ret = -EINVAL;
+		vertex_err("index of clist is different (%u/%u)\n",
+				vclist->index, kclist->index);
+		goto p_err;
+	}
+
+	if (vclist->count != kclist->count) {
+		ret = -EINVAL;
+		vertex_err("container count of clist is differnet (%u/%u)\n",
+				vclist->count, kclist->count);
+		goto p_err;
+	}
+
+	/* Per-request fields are refreshed from the new submission */
+	vclist->id = kclist->id;
+	vclist->flags = kclist->flags;
+
+	vcon = vclist->containers;
+	kcon = kclist->containers;
+	for (c_cnt = 0; c_cnt < vclist->count; ++c_cnt) {
+		if (vcon[c_cnt].target != kcon[c_cnt].target) {
+			ret = -EINVAL;
+			vertex_err("target of container is different (%u/%u)\n",
+					vcon[c_cnt].target, kcon[c_cnt].target);
+			goto p_err;
+		}
+
+		if (vcon[c_cnt].count != kcon[c_cnt].count) {
+			ret = -EINVAL;
+			vertex_err("count of container is different (%u/%u)\n",
+					vcon[c_cnt].count, kcon[c_cnt].count);
+			goto p_err;
+		}
+
+		vbuf = vcon[c_cnt].buffers;
+		kbuf = kcon[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < vcon[c_cnt].count; ++b_cnt) {
+			/* ROI may change per request; the backing fd may not */
+			vbuf[b_cnt].roi = kbuf[b_cnt].roi;
+			if (vbuf[b_cnt].m.fd != kbuf[b_cnt].m.fd) {
+				ret = -EINVAL;
+				vertex_err("fd of buffer is different (%d/%d)\n",
+						vbuf[b_cnt].m.fd,
+						kbuf[b_cnt].m.fd);
+				goto p_err;
+			}
+		}
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Build a kernel-side bundle mirroring a userspace container list.  The
+ * bundle, its container array and all buffer arrays are carved out of one
+ * kzalloc() so a single kfree() releases everything.
+ *
+ * Fix: the reserved-bytes memcpy targeted vcon->reserved (container 0) on
+ * every iteration instead of the current container vcon[c_cnt].
+ */
+static int __vertex_queue_bundle_alloc(struct vertex_queue *queue,
+		struct vs4l_container_list *kclist)
+{
+	int ret;
+	struct vs4l_container *kcon;
+	struct vs4l_buffer *kbuf;
+	struct vertex_bundle *bundle;
+	struct vertex_container_list *vclist;
+	struct vertex_container *vcon;
+	struct vertex_buffer *vbuf;
+	size_t alloc_size;
+	char *ptr;
+	unsigned int c_cnt, f_cnt, b_cnt;
+
+	vertex_enter();
+	/* All memory allocation and pointer value assignment */
+	kcon = kclist->containers;
+	alloc_size = sizeof(*bundle);
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt)
+		alloc_size += sizeof(*vcon) + sizeof(*vbuf) * kcon[c_cnt].count;
+
+	bundle = kzalloc(alloc_size, GFP_KERNEL);
+	if (!bundle) {
+		ret = -ENOMEM;
+		vertex_err("Failed to alloc bundle (%zu)\n", alloc_size);
+		goto p_err_alloc;
+	}
+
+	vclist = &bundle->clist;
+	ptr = (char *)bundle + sizeof(*bundle);
+	vclist->containers = (struct vertex_container *)ptr;
+	ptr += sizeof(*vcon) * kclist->count;
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt) {
+		vclist->containers[c_cnt].buffers = (struct vertex_buffer *)ptr;
+		ptr += sizeof(*vbuf) * kcon[c_cnt].count;
+	}
+
+	vclist->direction = kclist->direction;
+	vclist->id = kclist->id;
+	vclist->index = kclist->index;
+	vclist->flags = kclist->flags;
+	vclist->count = kclist->count;
+
+	if (sizeof(vclist->user_params) != sizeof(kclist->user_params)) {
+		ret = -EINVAL;
+		vertex_err("user params size is invalid (%zu/%zu)\n",
+				sizeof(vclist->user_params),
+				sizeof(kclist->user_params));
+		goto p_err_param;
+	}
+	memcpy(vclist->user_params, kclist->user_params,
+			sizeof(vclist->user_params));
+
+	vcon = vclist->containers;
+	for (c_cnt = 0; c_cnt < kclist->count; ++c_cnt) {
+		vcon[c_cnt].type = kcon[c_cnt].type;
+		vcon[c_cnt].target = kcon[c_cnt].target;
+		vcon[c_cnt].memory = kcon[c_cnt].memory;
+		if (sizeof(vcon[c_cnt].reserved) !=
+				sizeof(kcon[c_cnt].reserved)) {
+			ret = -EINVAL;
+			vertex_err("container reserve is invalid (%zu/%zu)\n",
+					sizeof(vcon[c_cnt].reserved),
+					sizeof(kcon[c_cnt].reserved));
+			goto p_err_param;
+		}
+		/* copy into the CURRENT container, not always the first */
+		memcpy(vcon[c_cnt].reserved, kcon[c_cnt].reserved,
+				sizeof(vcon[c_cnt].reserved));
+		vcon[c_cnt].count = kcon[c_cnt].count;
+
+		/* Bind the container to the format set via set_format() */
+		for (f_cnt = 0; f_cnt < queue->flist.count; ++f_cnt) {
+			if (vcon[c_cnt].target ==
+					queue->flist.formats[f_cnt].target) {
+				vcon[c_cnt].format =
+					&queue->flist.formats[f_cnt];
+				break;
+			}
+		}
+
+		/* kzalloc left format NULL if no target matched */
+		if (!vcon[c_cnt].format) {
+			ret = -EINVAL;
+			vertex_err("Format(%u) is not set\n",
+					vcon[c_cnt].target);
+			goto p_err_param;
+		}
+
+		vbuf = vcon[c_cnt].buffers;
+		kbuf = kcon[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < kcon[c_cnt].count; ++b_cnt) {
+			vbuf[b_cnt].roi = kbuf[b_cnt].roi;
+			vbuf[b_cnt].m.fd = kbuf[b_cnt].m.fd;
+		}
+	}
+
+	queue->bufs[kclist->index] = bundle;
+	queue->num_buffers++;
+
+	vertex_leave();
+	return 0;
+p_err_param:
+	kfree(bundle);
+p_err_alloc:
+	return ret;
+}
+
+/* Drop a bundle from the queue's bookkeeping and release its memory */
+static void __vertex_queue_bundle_free(struct vertex_queue *queue,
+		struct vertex_bundle *bundle)
+{
+	vertex_enter();
+	queue->bufs[bundle->clist.index] = NULL;
+	queue->num_buffers--;
+	kfree(bundle);
+	vertex_leave();
+}
+
+/*
+ * Translate a vs4l format list into the queue's internal format table and
+ * compute per-plane sizes.
+ *
+ * Fix: the original published queue->flist BEFORE validation, so a failure
+ * freed vformat while queue->flist.formats still pointed at it (dangling
+ * pointer).  The list is now published only after it is fully built.
+ */
+static int __vertex_queue_s_format(struct vertex_queue *queue,
+		struct vs4l_format_list *flist)
+{
+	int ret;
+	unsigned int idx;
+	struct vertex_buffer_format *vformat;
+	struct vs4l_format *kformat;
+	struct vertex_format_type *fmt;
+
+	vertex_enter();
+	vformat = kcalloc(flist->count, sizeof(*vformat), GFP_KERNEL);
+	if (!vformat) {
+		ret = -ENOMEM;
+		vertex_err("Failed to allocate vformats\n");
+		goto p_err_alloc;
+	}
+
+	kformat = flist->formats;
+	for (idx = 0; idx < flist->count; ++idx) {
+		fmt = __vertex_queue_find_format(kformat[idx].format);
+		if (IS_ERR(fmt)) {
+			ret = PTR_ERR(fmt);
+			goto p_err_find;
+		}
+
+		vformat[idx].fmt = fmt;
+		vformat[idx].colorspace = kformat[idx].format;
+		vformat[idx].target = kformat[idx].target;
+		vformat[idx].plane = kformat[idx].plane;
+		vformat[idx].width = kformat[idx].width;
+		vformat[idx].height = kformat[idx].height;
+
+		ret = __vertex_queue_plane_size(&vformat[idx]);
+		if (ret)
+			goto p_err_plane;
+	}
+
+	/* Publish the table only once it is complete and valid */
+	queue->flist.count = flist->count;
+	queue->flist.formats = vformat;
+
+	vertex_leave();
+	return 0;
+p_err_plane:
+p_err_find:
+	kfree(vformat);
+p_err_alloc:
+	return ret;
+}
+
+/*
+ * Queue one container list: find or create its bundle, prepare (map) the
+ * DMA-bufs, sync CPU caches for device access and append the bundle to the
+ * queued list.
+ */
+static int __vertex_queue_qbuf(struct vertex_queue *queue,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+	struct vertex_bundle *bundle;
+	struct vertex_container *con;
+	struct vertex_buffer *buf;
+	int dma_dir;
+	unsigned int c_cnt, b_cnt;
+
+	vertex_enter();
+	if (clist->index >= VERTEX_MAX_BUFFER) {
+		ret = -EINVAL;
+		vertex_err("clist index is out of range (%u/%u)\n",
+				clist->index, VERTEX_MAX_BUFFER);
+		goto p_err;
+	}
+
+	/* Reuse the cached bundle for this index, or build a new one */
+	if (queue->bufs[clist->index]) {
+		bundle = queue->bufs[clist->index];
+		ret = __vertex_queue_bundle_check(bundle, clist);
+		if (ret)
+			goto p_err;
+	} else {
+		ret = __vertex_queue_bundle_alloc(queue, clist);
+		if (ret)
+			goto p_err;
+
+		bundle = queue->bufs[clist->index];
+	}
+
+	if (bundle->state != VERTEX_BUNDLE_STATE_DEQUEUED) {
+		ret = -EINVAL;
+		vertex_err("bundle is already in use (%u)\n", bundle->state);
+		goto p_err;
+	}
+
+	ret = __vertex_queue_bundle_prepare(queue, bundle);
+	if (ret)
+		goto p_err;
+
+	/* OT = device writes (FROM_DEVICE); IN = device reads (TO_DEVICE) */
+	if (queue->direction == VS4L_DIRECTION_OT)
+		dma_dir = DMA_FROM_DEVICE;
+	else
+		dma_dir = DMA_TO_DEVICE;
+
+	/* TODO: check sync */
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		buf = con[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < con[c_cnt].count; ++b_cnt)
+			__dma_map_area(buf[b_cnt].kvaddr, buf[b_cnt].size,
+					dma_dir);
+	}
+
+	/*
+	 * Add to the queued buffers list, a buffer will stay on it until
+	 * dequeued in dqbuf.
+	 */
+	list_add_tail(&bundle->queued_entry, &queue->queued_list);
+	bundle->state = VERTEX_BUNDLE_STATE_QUEUED;
+	atomic_inc(&queue->queued_count);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Promote a queued bundle to the in-flight (process) stage */
+static void __vertex_queue_process(struct vertex_queue *queue,
+		struct vertex_bundle *bundle)
+{
+	vertex_enter();
+	list_add_tail(&bundle->process_entry, &queue->process_list);
+	bundle->state = VERTEX_BUNDLE_STATE_PROCESS;
+	atomic_inc(&queue->process_count);
+	vertex_leave();
+}
+
+/*
+ * Dequeue one completed bundle: wait for/fetch a DONE bundle matching the
+ * request, copy result metadata back into the userspace container list and
+ * return the bundle to the DEQUEUED state.
+ */
+static int __vertex_queue_dqbuf(struct vertex_queue *queue,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+	struct vertex_bundle *bundle;
+
+	vertex_enter();
+	if (queue->direction != clist->direction) {
+		ret = -EINVAL;
+		vertex_err("direction of queue is different (%u/%u)\n",
+				queue->direction, clist->direction);
+		goto p_err;
+	}
+
+	/* May block depending on the helper's wait policy */
+	bundle = __vertex_queue_get_done_bundle(queue, clist);
+	if (IS_ERR(bundle)) {
+		ret = PTR_ERR(bundle);
+		goto p_err;
+	}
+
+	if (bundle->state != VERTEX_BUNDLE_STATE_DONE) {
+		ret = -EINVAL;
+		vertex_err("bundle state is not done (%u)\n", bundle->state);
+		goto p_err;
+	}
+
+	/* Fill buffer information for the userspace */
+	__vertex_queue_fill_vs4l_buffer(bundle, clist);
+
+	bundle->state = VERTEX_BUNDLE_STATE_DEQUEUED;
+	list_del(&bundle->queued_entry);
+	atomic_dec(&queue->queued_count);
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Mark the queue as streaming; no other state needs to be set up */
+static int __vertex_queue_start(struct vertex_queue *queue)
+{
+	vertex_enter();
+	queue->streaming = 1;
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Stop streaming on one queue.  All bundle lists must already be drained;
+ * remaining cached bundles are unprepared and freed.  Returns -EINVAL if
+ * any bundle is still queued, in flight, done, or leaked.
+ */
+static int __vertex_queue_stop(struct vertex_queue *queue)
+{
+	int ret;
+	struct vertex_bundle **bundle;
+	unsigned int idx;
+
+	vertex_enter();
+	queue->streaming = 0;
+	/* Release any waiters blocked in poll/dqbuf */
+	wake_up_all(&queue->done_wq);
+
+	if (atomic_read(&queue->queued_count)) {
+		ret = -EINVAL;
+		vertex_err("queued list is not empty (%d)\n",
+				atomic_read(&queue->queued_count));
+		goto p_err;
+	}
+
+	if (atomic_read(&queue->process_count)) {
+		ret = -EINVAL;
+		vertex_err("process list is not empty (%d)\n",
+				atomic_read(&queue->process_count));
+		goto p_err;
+	}
+
+	if (atomic_read(&queue->done_count)) {
+		ret = -EINVAL;
+		vertex_err("done list is not empty (%d)\n",
+				atomic_read(&queue->done_count));
+		goto p_err;
+	}
+
+	/* Free every cached bundle; loop ends early once none remain */
+	bundle = queue->bufs;
+	for (idx = 0; queue->num_buffers && idx < VERTEX_MAX_BUFFER; ++idx) {
+		if (!bundle[idx])
+			continue;
+
+		ret = __vertex_queue_bundle_unprepare(queue, bundle[idx]);
+		if (ret)
+			goto p_err;
+
+		__vertex_queue_bundle_free(queue, bundle[idx]);
+	}
+
+	if (queue->num_buffers) {
+		ret = -EINVAL;
+		vertex_err("bundle leak issued (%u)\n", queue->num_buffers);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/*
+ * Report POLLIN/POLLOUT readiness based on each queue's done list.
+ *
+ * Fix: the original set the ready bits when the done list WAS empty, i.e.
+ * it reported readiness exactly when nothing could be dequeued.  Following
+ * the standard poll pattern (wait if empty, ready if non-empty after the
+ * wait registration), the second checks must be negated.
+ * NOTE(review): POLLIN paired with POLLWRNORM is kept as-is from the
+ * original flag choice — confirm against the vs4l userspace contract.
+ */
+static int vertex_queue_poll(struct vertex_queue_list *qlist, struct file *file,
+		struct poll_table_struct *poll)
+{
+	int ret = 0;
+	unsigned long events;
+	struct vertex_queue *inq, *outq;
+
+	vertex_enter();
+	events = poll_requested_events(poll);
+
+	inq = &qlist->in_queue;
+	outq = &qlist->out_queue;
+
+	if (events & POLLIN) {
+		if (list_empty(&inq->done_list))
+			poll_wait(file, &inq->done_wq, poll);
+
+		if (!list_empty(&inq->done_list))
+			ret |= POLLIN | POLLWRNORM;
+	}
+
+	if (events & POLLOUT) {
+		if (list_empty(&outq->done_list))
+			poll_wait(file, &outq->done_wq, poll);
+
+		if (!list_empty(&outq->done_list))
+			ret |= POLLOUT | POLLWRNORM;
+	}
+
+	vertex_leave();
+	return ret;
+}
+
+/* Forward a vs4l graph descriptor to the graph ops */
+static int vertex_queue_set_graph(struct vertex_queue_list *qlist,
+		struct vs4l_graph *ginfo)
+{
+	int ret;
+
+	vertex_enter();
+	ret = qlist->gops->set_graph(qlist->vctx->graph, ginfo);
+	if (!ret)
+		vertex_leave();
+
+	return ret;
+}
+
+/*
+ * Apply a vs4l format list: hand it to the graph first, then cache it on
+ * the queue matching the list's direction.
+ */
+static int vertex_queue_set_format(struct vertex_queue_list *qlist,
+		struct vs4l_format_list *flist)
+{
+	int ret;
+	struct vertex_queue *queue;
+
+	vertex_enter();
+	queue = (flist->direction == VS4L_DIRECTION_IN) ?
+		&qlist->in_queue : &qlist->out_queue;
+
+	ret = qlist->gops->set_format(qlist->vctx->graph, flist);
+	if (ret)
+		return ret;
+
+	ret = __vertex_queue_s_format(queue, flist);
+	if (ret)
+		return ret;
+
+	vertex_leave();
+	return 0;
+}
+
+/* Forward a vs4l parameter list to the graph ops */
+static int vertex_queue_set_param(struct vertex_queue_list *qlist,
+		struct vs4l_param_list *plist)
+{
+	int ret;
+
+	vertex_enter();
+	ret = qlist->gops->set_param(qlist->vctx->graph, plist);
+	if (!ret)
+		vertex_leave();
+
+	return ret;
+}
+
+/*
+ * Handle a vs4l control request.  No control IDs are supported yet, so
+ * every request is rejected with -EINVAL (extend the dispatch here when
+ * controls are added).
+ */
+static int vertex_queue_set_ctrl(struct vertex_queue_list *qlist,
+		struct vs4l_ctrl *ctrl)
+{
+	vertex_enter();
+	vertex_err("vs4l control is invalid(%d)\n", ctrl->ctrl);
+	return -EINVAL;
+}
+
+/*
+ * Top-level qbuf: enqueue the container list on its queue, then, as soon
+ * as BOTH queues have at least one queued bundle, pair the two head
+ * bundles and submit them to the graph as one job.
+ */
+static int vertex_queue_qbuf(struct vertex_queue_list *qlist,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+	struct vertex_queue *inq, *outq, *queue;
+	struct vertex_bundle *invb, *otvb;
+
+	vertex_enter();
+	inq = &qlist->in_queue;
+	outq = &qlist->out_queue;
+
+	if (clist->direction == VS4L_DIRECTION_IN)
+		queue = inq;
+	else
+		queue = outq;
+
+	ret = __vertex_queue_qbuf(queue, clist);
+	if (ret)
+		goto p_err_qbuf;
+
+	/* Submission needs an in/out pair; wait for the other side */
+	if (list_empty(&inq->queued_list) || list_empty(&outq->queued_list))
+		return 0;
+
+	invb = list_first_entry(&inq->queued_list,
+			struct vertex_bundle, queued_entry);
+	otvb = list_first_entry(&outq->queued_list,
+			struct vertex_bundle, queued_entry);
+
+	__vertex_queue_process(inq, invb);
+	__vertex_queue_process(outq, otvb);
+
+	ret = qlist->gops->queue(qlist->vctx->graph,
+			&invb->clist, &otvb->clist);
+	if (ret)
+		goto p_err_gops;
+
+	vertex_leave();
+	return 0;
+p_err_gops:
+p_err_qbuf:
+	return ret;
+}
+
+/*
+ * Top-level dqbuf: dequeue a completed bundle from the matching queue,
+ * cross-check the cached bundle against the request, and notify the graph
+ * that the container list has been consumed.
+ */
+static int vertex_queue_dqbuf(struct vertex_queue_list *qlist,
+		struct vs4l_container_list *clist)
+{
+	int ret;
+	struct vertex_queue *queue;
+	struct vertex_bundle *bundle;
+
+	vertex_enter();
+	if (clist->direction == VS4L_DIRECTION_IN)
+		queue = &qlist->in_queue;
+	else
+		queue = &qlist->out_queue;
+
+	ret = __vertex_queue_dqbuf(queue, clist);
+	if (ret)
+		goto p_err_dqbuf;
+
+	if (clist->index >= VERTEX_MAX_BUFFER) {
+		ret = -EINVAL;
+		vertex_err("clist index is out of range (%u/%u)\n",
+				clist->index, VERTEX_MAX_BUFFER);
+		goto p_err_index;
+	}
+
+	bundle = queue->bufs[clist->index];
+	if (!bundle) {
+		ret = -EINVAL;
+		vertex_err("bundle(%u) is NULL\n", clist->index);
+		goto p_err_bundle;
+	}
+
+	if (bundle->clist.index != clist->index) {
+		ret = -EINVAL;
+		vertex_err("index of clist is different (%u/%u)\n",
+				bundle->clist.index, clist->index);
+		goto p_err_bundle_index;
+	}
+
+	ret = qlist->gops->deque(qlist->vctx->graph, &bundle->clist);
+	if (ret)
+		goto p_err_gops;
+
+	vertex_leave();
+	return 0;
+p_err_gops:
+p_err_bundle_index:
+p_err_bundle:
+p_err_index:
+p_err_dqbuf:
+	return ret;
+}
+
+/* Start streaming on both queues, then start the graph */
+static int vertex_queue_streamon(struct vertex_queue_list *qlist)
+{
+	int ret;
+
+	vertex_enter();
+	__vertex_queue_start(&qlist->in_queue);
+	__vertex_queue_start(&qlist->out_queue);
+
+	ret = qlist->gops->start(qlist->vctx->graph);
+	if (!ret)
+		vertex_leave();
+
+	return ret;
+}
+
+/* Stop the graph first, then stop streaming on both queues */
+static int vertex_queue_streamoff(struct vertex_queue_list *qlist)
+{
+	int ret;
+
+	vertex_enter();
+	ret = qlist->gops->stop(qlist->vctx->graph);
+	if (ret)
+		return ret;
+
+	__vertex_queue_stop(&qlist->in_queue);
+	__vertex_queue_stop(&qlist->out_queue);
+
+	vertex_leave();
+	return 0;
+}
+
+/* Queue operations exposed to the vertex context (ioctl layer) */
+const struct vertex_context_qops vertex_context_qops = {
+	.poll = vertex_queue_poll,
+	.set_graph = vertex_queue_set_graph,
+	.set_format = vertex_queue_set_format,
+	.set_param = vertex_queue_set_param,
+	.set_ctrl = vertex_queue_set_ctrl,
+	.qbuf = vertex_queue_qbuf,
+	.dqbuf = vertex_queue_dqbuf,
+	.streamon = vertex_queue_streamon,
+	.streamoff = vertex_queue_streamoff
+};
+
+/*
+ * Move a processed bundle to the done list: sync CPU caches, transfer it
+ * from the process list to the done list under done_lock, and wake any
+ * waiter in poll/dqbuf.  Called from completion context.
+ */
+static void __vertex_queue_done(struct vertex_queue *queue,
+		struct vertex_bundle *bundle)
+{
+	int dma_dir;
+	struct vertex_container *con;
+	struct vertex_buffer *buf;
+	unsigned int c_cnt, b_cnt;
+	unsigned long flags;
+
+	vertex_enter();
+	/* Mismatch is logged but processing continues */
+	if (queue->direction != bundle->clist.direction) {
+		vertex_err("direction of queue is different (%u/%u)\n",
+				queue->direction, bundle->clist.direction);
+	}
+
+	if (queue->direction == VS4L_DIRECTION_OT)
+		dma_dir = DMA_FROM_DEVICE;
+	else
+		dma_dir = DMA_TO_DEVICE;
+
+	/* TODO: check sync */
+	con = bundle->clist.containers;
+	for (c_cnt = 0; c_cnt < bundle->clist.count; ++c_cnt) {
+		buf = con[c_cnt].buffers;
+		for (b_cnt = 0; b_cnt < con[c_cnt].count; ++b_cnt)
+			__dma_map_area(buf[b_cnt].kvaddr, buf[b_cnt].size,
+					dma_dir);
+	}
+
+	spin_lock_irqsave(&queue->done_lock, flags);
+
+	list_del(&bundle->process_entry);
+	atomic_dec(&queue->process_count);
+
+	bundle->state = VERTEX_BUNDLE_STATE_DONE;
+	list_add_tail(&bundle->done_entry, &queue->done_list);
+	atomic_inc(&queue->done_count);
+
+	spin_unlock_irqrestore(&queue->done_lock, flags);
+	wake_up(&queue->done_wq);
+	vertex_leave();
+}
+
+/*
+ * Completion callback: mark the in/out container lists of a finished job
+ * as done.  incl/otcl must be the clist members embedded in their
+ * vertex_bundle (container_of is used to recover the bundles) — callers
+ * must pass the lists handed out by vertex_queue_qbuf().
+ */
+void vertex_queue_done(struct vertex_queue_list *qlist,
+		struct vertex_container_list *incl,
+		struct vertex_container_list *otcl,
+		unsigned long flags)
+{
+	struct vertex_queue *inq, *outq;
+	struct vertex_bundle *invb, *otvb;
+
+	vertex_enter();
+	inq = &qlist->in_queue;
+	outq = &qlist->out_queue;
+
+	/* Complete the output side first so results are visible earliest */
+	if (!list_empty(&outq->process_list)) {
+		otvb = container_of(otcl, struct vertex_bundle, clist);
+		if (otvb->state == VERTEX_BUNDLE_STATE_PROCESS) {
+			otvb->flags |= flags;
+			__vertex_queue_done(outq, otvb);
+		} else {
+			vertex_err("out-bundle state(%d) is not process\n",
+					otvb->state);
+		}
+	} else {
+		vertex_err("out-queue is empty\n");
+	}
+
+	if (!list_empty(&inq->process_list)) {
+		invb = container_of(incl, struct vertex_bundle, clist);
+		if (invb->state == VERTEX_BUNDLE_STATE_PROCESS) {
+			invb->flags |= flags;
+			__vertex_queue_done(inq, invb);
+		} else {
+			vertex_err("in-bundle state(%u) is not process\n",
+					invb->state);
+		}
+	} else {
+		vertex_err("in-queue is empty\n");
+	}
+
+	vertex_leave();
+}
+
+/* Initialize one queue (lists, counters, locks) for the given direction */
+static void __vertex_queue_init(struct vertex_context *vctx,
+		struct vertex_queue *queue, unsigned int direction)
+{
+	vertex_enter();
+	queue->direction = direction;
+	queue->streaming = 0;
+	/* Serialization is shared with the owning context */
+	queue->lock = &vctx->lock;
+
+	INIT_LIST_HEAD(&queue->queued_list);
+	atomic_set(&queue->queued_count, 0);
+	INIT_LIST_HEAD(&queue->process_list);
+	atomic_set(&queue->process_count, 0);
+	INIT_LIST_HEAD(&queue->done_list);
+	atomic_set(&queue->done_count, 0);
+
+	spin_lock_init(&queue->done_lock);
+	init_waitqueue_head(&queue->done_wq);
+
+	queue->flist.count = 0;
+	queue->flist.formats = NULL;
+	queue->num_buffers = 0;
+	queue->qlist = &vctx->queue_list;
+	vertex_leave();
+}
+
+/* Wire up the context's queue list and initialize both directions */
+int vertex_queue_init(struct vertex_context *vctx)
+{
+	struct vertex_queue_list *qlist;
+	struct vertex_queue *inq, *outq;
+
+	vertex_enter();
+	vctx->queue_ops = &vertex_context_qops;
+
+	qlist = &vctx->queue_list;
+	qlist->vctx = vctx;
+	qlist->gops = &vertex_queue_gops;
+
+	inq = &qlist->in_queue;
+	outq = &qlist->out_queue;
+
+	__vertex_queue_init(vctx, inq, VS4L_DIRECTION_IN);
+	__vertex_queue_init(vctx, outq, VS4L_DIRECTION_OT);
+
+	vertex_leave();
+	return 0;
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_QUEUE_H__
+#define __VERTEX_QUEUE_H__
+
+#include <linux/poll.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include "vertex-config.h"
+#include "vs4l.h"
+#include "vertex-memory.h"
+#include "vertex-graph.h"
+
+struct vertex_context;
+struct vertex_queue_list;
+
+/* Static description of a supported pixel/buffer format */
+struct vertex_format_type {
+	char *name;
+	unsigned int colorspace;
+	unsigned int planes;
+	unsigned int bitsperpixel[VERTEX_MAX_PLANES];
+};
+
+/* A format as configured for one target via set_format() */
+struct vertex_buffer_format {
+	unsigned int target;
+	struct vertex_format_type *fmt;
+	unsigned int colorspace;
+	unsigned int plane;
+	unsigned int width;
+	unsigned int height;
+	unsigned int size[VERTEX_MAX_PLANES];
+};
+
+struct vertex_buffer_format_list {
+	unsigned int count;
+	struct vertex_buffer_format *formats;
+};
+
+/* Kernel-side mirror of one vs4l container */
+struct vertex_container {
+	unsigned int type;
+	unsigned int target;
+	unsigned int memory;
+	unsigned int reserved[4];
+	unsigned int count;
+	struct vertex_buffer *buffers;
+	/* points into the owning queue's flist; not owned */
+	struct vertex_buffer_format *format;
+};
+
+/* Kernel-side mirror of one vs4l container list */
+struct vertex_container_list {
+	unsigned int direction;
+	unsigned int id;
+	unsigned int index;
+	unsigned long flags;
+	struct timeval timestamp[6];
+	unsigned int count;
+	unsigned int user_params[MAX_NUM_OF_USER_PARAMS];
+	struct vertex_container *containers;
+};
+
+/* Bundle lifecycle: DEQUEUED -> QUEUED -> PROCESS -> DONE -> DEQUEUED */
+enum vertex_bundle_state {
+	VERTEX_BUNDLE_STATE_DEQUEUED,
+	VERTEX_BUNDLE_STATE_QUEUED,
+	VERTEX_BUNDLE_STATE_PROCESS,
+	VERTEX_BUNDLE_STATE_DONE,
+};
+
+/*
+ * One buffer submission unit.  clist must stay embedded here: completion
+ * code recovers the bundle from a clist pointer via container_of().
+ */
+struct vertex_bundle {
+	unsigned long flags;
+	unsigned int state;
+	struct list_head queued_entry;
+	struct list_head process_entry;
+	struct list_head done_entry;
+
+	struct vertex_container_list clist;
+};
+
+/* Per-direction buffer queue */
+struct vertex_queue {
+	unsigned int direction;
+	unsigned int streaming;
+	struct mutex *lock;
+
+	struct list_head queued_list;
+	atomic_t queued_count;
+	struct list_head process_list;
+	atomic_t process_count;
+	struct list_head done_list;
+	atomic_t done_count;
+
+	/* done_lock/done_wq guard and signal the done list */
+	spinlock_t done_lock;
+	wait_queue_head_t done_wq;
+
+	struct vertex_buffer_format_list flist;
+	struct vertex_bundle *bufs[VERTEX_MAX_BUFFER];
+	unsigned int num_buffers;
+
+	struct vertex_queue_list *qlist;
+};
+
+/* Graph-side callbacks invoked by the queue layer */
+struct vertex_queue_gops {
+	int (*set_graph)(struct vertex_graph *graph, struct vs4l_graph *ginfo);
+	int (*set_format)(struct vertex_graph *graph,
+			struct vs4l_format_list *flist);
+	int (*set_param)(struct vertex_graph *graph,
+			struct vs4l_param_list *plist);
+	int (*set_ctrl)(struct vertex_graph *graph, struct vs4l_ctrl *ctrl);
+	int (*queue)(struct vertex_graph *graph,
+			struct vertex_container_list *incl,
+			struct vertex_container_list *otcl);
+	int (*deque)(struct vertex_graph *graph,
+			struct vertex_container_list *clist);
+	int (*start)(struct vertex_graph *graph);
+	int (*stop)(struct vertex_graph *graph);
+};
+
+/* Paired in/out queues belonging to one context */
+struct vertex_queue_list {
+	struct vertex_queue in_queue;
+	struct vertex_queue out_queue;
+	const struct vertex_queue_gops *gops;
+	struct vertex_context *vctx;
+};
+
+void vertex_queue_done(struct vertex_queue_list *qlist,
+ struct vertex_container_list *inclist,
+ struct vertex_container_list *otclist,
+ unsigned long flags);
+
+int vertex_queue_init(struct vertex_context *vctx);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-message.h"
+#include "vertex-slab.h"
+
+/*
+ * Allocate one object of at most 'size' bytes from the slab cache.
+ *
+ * Fix: the size check was inverted ('size <= cache_size' failed), which
+ * rejected every valid request and would have admitted oversized ones —
+ * the error text ("size is too big") shows the intended condition.  Also
+ * fixes the "tahn" typo in the message.
+ */
+int vertex_slab_alloc(struct vertex_slab_allocator *allocator, void **target,
+		size_t size)
+{
+	int ret;
+
+	vertex_enter();
+	if (size > allocator->cache_size) {
+		ret = -EINVAL;
+		vertex_err("size is too big than slab size (%zu, %zu)\n",
+				size, allocator->cache_size);
+		goto p_err;
+	}
+
+	*target = kmem_cache_alloc(allocator->cache, GFP_KERNEL);
+	if (!(*target)) {
+		ret = -ENOMEM;
+		vertex_err("kmem_cache_alloc is fail\n");
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Return an object previously obtained from vertex_slab_alloc() */
+void vertex_slab_free(struct vertex_slab_allocator *allocator, void *target)
+{
+	vertex_enter();
+	kmem_cache_free(allocator->cache, target);
+	vertex_leave();
+}
+
+/*
+ * Create the slab cache used for vertex_message objects.  The cache is
+ * poisoned (SLAB_POISON) and panics on creation failure paths inside the
+ * allocator (SLAB_PANIC) to surface corruption early.
+ */
+int vertex_slab_init(struct vertex_slab_allocator *allocator)
+{
+	int ret;
+	char name[100];
+	size_t size;
+
+	vertex_enter();
+	size = sizeof(struct vertex_message);
+	allocator->cache_size = size;
+
+	snprintf(name, sizeof(name), "vertex-slab-%zu", size);
+
+	allocator->cache = kmem_cache_create(name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_POISON | SLAB_PANIC, NULL);
+	if (!allocator->cache) {
+		ret = -ENOMEM;
+		vertex_err("kmem_cache_create(%zu) is fail\n", size);
+		goto p_err;
+	}
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Destroy the slab cache; all objects must already be freed */
+void vertex_slab_deinit(struct vertex_slab_allocator *allocator)
+{
+	kmem_cache_destroy(allocator->cache);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_SLAB_H__
+#define __VERTEX_SLAB_H__
+
+#include <linux/slab.h>
+
+/* Fixed-size slab cache wrapper; cache_size is the object size limit */
+struct vertex_slab_allocator {
+	size_t cache_size;
+	struct kmem_cache *cache;
+};
+
+int vertex_slab_alloc(struct vertex_slab_allocator *allocator, void **target,
+ size_t size);
+void vertex_slab_free(struct vertex_slab_allocator *allocator, void *target);
+
+int vertex_slab_init(struct vertex_slab_allocator *allocator);
+void vertex_slab_deinit(struct vertex_slab_allocator *allocator);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/pm_qos.h>
+#include <linux/exynos_iovmm.h>
+
+#include "vertex-log.h"
+#include "vertex-device.h"
+#include "vertex-system.h"
+
+#define CAM_L0 (690000)
+#define CAM_L1 (680000)
+#define CAM_L2 (670000)
+#define CAM_L3 (660000)
+#define CAM_L4 (650000)
+
+#define MIF_L0 (2093000)
+#define MIF_L1 (2002000)
+#define MIF_L2 (1794000)
+#define MIF_L3 (1539000)
+#define MIF_L4 (1352000)
+#define MIF_L5 (1014000)
+#define MIF_L6 (845000)
+#define MIF_L7 (676000)
+
+#if defined(CONFIG_PM_DEVFREQ)
+struct pm_qos_request exynos_vertex_qos_cam;
+struct pm_qos_request exynos_vertex_qos_mem;
+#endif
+
+/*
+ * Load and boot the VIPx firmware: hold the CPU in reset, copy the DRAM
+ * and DTCM images, fill the boot handshake area, copy the ITCM image,
+ * release reset and wait for the firmware's bootup acknowledgement.
+ *
+ * Cleanup: removed the commented-out userspace execl() debug snippets,
+ * which can never run in kernel context.
+ */
+int vertex_system_fw_bootup(struct vertex_system *sys)
+{
+	int ret;
+	struct vertex_memory *mem;
+	struct vertex_binary *bin;
+
+	/* Boot handshake area layout shared with the firmware */
+	struct {
+		unsigned int sign;
+		unsigned int mbox_addr;
+		unsigned int mbox_size;
+		unsigned int debug_addr;
+		unsigned int debug_size;
+	} *shared_mem;
+
+	vertex_enter();
+	/* TODO check cam/bus */
+	mem = &sys->memory;
+	bin = &sys->binary;
+
+	ret = sys->ctrl_ops->reset(sys);
+	if (ret)
+		goto p_err;
+
+	ret = vertex_binary_read(bin, NULL, VERTEX_FW_DRAM_NAME, mem->fw.kvaddr,
+			mem->fw.size);
+	if (ret)
+		goto p_err;
+
+	ret = vertex_binary_read(bin, NULL, VERTEX_FW_DTCM_NAME, sys->dtcm,
+			sys->dtcm_size);
+	if (ret)
+		goto p_err;
+
+	/*
+	 * Handshake area lives at DTCM offset 0x3FD0 — must match the
+	 * firmware image layout; TODO confirm against FW headers.
+	 */
+	shared_mem = sys->dtcm + 0x3FD0;
+	shared_mem->sign = 0xABCDEFAB;
+	shared_mem->mbox_addr = mem->mbox.dvaddr;
+	shared_mem->mbox_size = mem->mbox.size;
+	shared_mem->debug_addr = mem->debug.dvaddr;
+	shared_mem->debug_size = mem->debug.size;
+
+	ret = vertex_binary_read(bin, NULL, VERTEX_FW_ITCM_NAME,
+			sys->itcm, sys->itcm_size);
+	if (ret)
+		goto p_err;
+
+	ret = sys->ctrl_ops->start(sys);
+	if (ret)
+		goto p_err;
+
+	ret = vertex_hw_wait_bootup(&sys->interface);
+	if (ret)
+		goto p_err;
+
+	vertex_leave();
+	return 0;
+p_err:
+	return ret;
+}
+
+/* Start the host<->firmware interface */
+int vertex_system_start(struct vertex_system *sys)
+{
+	int ret;
+
+	vertex_enter();
+	ret = vertex_interface_start(&sys->interface);
+	if (!ret)
+		vertex_leave();
+
+	return ret;
+}
+
+/* Stop the host<->firmware interface */
+int vertex_system_stop(struct vertex_system *sys)
+{
+	int ret;
+
+	vertex_enter();
+	ret = vertex_interface_stop(&sys->interface);
+	if (!ret)
+		vertex_leave();
+
+	return ret;
+}
+
+/*
+ * Runtime resume: pin CAM/MIF QoS, enable clocks and activate the IOMMU.
+ *
+ * Fix: the error path removed the PM QoS requests unconditionally even
+ * though they are only added under CONFIG_PM_DEVFREQ — unbalanced remove
+ * (and a build dependency) when that config is disabled.  The removes are
+ * now guarded identically to the adds.
+ */
+int vertex_system_resume(struct vertex_system *sys)
+{
+	int ret;
+
+	vertex_enter();
+#if defined(CONFIG_PM_DEVFREQ)
+	pm_qos_add_request(&exynos_vertex_qos_cam, PM_QOS_CAM_THROUGHPUT,
+			CAM_L0);
+	pm_qos_add_request(&exynos_vertex_qos_mem, PM_QOS_BUS_THROUGHPUT,
+			MIF_L0);
+#endif
+
+	ret = sys->clk_ops->clk_on(sys);
+	if (ret)
+		goto p_err_clk_on;
+
+	sys->clk_ops->clk_dump(sys);
+
+	ret = iovmm_activate(sys->dev);
+	if (ret) {
+		vertex_err("Failed to activate iommu (%d)\n", ret);
+		goto p_err_iovmm;
+	}
+
+	vertex_leave();
+	return 0;
+p_err_iovmm:
+	sys->clk_ops->clk_off(sys);
+p_err_clk_on:
+#if defined(CONFIG_PM_DEVFREQ)
+	pm_qos_remove_request(&exynos_vertex_qos_mem);
+	pm_qos_remove_request(&exynos_vertex_qos_cam);
+#endif
+	return ret;
+}
+
+/*
+ * Runtime suspend: deactivate the IOMMU, gate clocks and release the
+ * PM QoS requests taken in vertex_system_resume().
+ */
+int vertex_system_suspend(struct vertex_system *sys)
+{
+	vertex_enter();
+	iovmm_deactivate(sys->dev);
+
+	//TODO check this
+	//ret = sys->ctrl_ops->reset(sys);
+	//if (ret)
+	//	vertex_err("Failed to reset for power down (%d)\n", ret);
+
+	sys->clk_ops->clk_off(sys);
+
+#if defined(CONFIG_PM_DEVFREQ)
+	pm_qos_remove_request(&exynos_vertex_qos_mem);
+	pm_qos_remove_request(&exynos_vertex_qos_cam);
+#endif
+
+	vertex_leave();
+	return 0;
+}
+
+int vertex_system_open(struct vertex_system *sys)
+{
+ int ret;
+
+ vertex_enter();
+ ret = vertex_memory_open(&sys->memory);
+ if (ret)
+ goto p_err_memory;
+
+ ret = vertex_interface_open(&sys->interface,
+ sys->memory.mbox.kvaddr);
+ if (ret)
+ goto p_err_interface;
+
+ ret = vertex_graphmgr_open(&sys->graphmgr);
+ if (ret)
+ goto p_err_graphmgr;
+
+ vertex_leave();
+ return 0;
+p_err_graphmgr:
+ vertex_interface_close(&sys->interface);
+p_err_interface:
+ vertex_memory_close(&sys->memory);
+p_err_memory:
+ return ret;
+}
+
+/* Close sub-modules in reverse order of vertex_system_open() */
+int vertex_system_close(struct vertex_system *sys)
+{
+	vertex_enter();
+	vertex_graphmgr_close(&sys->graphmgr);
+	vertex_interface_close(&sys->interface);
+	vertex_memory_close(&sys->memory);
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Probe the vertex system: map the four MMIO regions (CPU_SS1, CPU_SS2,
+ * ITCM, DTCM), set up pinctrl/clocks and probe memory, interface, binary
+ * and graph manager, unwinding everything on failure.
+ *
+ * Fix: devm_ioremap() returns NULL on failure — it never returns an
+ * ERR_PTR — so the original IS_ERR() checks could not detect a failed
+ * mapping.  Each check now tests for NULL and returns -ENOMEM; the error
+ * messages also now name the API actually called.
+ */
+int vertex_system_probe(struct vertex_device *device)
+{
+	int ret;
+	struct vertex_system *sys;
+	struct platform_device *pdev;
+	struct device *dev;
+	struct resource *res;
+	void __iomem *iomem;
+
+	vertex_enter();
+	sys = &device->system;
+	sys->device = device;
+
+	pdev = to_platform_device(device->dev);
+	dev = device->dev;
+	sys->dev = dev;
+
+	/* VERTEX_CPU_SS1 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		vertex_err("platform_get_resource(0) is fail");
+		goto p_err_res_ss1;
+	}
+
+	iomem = devm_ioremap(dev, res->start, resource_size(res));
+	if (!iomem) {
+		ret = -ENOMEM;
+		vertex_err("devm_ioremap(0) is fail");
+		goto p_err_remap_ss1;
+	}
+
+	sys->reg_ss[VERTEX_REG_SS1] = iomem;
+	sys->reg_ss_size[VERTEX_REG_SS1] = resource_size(res);
+
+	/* VERTEX_CPU_SS2 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		ret = -EINVAL;
+		vertex_err("platform_get_resource(1) is fail");
+		goto p_err_res_ss2;
+	}
+
+	iomem = devm_ioremap(dev, res->start, resource_size(res));
+	if (!iomem) {
+		ret = -ENOMEM;
+		vertex_err("devm_ioremap(1) is fail");
+		goto p_err_remap_ss2;
+	}
+
+	sys->reg_ss[VERTEX_REG_SS2] = iomem;
+	sys->reg_ss_size[VERTEX_REG_SS2] = resource_size(res);
+
+	/* VERTEX ITCM */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!res) {
+		ret = -EINVAL;
+		vertex_err("platform_get_resource(2) is fail");
+		goto p_err_res_itcm;
+	}
+
+	iomem = devm_ioremap(dev, res->start, resource_size(res));
+	if (!iomem) {
+		ret = -ENOMEM;
+		vertex_err("devm_ioremap(2) is fail");
+		goto p_err_remap_itcm;
+	}
+
+	sys->itcm = iomem;
+	sys->itcm_size = resource_size(res);
+
+	/* VERTEX DTCM */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (!res) {
+		ret = -EINVAL;
+		vertex_err("platform_get_resource(3) is fail");
+		goto p_err_res_dtcm;
+	}
+
+	iomem = devm_ioremap(dev, res->start, resource_size(res));
+	if (!iomem) {
+		ret = -ENOMEM;
+		vertex_err("devm_ioremap(3) is fail");
+		goto p_err_remap_dtcm;
+	}
+
+	sys->dtcm = iomem;
+	sys->dtcm_size = resource_size(res);
+
+	sys->cam_qos = 0;
+	sys->mif_qos = 0;
+
+	sys->clk_ops = &vertex_clk_ops;
+	sys->ctrl_ops = &vertex_ctrl_ops;
+	sys->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(sys->pinctrl)) {
+		ret = PTR_ERR(sys->pinctrl);
+		vertex_err("Failed to get devm pinctrl (%d)\n", ret);
+		goto p_err_pinctrl;
+	}
+
+	ret = sys->clk_ops->clk_init(sys);
+	if (ret)
+		goto p_err_clk;
+
+	ret = vertex_memory_probe(sys);
+	if (ret)
+		goto p_err_memory;
+
+	ret = vertex_interface_probe(sys);
+	if (ret)
+		goto p_err_interface;
+
+	ret = vertex_binary_init(sys);
+	if (ret)
+		goto p_err_binary;
+
+	ret = vertex_graphmgr_probe(sys);
+	if (ret)
+		goto p_err_graphmgr;
+
+	vertex_leave();
+	return 0;
+p_err_graphmgr:
+	vertex_binary_deinit(&sys->binary);
+p_err_binary:
+	vertex_interface_remove(&sys->interface);
+p_err_interface:
+	vertex_memory_remove(&sys->memory);
+p_err_memory:
+	sys->clk_ops->clk_deinit(sys);
+p_err_clk:
+	devm_pinctrl_put(sys->pinctrl);
+p_err_pinctrl:
+	devm_iounmap(dev, sys->dtcm);
+p_err_remap_dtcm:
+p_err_res_dtcm:
+	devm_iounmap(dev, sys->itcm);
+p_err_remap_itcm:
+p_err_res_itcm:
+	devm_iounmap(dev, sys->reg_ss[VERTEX_REG_SS2]);
+p_err_remap_ss2:
+p_err_res_ss2:
+	devm_iounmap(dev, sys->reg_ss[VERTEX_REG_SS1]);
+p_err_remap_ss1:
+p_err_res_ss1:
+	return ret;
+}
+
+/*
+ * Tear down everything vertex_system_probe() set up, in strict reverse
+ * order of creation: graph manager, firmware binary loader, host
+ * interface, memory, clocks, pinctrl, and finally the four ioremapped
+ * register windows (DTCM, ITCM, CPU_SS2, CPU_SS1).
+ */
+void vertex_system_remove(struct vertex_system *sys)
+{
+	vertex_enter();
+	vertex_graphmgr_remove(&sys->graphmgr);
+	vertex_binary_deinit(&sys->binary);
+	vertex_interface_remove(&sys->interface);
+	vertex_memory_remove(&sys->memory);
+	sys->clk_ops->clk_deinit(sys);
+	devm_pinctrl_put(sys->pinctrl);
+	devm_iounmap(sys->dev, sys->dtcm);
+	devm_iounmap(sys->dev, sys->itcm);
+	devm_iounmap(sys->dev, sys->reg_ss[VERTEX_REG_SS2]);
+	devm_iounmap(sys->dev, sys->reg_ss[VERTEX_REG_SS1]);
+	vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_SYSTEM_H__
+#define __VERTEX_SYSTEM_H__
+
+#include "platform/vertex-clk.h"
+#include "platform/vertex-ctrl.h"
+#include "vertex-interface.h"
+#include "vertex-memory.h"
+#include "vertex-binary.h"
+#include "vertex-graphmgr.h"
+
+struct vertex_device;
+
+/*
+ * Top-level system context for the vertex driver: holds the mapped
+ * register windows, clock/control ops and the core sub-objects
+ * (memory, interface, binary, graphmgr) created by
+ * vertex_system_probe().
+ */
+struct vertex_system {
+	struct device *dev;
+	/* ioremapped CPU_SS1/CPU_SS2 register windows and their sizes */
+	void __iomem *reg_ss[VERTEX_REG_MAX];
+	resource_size_t reg_ss_size[VERTEX_REG_MAX];
+	/* instruction tightly-coupled memory window (8K per DT) */
+	void __iomem *itcm;
+	resource_size_t itcm_size;
+	/* data tightly-coupled memory window (16K per DT) */
+	void __iomem *dtcm;
+	resource_size_t dtcm_size;
+
+	/* QoS request values; initialized to 0 in probe */
+	unsigned int cam_qos;
+	unsigned int mif_qos;
+
+	const struct vertex_clk_ops *clk_ops;
+	const struct vertex_ctrl_ops *ctrl_ops;
+	struct pinctrl *pinctrl;
+	struct vertex_memory memory;
+	struct vertex_interface interface;
+	struct vertex_binary binary;
+	struct vertex_graphmgr graphmgr;
+
+	/* back-pointer to the owning device object */
+	struct vertex_device *device;
+};
+
+int vertex_system_fw_bootup(struct vertex_system *sys);
+
+int vertex_system_resume(struct vertex_system *sys);
+int vertex_system_suspend(struct vertex_system *sys);
+
+int vertex_system_start(struct vertex_system *sys);
+int vertex_system_stop(struct vertex_system *sys);
+int vertex_system_open(struct vertex_system *sys);
+int vertex_system_close(struct vertex_system *sys);
+
+int vertex_system_probe(struct vertex_device *device);
+void vertex_system_remove(struct vertex_system *sys);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-taskmgr.h"
+
+/*
+ * State-setter helpers: each moves @task onto the matching state list
+ * of @tmgr and bumps the per-state counter.  They do NOT unlink the
+ * task from a previous list — callers (the trans_* functions) must
+ * list_del() first.  NOTE(review): no locking here; callers appear to
+ * be responsible for holding tmgr->slock — confirm at call sites.
+ */
+static void __vertex_task_s_free(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	task->state = VERTEX_TASK_STATE_FREE;
+	list_add_tail(&task->list, &tmgr->fre_list);
+	tmgr->fre_cnt++;
+	vertex_leave();
+}
+
+/* Peek (without removing) the oldest free task, or NULL if none. */
+static struct vertex_task *__vertex_task_get_first_free(
+		struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task;
+
+	vertex_enter();
+	if (tmgr->fre_cnt)
+		task = list_first_entry(&tmgr->fre_list,
+				struct vertex_task, list);
+	else
+		task = NULL;
+	vertex_leave();
+	return task;
+}
+
+/* Append @task to the request list. */
+static void __vertex_task_s_request(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	task->state = VERTEX_TASK_STATE_REQUEST;
+	list_add_tail(&task->list, &tmgr->req_list);
+	tmgr->req_cnt++;
+	vertex_leave();
+}
+
+/* Append @task to the prepare list. */
+static void __vertex_task_s_prepare(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	task->state = VERTEX_TASK_STATE_PREPARE;
+	list_add_tail(&task->list, &tmgr->pre_list);
+	tmgr->pre_cnt++;
+	vertex_leave();
+}
+
+/* Append @task to the process list. */
+static void __vertex_task_s_process(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	task->state = VERTEX_TASK_STATE_PROCESS;
+	list_add_tail(&task->list, &tmgr->pro_list);
+	tmgr->pro_cnt++;
+	vertex_leave();
+}
+
+/* Append @task to the complete list. */
+static void __vertex_task_s_complete(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	task->state = VERTEX_TASK_STATE_COMPLETE;
+	list_add_tail(&task->list, &tmgr->com_list);
+	tmgr->com_cnt++;
+	vertex_leave();
+}
+
+/*
+ * Debug dumpers: print the manager id, the per-state count and the
+ * index of every task on each state list.  NOTE(review): the lists
+ * are walked without taking tmgr->slock — safe only if the caller
+ * already holds it or the system is quiescent; confirm at call sites.
+ */
+void vertex_task_print_free_list(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task, *temp;
+	int idx = 0;
+
+	vertex_enter();
+	vertex_dbg("fre(%d, %d)\n", tmgr->id, tmgr->fre_cnt);
+	list_for_each_entry_safe(task, temp, &tmgr->fre_list, list) {
+		vertex_dbg("fre[%d] %d\n", idx++, task->index);
+	}
+	vertex_leave();
+}
+
+
+/* Dump the request list (see vertex_task_print_free_list). */
+void vertex_task_print_request_list(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task, *temp;
+	int idx = 0;
+
+	vertex_enter();
+	vertex_dbg("req(%d, %d)\n", tmgr->id, tmgr->req_cnt);
+	list_for_each_entry_safe(task, temp, &tmgr->req_list, list) {
+		vertex_dbg("req[%d] %d\n", idx++, task->index);
+	}
+	vertex_leave();
+}
+
+/* Dump the prepare list. */
+void vertex_task_print_prepare_list(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task, *temp;
+	int idx = 0;
+
+	vertex_enter();
+	vertex_dbg("pre(%d, %d)\n", tmgr->id, tmgr->pre_cnt);
+	list_for_each_entry_safe(task, temp, &tmgr->pre_list, list) {
+		vertex_dbg("pre[%d] %d\n", idx++, task->index);
+	}
+	vertex_leave();
+}
+
+/* Dump the process list. */
+void vertex_task_print_process_list(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task, *temp;
+	int idx = 0;
+
+	vertex_enter();
+	vertex_dbg("pro(%d, %d)\n", tmgr->id, tmgr->pro_cnt);
+	list_for_each_entry_safe(task, temp, &tmgr->pro_list, list) {
+		vertex_dbg("pro[%d] %d\n", idx++, task->index);
+	}
+	vertex_leave();
+}
+
+/* Dump the complete list. */
+void vertex_task_print_complete_list(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task, *temp;
+	int idx = 0;
+
+	vertex_enter();
+	vertex_dbg("com(%d, %d)\n", tmgr->id, tmgr->com_cnt);
+	list_for_each_entry_safe(task, temp, &tmgr->com_list, list) {
+		vertex_dbg("com[%d] %d\n", idx++, task->index);
+	}
+	vertex_leave();
+}
+
+/* Dump every state list of @tmgr in one shot. */
+void vertex_task_print_all(struct vertex_taskmgr *tmgr)
+{
+	vertex_enter();
+	vertex_task_print_free_list(tmgr);
+	vertex_task_print_request_list(tmgr);
+	vertex_task_print_prepare_list(tmgr);
+	vertex_task_print_process_list(tmgr);
+	vertex_task_print_complete_list(tmgr);
+	vertex_leave();
+}
+
+/*
+ * State-check helpers: before a list transition, verify that the
+ * per-state counter is non-zero and that @task really is in the
+ * expected state.  Return 0 on success or -EINVAL (with a warning).
+ * Fix: the warning text previously read "invlid" — corrected to
+ * "invalid" in all five helpers.
+ */
+static int __vertex_task_check_free(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	if (!tmgr->fre_cnt) {
+		vertex_warn("fre_cnt is zero\n");
+		return -EINVAL;
+	}
+
+	if (task->state != VERTEX_TASK_STATE_FREE) {
+		vertex_warn("state(%x) is invalid\n", task->state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify @task is on the request list (state + counter). */
+static int __vertex_task_check_request(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	if (!tmgr->req_cnt) {
+		vertex_warn("req_cnt is zero\n");
+		return -EINVAL;
+	}
+
+	if (task->state != VERTEX_TASK_STATE_REQUEST) {
+		vertex_warn("state(%x) is invalid\n", task->state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify @task is on the prepare list (state + counter). */
+static int __vertex_task_check_prepare(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	if (!tmgr->pre_cnt) {
+		vertex_warn("pre_cnt is zero\n");
+		return -EINVAL;
+	}
+
+	if (task->state != VERTEX_TASK_STATE_PREPARE) {
+		vertex_warn("state(%x) is invalid\n", task->state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify @task is on the process list (state + counter). */
+static int __vertex_task_check_process(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	if (!tmgr->pro_cnt) {
+		vertex_warn("pro_cnt is zero\n");
+		return -EINVAL;
+	}
+
+	if (task->state != VERTEX_TASK_STATE_PROCESS) {
+		vertex_warn("state(%x) is invalid\n", task->state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify @task is on the complete list (state + counter). */
+static int __vertex_task_check_complete(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	if (!tmgr->com_cnt) {
+		vertex_warn("com_cnt is zero\n");
+		return -EINVAL;
+	}
+
+	if (task->state != VERTEX_TASK_STATE_COMPLETE) {
+		vertex_warn("state(%x) is invalid\n", task->state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * State-transition functions.  Naming: fre=free, req=request,
+ * pre=prepare, pro=process, com=complete.  Each one validates the
+ * source state, unlinks the task from its current list, decrements
+ * that list's counter and re-links it via the matching state setter.
+ * On a failed check the task is silently left where it is (the check
+ * helper already logged a warning).  NOTE(review): these take no lock
+ * themselves; vertex_task_flush() calls them under tmgr->slock —
+ * other callers are expected to do the same (confirm).
+ */
+void vertex_task_trans_fre_to_req(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_free(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->fre_cnt--;
+	__vertex_task_s_request(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_req_to_pre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_request(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->req_cnt--;
+	__vertex_task_s_prepare(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_req_to_pro(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_request(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->req_cnt--;
+	__vertex_task_s_process(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_req_to_com(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_request(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->req_cnt--;
+	__vertex_task_s_complete(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_req_to_fre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_request(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->req_cnt--;
+	__vertex_task_s_free(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_pre_to_pro(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_prepare(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->pre_cnt--;
+	__vertex_task_s_process(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_pre_to_com(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_prepare(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->pre_cnt--;
+	__vertex_task_s_complete(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_pre_to_fre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_prepare(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->pre_cnt--;
+	__vertex_task_s_free(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_pro_to_com(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_process(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->pro_cnt--;
+	__vertex_task_s_complete(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_pro_to_fre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_process(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->pro_cnt--;
+	__vertex_task_s_free(tmgr, task);
+	vertex_leave();
+}
+
+void vertex_task_trans_com_to_fre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+	if (__vertex_task_check_complete(tmgr, task))
+		return;
+
+	list_del(&task->list);
+	tmgr->com_cnt--;
+	__vertex_task_s_free(tmgr, task);
+	vertex_leave();
+}
+
+/*
+ * Move @task back to the free list from whatever state it is in,
+ * decrementing the counter of its current state.  A task already in
+ * FREE (or in an unknown state) hits the default branch: an error is
+ * logged and the task is left unlinked — note list_del() has already
+ * run by then, so such a task ends up on no list at all.
+ */
+void vertex_task_trans_any_to_fre(struct vertex_taskmgr *tmgr,
+		struct vertex_task *task)
+{
+	vertex_enter();
+
+	list_del(&task->list);
+	switch (task->state) {
+	case VERTEX_TASK_STATE_REQUEST:
+		tmgr->req_cnt--;
+		break;
+	case VERTEX_TASK_STATE_PREPARE:
+		tmgr->pre_cnt--;
+		break;
+	case VERTEX_TASK_STATE_PROCESS:
+		tmgr->pro_cnt--;
+		break;
+	case VERTEX_TASK_STATE_COMPLETE:
+		tmgr->com_cnt--;
+		break;
+	default:
+		vertex_err("state(%d) of task is invalid\n", task->state);
+		return;
+	}
+
+	__vertex_task_s_free(tmgr, task);
+	vertex_leave();
+}
+
+/*
+ * Pop the oldest free task and move it to the request list in one
+ * step.  Returns the task, or NULL when the free list is empty.
+ */
+struct vertex_task *vertex_task_pick_fre_to_req(struct vertex_taskmgr *tmgr)
+{
+	struct vertex_task *task;
+
+	vertex_enter();
+	task = __vertex_task_get_first_free(tmgr);
+	if (task)
+		vertex_task_trans_fre_to_req(tmgr, task);
+	vertex_leave();
+	return task;
+}
+
+/*
+ * Initialize the task manager: reset all counters and list heads under
+ * tmgr->slock, then place tasks 1..tot_cnt-1 on the free list with
+ * @owner recorded on each.  Always returns 0.
+ * NOTE(review): tmgr->slock itself is locked here but never visibly
+ * spin_lock_init()ed in this file — confirm it is initialized by the
+ * caller before the first vertex_task_init().
+ */
+int vertex_task_init(struct vertex_taskmgr *tmgr, unsigned int id, void *owner)
+{
+	unsigned long flags;
+	unsigned int index;
+
+	vertex_enter();
+	spin_lock_irqsave(&tmgr->slock, flags);
+	tmgr->id = id;
+	tmgr->sindex = 0;
+
+	tmgr->tot_cnt = VERTEX_MAX_TASK;
+	tmgr->fre_cnt = 0;
+	tmgr->req_cnt = 0;
+	tmgr->pre_cnt = 0;
+	tmgr->pro_cnt = 0;
+	tmgr->com_cnt = 0;
+
+	INIT_LIST_HEAD(&tmgr->fre_list);
+	INIT_LIST_HEAD(&tmgr->req_list);
+	INIT_LIST_HEAD(&tmgr->pre_list);
+	INIT_LIST_HEAD(&tmgr->pro_list);
+	INIT_LIST_HEAD(&tmgr->com_list);
+
+	/*
+	 * task index 0 means invalid
+	 * because firmware can't accept 0 invocation id
+	 */
+	for (index = 1; index < tmgr->tot_cnt; ++index) {
+		tmgr->task[index].index = index;
+		tmgr->task[index].owner = owner;
+		tmgr->task[index].findex = VERTEX_MAX_TASK;
+		tmgr->task[index].tdindex = VERTEX_MAX_TASKDESC;
+		__vertex_task_s_free(tmgr, &tmgr->task[index]);
+	}
+
+	spin_unlock_irqrestore(&tmgr->slock, flags);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Return every outstanding task to the free list before the manager
+ * is torn down.  Always returns 0.
+ *
+ * Fix: the previous implementation called __vertex_task_s_free() on
+ * ALL tasks starting at index 0.  Tasks already on fre_list were
+ * list_add_tail()ed a second time without a list_del(), corrupting
+ * the list and inflating fre_cnt, and non-free tasks were never
+ * unlinked from their current state list nor were their counters
+ * decremented.  Move only the non-free tasks, reusing
+ * vertex_task_trans_any_to_fre() so the list links and per-state
+ * counters stay balanced.  Index 0 is skipped: it is never handed
+ * out (see the comment in vertex_task_init()).
+ */
+int vertex_task_deinit(struct vertex_taskmgr *tmgr)
+{
+	unsigned long flags;
+	unsigned int index;
+
+	vertex_enter();
+	spin_lock_irqsave(&tmgr->slock, flags);
+
+	for (index = 1; index < tmgr->tot_cnt; ++index) {
+		if (tmgr->task[index].state != VERTEX_TASK_STATE_FREE)
+			vertex_task_trans_any_to_fre(tmgr, &tmgr->task[index]);
+	}
+
+	spin_unlock_irqrestore(&tmgr->slock, flags);
+
+	vertex_leave();
+	return 0;
+}
+
+/*
+ * Forcibly return every queued/in-flight task to the free list,
+ * logging a warning per flushed task.  Runs under tmgr->slock; the
+ * _safe list iterators are required because each trans_* call unlinks
+ * the current entry.  Afterwards, fre_cnt should equal tot_cnt - 1
+ * (slot 0 is never used) — a mismatch means a task leaked and is
+ * reported with all counters.
+ */
+void vertex_task_flush(struct vertex_taskmgr *tmgr)
+{
+	unsigned long flags;
+	struct vertex_task *task, *temp;
+
+	vertex_enter();
+	spin_lock_irqsave(&tmgr->slock, flags);
+
+	list_for_each_entry_safe(task, temp, &tmgr->req_list, list) {
+		vertex_task_trans_req_to_fre(tmgr, task);
+		vertex_warn("req task(%d) is flushed(count:%d)",
+				task->id, tmgr->req_cnt);
+	}
+
+	list_for_each_entry_safe(task, temp, &tmgr->pre_list, list) {
+		vertex_task_trans_pre_to_fre(tmgr, task);
+		vertex_warn("pre task(%d) is flushed(count:%d)",
+				task->id, tmgr->pre_cnt);
+	}
+
+	list_for_each_entry_safe(task, temp, &tmgr->pro_list, list) {
+		vertex_task_trans_pro_to_fre(tmgr, task);
+		vertex_warn("pro task(%d) is flushed(count:%d)",
+				task->id, tmgr->pro_cnt);
+	}
+
+	list_for_each_entry_safe(task, temp, &tmgr->com_list, list) {
+		vertex_task_trans_com_to_fre(tmgr, task);
+		vertex_warn("com task(%d) is flushed(count:%d)",
+				task->id, tmgr->com_cnt);
+	}
+
+	spin_unlock_irqrestore(&tmgr->slock, flags);
+
+	if (tmgr->fre_cnt != (tmgr->tot_cnt - 1))
+		vertex_warn("free count leaked (%d/%d/%d/%d/%d/%d)\n",
+				tmgr->fre_cnt, tmgr->req_cnt,
+				tmgr->pre_cnt, tmgr->pro_cnt,
+				tmgr->com_cnt, tmgr->tot_cnt);
+	vertex_leave();
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_TASKMGR_H__
+#define __VERTEX_TASKMGR_H__
+
+#include <linux/kthread.h>
+
+#include "vertex-config.h"
+#include "vertex-time.h"
+
+/*
+ * One bit per lock-owner site, stored in vertex_taskmgr->sindex by the
+ * taskmgr_e/x_barrier_* macros below for debugging lock ownership.
+ * Fix: TASKMGR_IDX_31 was (1 << 31), which left-shifts into the sign
+ * bit of a signed int — undefined behavior in C; use an unsigned
+ * literal instead (sindex is an unsigned int, so the value is the
+ * same bit pattern).
+ */
+#define TASKMGR_IDX_0		(1 << 0) /* graphmgr thread */
+#define TASKMGR_IDX_1		(1 << 1)
+#define TASKMGR_IDX_2		(1 << 2)
+#define TASKMGR_IDX_3		(1 << 3)
+#define TASKMGR_IDX_4		(1 << 4)
+#define TASKMGR_IDX_5		(1 << 5)
+#define TASKMGR_IDX_6		(1 << 6)
+#define TASKMGR_IDX_7		(1 << 7)
+#define TASKMGR_IDX_8		(1 << 8)
+#define TASKMGR_IDX_9		(1 << 9)
+#define TASKMGR_IDX_10		(1 << 10)
+#define TASKMGR_IDX_11		(1 << 11)
+#define TASKMGR_IDX_12		(1 << 12)
+#define TASKMGR_IDX_13		(1 << 13)
+#define TASKMGR_IDX_14		(1 << 14)
+#define TASKMGR_IDX_15		(1 << 15)
+#define TASKMGR_IDX_16		(1 << 16)
+#define TASKMGR_IDX_17		(1 << 17)
+#define TASKMGR_IDX_18		(1 << 18)
+#define TASKMGR_IDX_19		(1 << 19)
+#define TASKMGR_IDX_20		(1 << 20)
+#define TASKMGR_IDX_21		(1 << 21)
+#define TASKMGR_IDX_22		(1 << 22)
+#define TASKMGR_IDX_23		(1 << 23)
+#define TASKMGR_IDX_24		(1 << 24)
+#define TASKMGR_IDX_25		(1 << 25)
+#define TASKMGR_IDX_26		(1 << 26)
+#define TASKMGR_IDX_27		(1 << 27)
+#define TASKMGR_IDX_28		(1 << 28)
+#define TASKMGR_IDX_29		(1 << 29)
+#define TASKMGR_IDX_30		(1 << 30)
+#define TASKMGR_IDX_31		(1U << 31)
+
+/*
+ * Lock wrappers that record the caller site in sindex for debugging.
+ * NOTE(review): sindex is modified OUTSIDE the spinlock critical
+ * section (set before lock, cleared after unlock), so concurrent
+ * callers can race on it — it is best-effort owner tracking only,
+ * never a synchronization primitive.  @index must be one of the
+ * TASKMGR_IDX_* bits above.
+ */
+#define taskmgr_e_barrier_irqs(taskmgr, index, flag) \
+	do { \
+		taskmgr->sindex |= index; \
+		spin_lock_irqsave(&taskmgr->slock, flag); \
+	} while (0)
+
+#define taskmgr_x_barrier_irqr(taskmgr, index, flag) \
+	do { \
+		spin_unlock_irqrestore(&taskmgr->slock, flag); \
+		taskmgr->sindex &= ~index; \
+	} while (0)
+
+#define taskmgr_e_barrier_irq(taskmgr, index) \
+	do { \
+		taskmgr->sindex |= index; \
+		spin_lock_irq(&taskmgr->slock); \
+	} while (0)
+
+#define taskmgr_x_barrier_irq(taskmgr, index) \
+	do { \
+		spin_unlock_irq(&taskmgr->slock); \
+		taskmgr->sindex &= ~index; \
+	} while (0)
+
+#define taskmgr_e_barrier(taskmgr, index) \
+	do { \
+		taskmgr->sindex |= index; \
+		spin_lock(&taskmgr->slock); \
+	} while (0)
+
+#define taskmgr_x_barrier(taskmgr, index) \
+	do { \
+		spin_unlock(&taskmgr->slock); \
+		taskmgr->sindex &= ~index; \
+	} while (0)
+
+/*
+ * Task lifecycle states.  FREE starts at 1 so that a zero-initialized
+ * task reads as "never initialized" (vertex_task_init() skips slot 0,
+ * which therefore stays at state 0).
+ */
+enum vertex_task_state {
+	VERTEX_TASK_STATE_FREE = 1,
+	VERTEX_TASK_STATE_REQUEST,
+	VERTEX_TASK_STATE_PREPARE,
+	VERTEX_TASK_STATE_PROCESS,
+	VERTEX_TASK_STATE_COMPLETE,
+	VERTEX_TASK_STATE_INVALID
+};
+
+/* Bit position used in vertex_task->flags (see users of IOCPY). */
+enum vertex_task_flag {
+	VERTEX_TASK_FLAG_IOCPY = 16
+};
+
+/* Message codes carried in vertex_task->message. */
+enum vertex_task_message {
+	VERTEX_TASK_INIT = 1,
+	VERTEX_TASK_DEINIT,
+	VERTEX_TASK_CREATE,
+	VERTEX_TASK_DESTROY,
+	VERTEX_TASK_ALLOCATE,
+	VERTEX_TASK_PROCESS,
+	VERTEX_TASK_REQUEST = 10,
+	VERTEX_TASK_DONE,
+	VERTEX_TASK_NDONE
+};
+
+/* Out-of-band control codes; offset by 100 so they never collide
+ * with vertex_task_message values. */
+enum vertex_control_message {
+	VERTEX_CTRL_NONE = 100,
+	VERTEX_CTRL_STOP,
+	VERTEX_CTRL_STOP_DONE
+};
+
+struct vertex_task {
+	struct list_head list;		/* linkage into the current state list */
+	struct kthread_work work;	/* deferred-work handle */
+	unsigned int state;		/* enum vertex_task_state */
+
+	unsigned int message;		/* enum vertex_task_message / _control_message */
+	unsigned long param0;		/* message-specific parameters */
+	unsigned long param1;
+	unsigned long param2;
+	unsigned long param3;
+
+	struct vertex_container_list *incl;	/* input containers */
+	struct vertex_container_list *otcl;	/* output containers */
+	unsigned long flags;		/* VERTEX_TASK_FLAG_* bits */
+
+	unsigned int id;
+	struct mutex *lock;
+	unsigned int index;		/* slot in taskmgr->task[]; 0 is invalid */
+	unsigned int findex;		/* VERTEX_MAX_TASK when unset */
+	unsigned int tdindex;		/* VERTEX_MAX_TASKDESC when unset */
+	void *owner;			/* opaque owner set by vertex_task_init() */
+
+	struct vertex_time time[VERTEX_TIME_COUNT];	/* per-stage timestamps */
+};
+
+/*
+ * Five-state task pool.  Each task lives on exactly one of the state
+ * lists; the *_cnt fields mirror the list lengths and slock guards
+ * all of it.  sindex is a debug bitmask of current lock-owner sites
+ * (see the taskmgr_*_barrier macros).
+ */
+struct vertex_taskmgr {
+	unsigned int id;
+	unsigned int sindex;
+	spinlock_t slock;
+	struct vertex_task task[VERTEX_MAX_TASK];
+
+	struct list_head fre_list;
+	struct list_head req_list;
+	struct list_head pre_list;
+	struct list_head pro_list;
+	struct list_head com_list;
+
+	unsigned int tot_cnt;
+	unsigned int fre_cnt;
+	unsigned int req_cnt;
+	unsigned int pre_cnt;
+	unsigned int pro_cnt;
+	unsigned int com_cnt;
+};
+
+void vertex_task_print_free_list(struct vertex_taskmgr *tmgr);
+void vertex_task_print_request_list(struct vertex_taskmgr *tmgr);
+void vertex_task_print_prepare_list(struct vertex_taskmgr *tmgr);
+void vertex_task_print_process_list(struct vertex_taskmgr *tmgr);
+void vertex_task_print_complete_list(struct vertex_taskmgr *tmgr);
+void vertex_task_print_all(struct vertex_taskmgr *tmgr);
+
+void vertex_task_trans_fre_to_req(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_req_to_pre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_req_to_pro(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_req_to_com(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_req_to_fre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_pre_to_pro(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_pre_to_com(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_pre_to_fre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_pro_to_com(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_pro_to_fre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_com_to_fre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+void vertex_task_trans_any_to_fre(struct vertex_taskmgr *tmgr,
+ struct vertex_task *task);
+
+struct vertex_task *vertex_task_pick_fre_to_req(struct vertex_taskmgr *tmgr);
+
+int vertex_task_init(struct vertex_taskmgr *tmgr, unsigned int id, void *owner);
+int vertex_task_deinit(struct vertex_taskmgr *tmgr);
+void vertex_task_flush(struct vertex_taskmgr *tmgr);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ktime.h>
+
+#include "vertex-log.h"
+#include "vertex-time.h"
+
+/*
+ * Record the current wall-clock time into @time.
+ * NOTE(review): do_gettimeofday() is deprecated in recent kernels
+ * (not y2038-safe); ktime_get_real_ts64() with a timespec64 would be
+ * the replacement — confirm against the target kernel version before
+ * changing, since struct vertex_time wraps a struct timeval.
+ */
+void vertex_get_timestamp(struct vertex_time *time)
+{
+	do_gettimeofday(&time->time);
+}
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_TIME_H__
+#define __VERTEX_TIME_H__
+
+#include <linux/time.h>
+
+/* Measurement points along a task's life; index into task->time[]. */
+enum vertex_time_measure_point {
+	VERTEX_TIME_QUEUE,
+	VERTEX_TIME_REQUEST,
+	VERTEX_TIME_RESOURCE,
+	VERTEX_TIME_PROCESS,
+	VERTEX_TIME_DONE,
+	VERTEX_TIME_COUNT
+};
+
+/* Wall-clock timestamp wrapper (filled by vertex_get_timestamp()). */
+struct vertex_time {
+	struct timeval time;
+};
+
+void vertex_get_timestamp(struct vertex_time *time);
+
+/* Convert a vertex_time to microseconds since the epoch.
+ * NOTE(review): tv_sec * 1000000 can overflow a 32-bit long on 32-bit
+ * builds — confirm this is only used for short deltas. */
+#define VERTEX_TIME_IN_US(v)	((v).time.tv_sec * 1000000 + (v).time.tv_usec)
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vertex-log.h"
+#include "vertex-util.h"
+
+/* Clear all @nbits bits of @bitmap. */
+void vertex_util_bitmap_init(unsigned long *bitmap, unsigned int nbits)
+{
+	bitmap_zero(bitmap, nbits);
+}
+
+/*
+ * Atomically claim the lowest clear bit of @bitmap and return its
+ * index, or @nbits when the map is full.  The find/test_and_set loop
+ * retries on races: if another claimant sets the bit between the find
+ * and the test_and_set, we simply search again.
+ */
+unsigned int vertex_util_bitmap_get_zero_bit(unsigned long *bitmap,
+		unsigned int nbits)
+{
+	int bit, old_bit;
+
+	while ((bit = find_first_zero_bit(bitmap, nbits)) != nbits) {
+		old_bit = test_and_set_bit(bit, bitmap);
+		if (!old_bit)
+			return bit;
+	}
+
+	return nbits;
+}
+
+/* Atomically release bit @bit of @bitmap. */
+void vertex_util_bitmap_clear_bit(unsigned long *bitmap, int bit)
+{
+	clear_bit(bit, bitmap);
+}
+
+
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __VERTEX_UTIL_H__
+#define __VERTEX_UTIL_H__
+
+#include <linux/bitmap.h>
+
+void vertex_util_bitmap_init(unsigned long *bitmap, unsigned int nbits);
+unsigned int vertex_util_bitmap_get_zero_bit(unsigned long *bitmap,
+ unsigned int nbits);
+void vertex_util_bitmap_clear_bit(unsigned long *bitmap, int bit);
+
+#endif
--- /dev/null
+/*
+ * Samsung Exynos SoC series VIPx driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TEMP_VISION_FOR_LINUX_H__
+#define __TEMP_VISION_FOR_LINUX_H__
+
+#define MAX_NUM_OF_USER_PARAMS (180)
+
+enum vs4l_graph_flag {
+ VS4L_GRAPH_FLAG_FIXED,
+ VS4L_GRAPH_FLAG_SHARED_AMONG_SUBCHAINS,
+ VS4L_GRAPH_FLAG_SHARING_AMONG_SUBCHAINS_PREFERRED,
+ VS4L_GRAPH_FLAG_SHARED_AMONG_TASKS,
+ VS4L_GRAPH_FLAG_SHARING_AMONG_TASKS_PREFERRED,
+ VS4L_GRAPH_FLAG_DSBL_LATENCY_BALANCING,
+ VS4L_STATIC_ALLOC_LARGE_MPRB_INSTEAD_SMALL_FLAG,
+ VS4L_STATIC_ALLOC_PU_INSTANCE_LSB,
+ VS4L_GRAPH_FLAG_PRIMITIVE,
+ VS4L_GRAPH_FLAG_EXCLUSIVE,
+ VS4L_GRAPH_FLAG_PERIODIC,
+ VS4L_GRAPH_FLAG_ROUTING,
+ VS4L_GRAPH_FLAG_BYPASS,
+ VS4L_GRAPH_FLAG_END
+};
+
+/* Userspace description of a vision graph (VS4L_VERTEXIOC_S_GRAPH). */
+struct vs4l_graph {
+	__u32 id;
+	__u32 priority;
+	__u32 time; /* in millisecond */
+	__u32 flags;		/* vs4l_graph_flag bits */
+	__u32 size;		/* byte size of the blob at addr — TODO confirm */
+	unsigned long addr;	/* userspace pointer to graph payload */
+};
+
+struct vs4l_format {
+ __u32 target;
+ __u32 format;
+ __u32 plane;
+ __u32 width;
+ __u32 height;
+};
+
+struct vs4l_format_list {
+ __u32 direction;
+ __u32 count;
+ struct vs4l_format *formats;
+};
+
+struct vs4l_param {
+ __u32 target;
+ unsigned long addr;
+ __u32 offset;
+ __u32 size;
+};
+
+struct vs4l_param_list {
+ __u32 count;
+ struct vs4l_param *params;
+};
+
+struct vs4l_ctrl {
+ __u32 ctrl;
+ __u32 value;
+};
+
+struct vs4l_roi {
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+};
+
+struct vs4l_buffer {
+ struct vs4l_roi roi;
+ union {
+ unsigned long userptr;
+ __s32 fd;
+ } m;
+ unsigned long reserved;
+};
+
+enum vs4l_buffer_type {
+ VS4L_BUFFER_LIST,
+ VS4L_BUFFER_ROI,
+ VS4L_BUFFER_PYRAMID
+};
+
+enum vs4l_memory {
+ VS4L_MEMORY_USERPTR = 1,
+ VS4L_MEMORY_VIRTPTR,
+ VS4L_MEMORY_DMABUF
+};
+
+struct vs4l_container {
+ __u32 type;
+ __u32 target;
+ __u32 memory;
+ __u32 reserved[4];
+ __u32 count;
+ struct vs4l_buffer *buffers;
+};
+
+enum vs4l_direction {
+ VS4L_DIRECTION_IN = 1,
+ VS4L_DIRECTION_OT
+};
+
+enum vs4l_cl_flag {
+ VS4L_CL_FLAG_TIMESTAMP,
+ VS4L_CL_FLAG_PREPARE = 8,
+ VS4L_CL_FLAG_INVALID,
+ VS4L_CL_FLAG_DONE
+};
+
+/*
+ * Top-level queue/dequeue payload for VS4L_VERTEXIOC_QBUF/DQBUF.
+ * NOTE(review): contains struct timeval and raw pointers, so this
+ * layout differs between 32- and 64-bit userspace — a compat ioctl
+ * shim is presumably required; confirm.
+ */
+struct vs4l_container_list {
+	__u32 direction;	/* VS4L_DIRECTION_IN or _OT */
+	__u32 id;
+	__u32 index;
+	__u32 flags;		/* vs4l_cl_flag bits */
+	struct timeval timestamp[6];
+	__u32 count;		/* number of entries in containers[] */
+	__u32 user_params[MAX_NUM_OF_USER_PARAMS];
+	struct vs4l_container *containers;
+};
+
+#define VS4L_DF_IMAGE(a, b, c, d) ((a) | (b << 8) | (c << 16) | (d << 24))
+#define VS4L_DF_IMAGE_RGB VS4L_DF_IMAGE('R', 'G', 'B', '2')
+#define VS4L_DF_IMAGE_RGBX VS4L_DF_IMAGE('R', 'G', 'B', 'A')
+#define VS4L_DF_IMAGE_NV12 VS4L_DF_IMAGE('N', 'V', '1', '2')
+#define VS4L_DF_IMAGE_NV21 VS4L_DF_IMAGE('N', 'V', '2', '1')
+#define VS4L_DF_IMAGE_YV12 VS4L_DF_IMAGE('Y', 'V', '1', '2')
+#define VS4L_DF_IMAGE_I420 VS4L_DF_IMAGE('I', '4', '2', '0')
+#define VS4L_DF_IMAGE_I422 VS4L_DF_IMAGE('I', '4', '2', '2')
+#define VS4L_DF_IMAGE_YUYV VS4L_DF_IMAGE('Y', 'U', 'Y', 'V')
+#define VS4L_DF_IMAGE_YUV4 VS4L_DF_IMAGE('Y', 'U', 'V', '4')
+#define VS4L_DF_IMAGE_U8 VS4L_DF_IMAGE('U', '0', '0', '8')
+#define VS4L_DF_IMAGE_U16 VS4L_DF_IMAGE('U', '0', '1', '6')
+#define VS4L_DF_IMAGE_U32 VS4L_DF_IMAGE('U', '0', '3', '2')
+#define VS4L_DF_IMAGE_S16 VS4L_DF_IMAGE('S', '0', '1', '6')
+#define VS4L_DF_IMAGE_S32 VS4L_DF_IMAGE('S', '0', '3', '2')
+
+#define VS4L_VERTEXIOC_S_GRAPH _IOW('V', 0, struct vs4l_graph)
+#define VS4L_VERTEXIOC_S_FORMAT _IOW('V', 1, struct vs4l_format_list)
+#define VS4L_VERTEXIOC_S_PARAM _IOW('V', 2, struct vs4l_param_list)
+#define VS4L_VERTEXIOC_S_CTRL _IOW('V', 3, struct vs4l_ctrl)
+#define VS4L_VERTEXIOC_STREAM_ON _IO('V', 4)
+#define VS4L_VERTEXIOC_STREAM_OFF _IO('V', 5)
+#define VS4L_VERTEXIOC_QBUF _IOW('V', 6, struct vs4l_container_list)
+#define VS4L_VERTEXIOC_DQBUF _IOW('V', 7, struct vs4l_container_list)
+
+#endif
+++ /dev/null
-config VISION_CORE
- bool "Vision Core"
- help
- This is a vision core
+++ /dev/null
-obj-$(CONFIG_VISION_CORE) += vision-dev.o
-obj-$(CONFIG_VISION_CORE) += vision-compat-ioctl32.o
-obj-$(CONFIG_VISION_CORE) += vision-ioctl.o
-obj-$(CONFIG_VISION_CORE) += vision-buffer.o
-
-EXTRA_CFLAGS += -Idrivers/vision/include
+++ /dev/null
-/*
- * Samsung Exynos SoC series VISION driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <media/videobuf2-dma-sg.h>
-#include <linux/dma-buf.h>
-#include <linux/ion_exynos.h>
-
-#include "vision-config.h"
-#include "vision-buffer.h"
-
-#include <asm/cacheflush.h>
-
-#define DEBUG_SENTENCE_MAX 300
-
-struct vision_debug_log {
- size_t dsentence_pos;
- char dsentence[DEBUG_SENTENCE_MAX];
-};
-
-#if 1 // should be removed
-void __npu_queue_print(struct vb_queue *q);
-#endif
-
-void vision_dmsg_concate(struct vision_debug_log *log, const char *fmt, ...)
-{
- va_list ap;
- char term[50];
- u32 copy_len;
-
- va_start(ap, fmt);
- vsnprintf(term, sizeof(term), fmt, ap);
- va_end(ap);
-
- if (log->dsentence_pos >= DEBUG_SENTENCE_MAX) {
- vision_err("debug message(%zd) over max\n", log->dsentence_pos);
- return;
- }
-
- copy_len = min((DEBUG_SENTENCE_MAX - log->dsentence_pos - 1), strlen(term));
- strncpy(log->dsentence + log->dsentence_pos, term, copy_len);
- log->dsentence_pos += copy_len;
- log->dsentence[log->dsentence_pos] = 0;
-}
-
-char * vision_dmsg_print(struct vision_debug_log *log)
-{
- log->dsentence_pos = 0;
- return log->dsentence;
-}
-
-#define DLOG_INIT() struct vision_debug_log vision_debug_log = { .dsentence_pos = 0 }
-#define DLOG(fmt, ...) vision_dmsg_concate(&vision_debug_log, fmt, ##__VA_ARGS__)
-#define DLOG_OUT() vision_dmsg_print(&vision_debug_log)
-
-struct vb_fmt vb_fmts[] = {
- {
- .name = "RGB",
- .colorspace = VS4L_DF_IMAGE_RGB,
- .planes = 1,
- .bitsperpixel = { 24 }
- }, {
- .name = "ARGB",
- .colorspace = VS4L_DF_IMAGE_RGBX,
- .planes = 1,
- .bitsperpixel = { 32 }
- }, {
- .name = "YUV 4:2:0 planar, Y/CbCr",
- .colorspace = VS4L_DF_IMAGE_NV12,
- .planes = 2,
- .bitsperpixel = { 8, 8 }
- }, {
- .name = "YUV 4:2:0 planar, Y/CrCb",
- .colorspace = VS4L_DF_IMAGE_NV21,
- .planes = 2,
- .bitsperpixel = { 8, 8 }
- }, {
- .name = "YUV 4:2:0 planar, Y/Cr/Cb",
- .colorspace = VS4L_DF_IMAGE_YV12,
- .planes = 3,
- .bitsperpixel = { 8, 4, 4 }
- }, {
- .name = "YUV 4:2:0 planar, Y/Cb/Cr",
- .colorspace = VS4L_DF_IMAGE_I420,
- .planes = 3,
- .bitsperpixel = { 8, 4, 4 }
- }, {
- .name = "YUV 4:2:2 planar, Y/Cb/Cr",
- .colorspace = VS4L_DF_IMAGE_I422,
- .planes = 3,
- .bitsperpixel = { 8, 4, 4 }
- }, {
- .name = "YUV 4:2:2 packed, YCbYCr",
- .colorspace = VS4L_DF_IMAGE_YUYV,
- .planes = 1,
- .bitsperpixel = { 16 }
- }, {
- .name = "YUV 4:4:4 packed, YCbCr",
- .colorspace = VS4L_DF_IMAGE_YUV4,
- .planes = 1,
- .bitsperpixel = { 24 }
- }, {
- .name = "VX unsigned 8 bit",
- .colorspace = VS4L_DF_IMAGE_U8,
- .planes = 1,
- .bitsperpixel = { 8 }
- }, {
- .name = "VX unsigned 16 bit",
- .colorspace = VS4L_DF_IMAGE_U16,
- .planes = 1,
- .bitsperpixel = { 16 }
- }, {
- .name = "VX unsigned 32 bit",
- .colorspace = VS4L_DF_IMAGE_U32,
- .planes = 1,
- .bitsperpixel = { 32 }
- }, {
- .name = "VX signed 16 bit",
- .colorspace = VS4L_DF_IMAGE_S16,
- .planes = 1,
- .bitsperpixel = { 16 }
- }, {
- .name = "VX signed 32 bit",
- .colorspace = VS4L_DF_IMAGE_S32,
- .planes = 1,
- .bitsperpixel = { 32 }
- }
-};
-
-void __vb_queue_print(struct vb_queue *q)
-{
- DLOG_INIT();
- struct vb_bundle *bundle, *temp;
-
- DLOG("[VB] queued(%d) :", atomic_read(&q->queued_count));
- list_for_each_entry_safe(bundle, temp, &q->queued_list, queued_entry) {
- DLOG("%d->", bundle->clist.index);
- }
- DLOG("X");
-
- vision_info("%s\n", DLOG_OUT());
-
- DLOG("[VB] process(%d) :", atomic_read(&q->process_count));
- list_for_each_entry_safe(bundle, temp, &q->process_list, process_entry) {
- DLOG("%d->", bundle->clist.index);
- }
- DLOG("X");
-
- vision_info("%s\n", DLOG_OUT());
-
- DLOG("[VB] done(%d) :", atomic_read(&q->done_count));
- list_for_each_entry_safe(bundle, temp, &q->done_list, done_entry) {
- DLOG("%d->", bundle->clist.index);
- }
- DLOG("X");
-
- vision_info("%s\n", DLOG_OUT());
-}
-
-void __vb_buffer_print(struct vs4l_container_list *c)
-{
- vision_info("[VB] c->direction : %d", c->direction);
- vision_info("[VB] c->id : %d", c->id);
- vision_info("[VB] c->index : %d", c->count);
-}
-
-static struct vb_fmt * __vb_find_format(u32 colorspace)
-{
- int ret = 0;
- u32 i;
- struct vb_fmt *fmt = NULL;
-
- for (i = 0; i < ARRAY_SIZE(vb_fmts); ++i) {
- if (vb_fmts[i].colorspace == colorspace) {
- fmt = &vb_fmts[i];
- break;
- }
- }
- if (i == ARRAY_SIZE(vb_fmts)) {
- vision_err("Format %d not found!!!\n", colorspace);
- ret = -EINVAL;
- goto p_err;
- }
-
-p_err:
- return fmt;
-}
-
-static int __vb_plane_size(struct vb_format *format)
-{
- int ret = 0;
- u32 plane;
- struct vb_fmt *fmt;
-
- BUG_ON(!format);
-
- fmt = format->fmt;
-
- if (fmt->planes > VB_MAX_PLANES) {
- vision_err("planes(%d) is invalid\n", fmt->planes);
- ret = -EINVAL;
- goto p_err;
- }
-
- for (plane = 0; plane < fmt->planes; ++plane)
- format->size[plane] = format->width * fmt->bitsperpixel[plane] / BITS_PER_BYTE* format->height * fmt->bitsperpixel[plane] / BITS_PER_BYTE;
-
-p_err:
- return ret;
-}
-
-static int __vb_unmap_dmabuf(struct vb_queue *q, struct vb_buffer *buffer)
-{
- int ret = 0;
-
- if (buffer->vaddr)
- dma_buf_vunmap(buffer->dma_buf, buffer->vaddr);
- if (buffer->daddr)
- ion_iovmm_unmap(buffer->attachment, buffer->daddr);
- if (buffer->sgt)
- dma_buf_unmap_attachment(buffer->attachment, buffer->sgt, DMA_BIDIRECTIONAL);
- if (buffer->attachment)
- dma_buf_detach(buffer->dma_buf, buffer->attachment);
- if (buffer->dma_buf)
- dma_buf_put(buffer->dma_buf);
-
- buffer->attachment = NULL;
- buffer->sgt = NULL;
- buffer->daddr = 0;
- buffer->vaddr = NULL;
-
- return ret;
-}
-
-static int __vb_unmap_virtptr(struct vb_queue *q, struct vb_buffer *buffer)
-{
- int ret = 0;
-
- if (buffer->reserved)
- kfree((void *)buffer->m.userptr);
-
- buffer->mem_priv = NULL;
- buffer->cookie = NULL;
- buffer->kvaddr = NULL;
- buffer->dbuf = NULL;
- buffer->dvaddr = 0;
- buffer->reserved = 0;
-
- return ret;
-}
-
-static int __vb_map_dmabuf(struct vb_queue *q, struct vb_buffer *buffer, u32 size)
-{
- int ret = 0;
- bool complete_suc = false;
-
- struct dma_buf_attachment *attachment;
- struct sg_table *sgt;
- dma_addr_t daddr;
- void *vaddr;
-
- buffer->attachment = NULL;
- buffer->sgt = NULL;
- buffer->daddr = 0;
- buffer->vaddr = NULL;
-
- buffer->dma_buf = dma_buf_get(buffer->m.fd);
-
- attachment = dma_buf_attach(buffer->dma_buf, q->alloc_dev);
- if (IS_ERR(attachment)) {
- ret = PTR_ERR(attachment);
- goto p_err;
- }
- buffer->attachment = attachment;
-
- sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto p_err;
- }
- buffer->sgt = sgt;
-
- daddr = ion_iovmm_map(attachment, 0, size, DMA_BIDIRECTIONAL, 0);
- if (IS_ERR_VALUE(daddr)) {
- vision_err("Failed to allocate iova (err 0x%p)\n", &daddr);
- goto p_err;
- }
- buffer->daddr = daddr;
- buffer->dvaddr = daddr;
-
- vaddr = dma_buf_vmap(buffer->dma_buf);
- if (IS_ERR(vaddr)) {
- vision_err("Failed to get vaddr (err 0x%p)\n", &vaddr);
- goto p_err;
- }
- buffer->vaddr = vaddr;
- buffer->kvaddr = vaddr;
-
- complete_suc = true;
-p_err:
- if (complete_suc != true) {
- __vb_unmap_dmabuf(q, buffer);
- }
- return ret;
-}
-
-static int __vb_map_virtptr(struct vb_queue *q, struct vb_buffer *buffer, u32 size)
-{
- int ret = 0;
- unsigned long tmp_buffer;
- void *mbuf = NULL;
-
- mbuf = kmalloc(size, GFP_KERNEL);
- if (!mbuf) {
- ret = -ENOMEM;
- goto p_err;
- }
-
- tmp_buffer = (unsigned long)mbuf;
-
- ret = copy_from_user((void *)tmp_buffer, (void *)buffer->m.userptr, size);
- if (ret) {
- memcpy((void *)tmp_buffer, (void *)buffer->m.userptr, size);
- ret = 0;
- //goto p_err;
-
- }
-
- /* back up - userptr */
- buffer->reserved = buffer->m.userptr;
- buffer->m.userptr = (unsigned long)tmp_buffer;
-
-p_err:
- return ret;
-}
-
-static int __vb_queue_alloc(struct vb_queue *q,
- struct vs4l_container_list *c)
-{
- DLOG_INIT();
- int ret = 0;
- u32 i, j;
- size_t alloc_size;
- u8 *mapped_ptr;
- struct vb_format_list *flist;
- struct vb_bundle *bundle;
- struct vb_container_list *clist;
- struct vb_container *container;
- struct vb_buffer *buffer;
- struct vs4l_container *tmp_containers = NULL;
- struct vs4l_buffer tmp_buffer;
-
- BUG_ON(!q);
- BUG_ON(!c);
- BUG_ON(c->index >= VB_MAX_BUFFER);
-
- flist = &q->format;
-
-
- /* allocation */
- alloc_size = sizeof(struct vb_bundle);
- alloc_size += sizeof(struct vb_container) * c->count;
-
- tmp_containers = kzalloc(sizeof(struct vs4l_container) * c->count, GFP_KERNEL);
-
- for (i = 0; i < c->count; ++i) {
- ret = copy_from_user((void *)&tmp_containers[i], (void *)&c->containers[i], sizeof(struct vs4l_container));
- if (ret) {
- memcpy((void *)&tmp_containers[i], (void *)&c->containers[i], sizeof(struct vs4l_container));
- ret = 0;
- // goto p_err;
- }
-
- alloc_size += sizeof(struct vb_buffer) * tmp_containers[i].count;
- }
-
- bundle = kzalloc(alloc_size, GFP_KERNEL);
- if (!bundle) {
- ret = -ENOMEM;
- goto p_err;
- }
-
- /* mapping */
- mapped_ptr = (u8 *)bundle + sizeof(struct vb_bundle);
- bundle->clist.containers = (struct vb_container *)mapped_ptr;
- mapped_ptr += sizeof(struct vb_container) * c->count;
- for (i = 0; i < c->count; ++i) {
- bundle->clist.containers[i].buffers = (struct vb_buffer *)mapped_ptr;
- mapped_ptr += sizeof(struct vb_buffer) * tmp_containers[i].count;
- }
-
- /* fill */
- bundle->state = VB_BUF_STATE_DEQUEUED;
- clear_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
- clear_bit(VS4L_CL_FLAG_INVALID, &bundle->flags);
- clear_bit(VS4L_CL_FLAG_DONE, &bundle->flags);
-
- clist = &bundle->clist;
- clist->index = c->index;
- clist->id = c->id;
- clist->direction = c->direction;
- clist->count = c->count;
- clist->flags = c->flags;
-
- for (i = 0; i < _MAX_NUM_OF_USER_PARAMS; ++i)
- clist->user_params[i] = c->user_params[i];
-
- for (i = 0; i < clist->count; ++i) {
- container = &clist->containers[i];
- container->count = tmp_containers[i].count;
- container->memory = tmp_containers[i].memory;
- container->reserved[0] = tmp_containers[i].reserved[0];
- container->reserved[1] = tmp_containers[i].reserved[1];
- container->reserved[2] = tmp_containers[i].reserved[2];
- container->reserved[3] = tmp_containers[i].reserved[3];
- container->target = tmp_containers[i].target;
- container->type = tmp_containers[i].type;
-
- for (j = 0; j < flist->count; ++j) {
- DLOG("c%d t%d == f%d t%d\n", i, container->target, j, flist->formats[j].target);
- if (container->target == flist->formats[j].target) {
- container->format = &flist->formats[j];
- break;
- }
- }
-
- if (!container->format) {
- vision_err("format is not found\n");
- vision_err("%s", DLOG_OUT());
- kfree(bundle);
- ret = -EINVAL;
- goto p_err;
- }
-
- for (j = 0; j < container->count; ++j) {
- buffer = &container->buffers[j];
-
- ret = copy_from_user((void *)&tmp_buffer, (void *)&tmp_containers[i].buffers[j], sizeof(struct vs4l_buffer));
- if (ret) {
- memcpy((void *)&tmp_buffer,
- (void *)&tmp_containers[i].buffers[j], sizeof(struct vs4l_buffer));
- ret = 0;
- //goto p_err;
- }
- buffer->roi = tmp_buffer.roi;
- buffer->m.userptr = tmp_buffer.m.userptr;
- buffer->dbuf = NULL;
- buffer->mem_priv = NULL;
- buffer->cookie = NULL;
- buffer->kvaddr = NULL;
- buffer->dvaddr = 0;
- }
- }
-
- q->bufs[c->index] = bundle;
- q->num_buffers++;
-
-p_err:
- if (tmp_containers)
- kfree(tmp_containers);
-
- return ret;
-}
-
-static int __vb_queue_free(struct vb_queue *q,
- struct vb_bundle *bundle)
-{
- int ret = 0;
-
- BUG_ON(!bundle);
- BUG_ON(bundle->clist.index >= VB_MAX_BUFFER);
-
- q->bufs[bundle->clist.index] = NULL;
- kfree(bundle);
- q->num_buffers--;
-
- return ret;
-}
-
-static int __vb_queue_check(struct vb_bundle *bundle,
- struct vs4l_container_list *c)
-{
- int ret = 0;
- u32 i, j;
- struct vb_container_list *clist;
- struct vb_container *container;
- struct vb_buffer *buffer;
- struct vs4l_container tmp_container;
- struct vs4l_buffer tmp_buffer;
-
- BUG_ON(!bundle);
- BUG_ON(!c);
-
- clist = &bundle->clist;
-
- if (clist->index != c->index) {
- vision_err("index is conflict(%d != %d)\n", clist->index, c->index);
- ret = -EINVAL;
- goto p_err;
- }
-
- if (clist->direction != c->direction) {
- vision_err("direction is conflict(%d != %d)\n", clist->direction, c->direction);
- ret = -EINVAL;
- goto p_err;
- }
-
- if (clist->count != c->count) {
- vision_err("count is conflict(%d != %d)\n", clist->count, c->count);
- ret = -EINVAL;
- goto p_err;
- }
-
- clist->flags = c->flags;
- clist->id = c->id;
-
- for (i = 0; i < clist->count; ++i) {
- container = &clist->containers[i];
-
- ret = copy_from_user((void *)&tmp_container, (void *)&c->containers[i], sizeof(struct vs4l_container));
- if (ret) {
- memcpy((void *)&tmp_container, (void *)&c->containers[i], sizeof(struct vs4l_container));
- ret = 0;
- //goto p_err;
- }
-
- if (container->target != tmp_container.target) {
- vision_err("target is conflict(%d != %d)\n", container->target, tmp_container.target);
- ret = -EINVAL;
- goto p_err;
- }
-
- if (container->count != tmp_container.count) {
- vision_err("count is conflict(%d != %d)\n", container->count, tmp_container.count);
- ret = -EINVAL;
- goto p_err;
- }
-
- for (j = 0; j < container->count; ++j) {
- buffer = &container->buffers[j];
- ret = copy_from_user((void *)&tmp_buffer, (void *)&tmp_container.buffers[j], sizeof(struct vs4l_buffer));
- if (ret) {
- memcpy((void *)&tmp_buffer,
- (void *)&tmp_container.buffers[j], sizeof(struct vs4l_buffer));
- ret = 0;
- //goto p_err;
- }
- buffer->roi = tmp_buffer.roi;
-
- if (buffer->m.fd != tmp_buffer.m.fd) {
- vision_err("index(%d) buffer is conflict(%d != %d)\n", c->index, buffer->m.fd, tmp_buffer.m.fd);
- ret = -EINVAL;
- goto p_err;
- }
- }
- }
-
-p_err:
- return ret;
-}
-
-static int __vb_buf_prepare(struct vb_queue *q, struct vb_bundle *bundle)
-{
- int ret = 0;
- u32 i, j, k;
- struct vb_format *format;
- struct vb_container *container;
- struct vb_buffer *buffer;
-
- BUG_ON(!q);
- BUG_ON(!bundle);
-
- if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
- goto p_err;
-
- for (i = 0; i < bundle->clist.count; ++i) {
- container = &bundle->clist.containers[i];
- format = container->format;
-
- switch (container->type) {
- case VS4L_BUFFER_LIST:
- k = container->count;
- break;
- case VS4L_BUFFER_ROI:
- k = container->count;
- break;
- case VS4L_BUFFER_PYRAMID:
- k = container->count;
- break;
- default:
- vision_err("unsupported container type\n");
- goto p_err;
- }
-
- switch (container->memory) {
- case VS4L_MEMORY_DMABUF:
- for (j = 0; j < k; ++j) {
- buffer = &container->buffers[j];
- ret = __vb_map_dmabuf(q, buffer, format->size[j]);
- if (ret) {
- vision_err("__vb_qbuf_dmabuf[%d/%d] is fail(%d)\n", j, k, ret);
- goto p_err;
- }
- }
- break;
- case VS4L_MEMORY_VIRTPTR:
- for (j = 0; j < k; ++j) {
- buffer = &container->buffers[j];
- ret = __vb_map_virtptr(q, buffer, format->size[j]);
- if (ret) {
- vision_err("__vb_map_virtptr is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- default:
- vision_err("unsupported container memory type\n");
- goto p_err;
- }
- }
-
- ret = call_op(q, buf_prepare, q, &bundle->clist);
- if (ret) {
- vision_err("call_op(buf_prepare) is fail(%d)\n", ret);
- goto p_err;
- }
-
- set_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
-
-p_err:
- return ret;
-}
-
-static int __vb_buf_unprepare(struct vb_queue *q, struct vb_bundle *bundle)
-{
- int ret = 0;
- u32 i, j, k;
- struct vb_format *format;
- struct vb_container *container;
- struct vb_buffer *buffer;
-
- BUG_ON(!q);
- BUG_ON(!bundle);
-
-
- if (!test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
- goto p_err;
-
- for (i = 0; i < bundle->clist.count; ++i) {
- container = &bundle->clist.containers[i];
- format = container->format;
-
- switch (container->type) {
- case VS4L_BUFFER_LIST:
- k = container->count;
- break;
- case VS4L_BUFFER_ROI:
- k = container->count;
- break;
- case VS4L_BUFFER_PYRAMID:
- k = container->count;
- break;
- default:
- vision_err("unsupported container type\n");
- goto p_err;
- }
-
- switch (container->memory) {
- case VS4L_MEMORY_DMABUF:
- for (j = 0; j < k; ++j) {
- buffer = &container->buffers[j];
- ret = __vb_unmap_dmabuf(q, buffer);
- if (ret) {
- vision_err("__vb_qbuf_dmabuf is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_MEMORY_VIRTPTR:
- for (j = 0; j < k; ++j) {
- buffer = &container->buffers[j];
- ret = __vb_unmap_virtptr(q, buffer);
- if (ret) {
- vision_err("__vb_unmap_virtptr is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- default:
- vision_err("unsupported container memory type\n");
- goto p_err;
- }
- }
-
- ret = call_op(q, buf_unprepare, q, &bundle->clist);
- if (ret) {
- vision_err("call_op(buf_unprepare) is fail(%d)\n", ret);
- goto p_err;
- }
-
- clear_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags);
-
-p_err:
- return ret;
-}
-
-static int __vb_wait_for_done_vb(struct vb_queue *q, int nonblocking)
-{
- int ret = 0;
-
- if (!q->streaming) {
- vision_err("Streaming off, will not wait for buffers\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- if (!list_empty(&q->done_list))
- goto p_err;
-
- if (nonblocking) {
- vision_err("Nonblocking and no buffers to dequeue, will not wait\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- mutex_unlock(q->lock);
-
- ret = wait_event_interruptible(q->done_wq, !list_empty(&q->done_list) || !q->streaming);
-
- mutex_lock(q->lock);
-
-p_err:
- return ret;
-}
-
-static int __vb_get_done_vb(struct vb_queue *q,
- struct vb_bundle **bundle,
- struct vs4l_container_list *c,
- int nonblocking)
-{
- unsigned long flags;
- int ret;
-
- /*
- * Wait for at least one buffer to become available on the done_list.
- */
- ret = __vb_wait_for_done_vb(q, nonblocking);
- if (ret)
- return ret;
-
- /*
- * Driver's lock has been held since we last verified that done_list
- * is not empty, so no need for another list_empty(done_list) check.
- */
-
- spin_lock_irqsave(&q->done_lock, flags);
-
- *bundle = list_first_entry(&q->done_list, struct vb_bundle, done_entry);
-
- list_del(&(*bundle)->done_entry);
- atomic_dec(&q->done_count);
-
- spin_unlock_irqrestore(&q->done_lock, flags);
-
- return ret;
-}
-
-static void __fill_vs4l_buffer(struct vb_bundle *bundle,
- struct vs4l_container_list *c)
-{
- struct vb_container_list *clist;
-
- clist = &bundle->clist;
- c->flags &= ~(1 << VS4L_CL_FLAG_TIMESTAMP);
- c->flags &= ~(1 << VS4L_CL_FLAG_PREPARE);
- c->flags &= ~(1 << VS4L_CL_FLAG_INVALID);
- c->flags &= ~(1 << VS4L_CL_FLAG_DONE);
-
- if (test_bit(VS4L_CL_FLAG_TIMESTAMP, &clist->flags)) {
- c->flags |= (1 << VS4L_CL_FLAG_TIMESTAMP);
- memcpy(c->timestamp, clist->timestamp, sizeof(clist->timestamp));
- }
-
- if (test_bit(VS4L_CL_FLAG_PREPARE, &bundle->flags))
- c->flags |= (1 << VS4L_CL_FLAG_PREPARE);
-
- if (test_bit(VS4L_CL_FLAG_INVALID, &bundle->flags))
- c->flags |= (1 << VS4L_CL_FLAG_INVALID);
-
- if (test_bit(VS4L_CL_FLAG_DONE, &bundle->flags))
- c->flags |= (1 << VS4L_CL_FLAG_DONE);
-
- c->index = clist->index;
- c->id = clist->id;
-}
-
-static void __vb_dqbuf(struct vb_bundle *bundle)
-{
- if (bundle->state == VB_BUF_STATE_DEQUEUED)
- return;
-
- bundle->state = VB_BUF_STATE_DEQUEUED;
-}
-
-int vb_queue_init(struct vb_queue *q,
- void *alloc_dev,
- const struct vb2_mem_ops *mem_ops,
- const struct vb_ops *ops,
- struct mutex *lock,
- u32 direction)
-{
- int ret = 0;
-
- INIT_LIST_HEAD(&q->queued_list);
- atomic_set(&q->queued_count, 0);
-
- INIT_LIST_HEAD(&q->process_list);
- atomic_set(&q->process_count, 0);
-
- INIT_LIST_HEAD(&q->done_list);
- atomic_set(&q->done_count, 0);
-
- spin_lock_init(&q->done_lock);
- init_waitqueue_head(&q->done_wq);
-
- q->num_buffers = 0;
- q->direction = direction;
- q->lock = lock;
- q->streaming = 0;
- q->alloc_dev = alloc_dev;
- q->mem_ops = mem_ops;
- q->ops = ops;
-
- clear_bit(VB_QUEUE_STATE_FORMAT, &q->state);
- q->format.count = 0;
- q->format.formats = NULL;
-
- return ret;
-}
-
-int vb_queue_s_format(struct vb_queue *q, struct vs4l_format_list *flist)
-{
- int ret = 0;
- u32 i;
- struct vs4l_format *f;
- struct vb_fmt *fmt;
-
- q->format.count = flist->count;
- q->format.formats = kcalloc(flist->count, sizeof(struct vb_format), GFP_KERNEL);
-
- for (i = 0; i < flist->count; ++i) {
- f = &flist->formats[i];
-
- fmt = __vb_find_format(f->format);
- if (!fmt) {
- vision_err("__vb_find_format is fail\n");
- kfree(q->format.formats);
- ret = -EINVAL;
- goto p_err;
- }
-
- q->format.formats[i].fmt = fmt;
- q->format.formats[i].colorspace = f->format;
- q->format.formats[i].target = f->target;
- q->format.formats[i].plane = f->plane;
- q->format.formats[i].width = f->width;
- q->format.formats[i].height = f->height;
-
- ret = __vb_plane_size(&q->format.formats[i]);
- if (ret) {
- vision_err("__vb_plane_size is fail(%d)\n", ret);
- kfree(q->format.formats);
- goto p_err;
- }
- }
-
- set_bit(VB_QUEUE_STATE_FORMAT, &q->state);
-
-p_err:
- return ret;
-}
-
-int vb_queue_start(struct vb_queue *q)
-{
- int ret = 0;
-
- if (!test_bit(VB_QUEUE_STATE_FORMAT, &q->state)) {
- vision_err("format is not configured\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- q->streaming = 1;
- set_bit(VB_QUEUE_STATE_START, &q->state);
-
-p_err:
- return ret;
-}
-
-int vb_queue_stop(struct vb_queue *q)
-{
- int ret = 0;
- u32 i;
- struct vb_bundle *bundle;
-
- q->streaming = 0;
-
- wake_up_all(&q->done_wq);
-
- if (atomic_read(&q->queued_count) > 0) {
- vision_err("queued list is not empty\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- if (atomic_read(&q->process_count) > 0) {
- vision_err("process list is not empty\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- if (atomic_read(&q->done_count) > 0) {
- vision_err("done list is not empty\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- INIT_LIST_HEAD(&q->queued_list);
- INIT_LIST_HEAD(&q->process_list);
- INIT_LIST_HEAD(&q->done_list);
-
-// for (i = 0; i < VB_MAX_BUFFER; ++i) {
- for (i = 0; i < q->num_buffers; ++i) {
-
- bundle = q->bufs[i];
- if (!bundle)
- continue;
-
- ret = __vb_buf_unprepare(q, bundle);
- if (ret) {
- vision_err("__vb_buf_unprepare is fail(%d)\n", ret);
- goto p_err;
- }
-
- ret = __vb_queue_free(q, bundle);
- if (ret) {
- vision_err("__vb_queue_free is fail(%d)\n", ret);
- goto p_err;
- }
- }
-
- if (q->num_buffers != 0) {
- vision_err("memroy leakage is issued(%d)\n", q->num_buffers);
- BUG();
- }
-
- clear_bit(VB_QUEUE_STATE_START, &q->state);
-
-p_err:
- return ret;
-}
-
-int vb_queue_qbuf(struct vb_queue *q, struct vs4l_container_list *c)
-{
- int ret = 0;
- struct vb_bundle *bundle;
- struct vb_container *container;
- u32 direction;
- u32 i;
- u32 j, k, size;
-
- if (q->direction != c->direction) {
- vision_err("qbuf: invalid buffer direction\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- if (c->index >= VB_MAX_BUFFER) {
- vision_err("qbuf: buffer index out of range\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- bundle = q->bufs[c->index];
- if (bundle) {
- ret = __vb_queue_check(bundle, c);
- if (ret) {
- vision_err("__vb_queue_check is fail(%d)\n", ret);
- goto p_err;
- }
- } else {
- ret = __vb_queue_alloc(q, c);
- if (ret) {
- vision_err("__vb_queue_alloc is fail(%d)\n", ret);
- goto p_err;
- }
-
- bundle = q->bufs[c->index];
- }
-
- if (bundle->state != VB_BUF_STATE_DEQUEUED) {
- vision_err("qbuf: buffer already in use\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = __vb_buf_prepare(q, bundle);
- if (ret) {
- vision_err("__vb_buf_prepare is fail(%d)\n", ret);
- goto p_err;
- }
-
- if (q->direction == VS4L_DIRECTION_OT) {
- direction = DMA_FROM_DEVICE;
- } else {
- direction = DMA_TO_DEVICE;
- }
-
- /* sync buffers */
- for (i = 0; i < bundle->clist.count; ++i) {
- container = &bundle->clist.containers[i];
- BUG_ON(!container->format);
-#ifdef CONFIG_ION_EXYNOS
- if (container->memory != VS4L_MEMORY_VIRTPTR) {
- k = container->count;
- size = container->format->size[container->format->plane];
- for (j = 0; j < k; ++j) {
- __dma_map_area(container->buffers[j].vaddr, size, direction);
- }
- }
-#endif
- }
-
- /*
- * Add to the queued buffers list, a buffer will stay on it until
- * dequeued in dqbuf.
- */
- list_add_tail(&bundle->queued_entry, &q->queued_list);
- bundle->state = VB_BUF_STATE_QUEUED;
- atomic_inc(&q->queued_count);
-
- /* vb_buffer_print(&vb->buffer); */
-
-p_err:
- return ret;
-}
-
-int vb_queue_dqbuf(struct vb_queue *q,
- struct vs4l_container_list *c,
- bool nonblocking)
-{
- int ret = 0;
- struct vb_bundle *bundle;
-
- if (q->direction != c->direction) {
- vision_err("qbuf: invalid buffer direction\n");
- ret = -EINVAL;
- goto p_err;
- }
-
- ret = __vb_get_done_vb(q, &bundle, c, nonblocking);
- if (ret < 0) {
- vision_err("__vb2_get_done_vb is fail(%d)\n", ret);
- return ret;
- }
-
- if (bundle->state != VB_BUF_STATE_DONE) {
- vision_err("dqbuf: Invalid buffer state(%X)\n", bundle->state);
- return -EINVAL;
- }
-
- /* Fill buffer information for the userspace */
- __fill_vs4l_buffer(bundle, c);
- /* Remove from videobuf queue */
- /* go back to dequeued state */
- __vb_dqbuf(bundle);
-
- list_del(&bundle->queued_entry);
- atomic_dec(&q->queued_count);
-
-p_err:
- return ret;
-}
-
-void vb_queue_process(struct vb_queue *q, struct vb_bundle *bundle)
-{
- BUG_ON(!q);
- BUG_ON(!bundle);
- BUG_ON(q->direction != bundle->clist.direction);
-
- bundle->state = VB_BUF_STATE_PROCESS;
- list_add_tail(&bundle->process_entry, &q->process_list);
- atomic_inc(&q->process_count);
-}
-
-void vb_queue_done(struct vb_queue *q, struct vb_bundle *bundle)
-{
- struct vb_container *container;
- unsigned long flag;
- u32 direction, size;
- u32 i, j, k;
-
- BUG_ON(!q);
- BUG_ON(!bundle);
- BUG_ON(q->direction != bundle->clist.direction);
-
- if (q->direction == VS4L_DIRECTION_OT)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- /* sync buffers */
- for (i = 0; i < bundle->clist.count; ++i) {
- container = &bundle->clist.containers[i];
- BUG_ON(!container->format);
-#ifdef CONFIG_ION_EXYNOS
- if (container->memory != VS4L_MEMORY_VIRTPTR) {
- k = container->count;
- size = container->format->size[container->format->plane];
- for (j = 0; j < k; ++j)
- __dma_map_area(container->buffers[j].vaddr, size, direction);
- }
-#endif
- }
-
- spin_lock_irqsave(&q->done_lock, flag);
-
- list_del(&bundle->process_entry);
- atomic_dec(&q->process_count);
-
- bundle->state = VB_BUF_STATE_DONE;
- list_add_tail(&bundle->done_entry, &q->done_list);
- atomic_inc(&q->done_count);
-
- spin_unlock_irqrestore(&q->done_lock, flag);
- wake_up(&q->done_wq);
-}
+++ /dev/null
-/*
- * Samsung Exynos SoC series VISION driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <asm/uaccess.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-
-#include "vision-config.h"
-#include "vision-dev.h"
-#include "vision-ioctl.h"
-
-struct vs4l_graph32 {
- __u32 id;
- __u32 priority;
- __u32 time;
- __u32 flags;
- __u32 size;
- compat_caddr_t addr;
-};
-
-struct vs4l_format32 {
- __u32 target;
- __u32 format;
- __u32 plane;
- __u32 width;
- __u32 height;
-};
-
-struct vs4l_format_list32 {
- __u32 direction;
- __u32 count;
- compat_caddr_t formats;
-};
-
-struct vs4l_param32 {
- __u32 target;
- compat_caddr_t addr;
- __u32 offset;
- __u32 size;
-};
-
-struct vs4l_param_list32 {
- __u32 count;
- compat_caddr_t params;
-};
-
-struct vs4l_ctrl32 {
- __u32 ctrl;
- __u32 value;
-};
-
-struct vs4l_roi32 {
- __u32 x;
- __u32 y;
- __u32 w;
- __u32 h;
-};
-
-struct vs4l_buffer32 {
- struct vs4l_roi roi;
- union {
- compat_caddr_t userptr;
- __s32 fd;
- } m;
- compat_caddr_t reserved;
-};
-
-struct vs4l_container32 {
- __u32 type;
- __u32 target;
- __u32 memory;
- __u32 reserved[4];
- __u32 count;
- compat_caddr_t buffers;
-};
-
-struct vs4l_container_list32 {
- __u32 direction;
- __u32 id;
- __u32 index;
- __u32 flags;
- struct compat_timeval timestamp[6];
- __u32 count;
- __u32 user_params[_MAX_NUM_OF_USER_PARAMS];
- compat_caddr_t containers;
-};
-
-#define VS4L_VERTEXIOC_S_GRAPH32 _IOW('V', 0, struct vs4l_graph32)
-#define VS4L_VERTEXIOC_S_FORMAT32 _IOW('V', 1, struct vs4l_format_list32)
-#define VS4L_VERTEXIOC_S_PARAM32 _IOW('V', 2, struct vs4l_param_list32)
-#define VS4L_VERTEXIOC_S_CTRL32 _IOW('V', 3, struct vs4l_ctrl32)
-#define VS4L_VERTEXIOC_QBUF32 _IOW('V', 6, struct vs4l_container_list32)
-#define VS4L_VERTEXIOC_DQBUF32 _IOW('V', 7, struct vs4l_container_list32)
-
-static int get_vs4l_graph32(struct vs4l_graph *kp, struct vs4l_graph32 __user *up)
-{
- int ret = 0;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct vs4l_graph32)) ||
- get_user(kp->id, &up->id) ||
- get_user(kp->flags, &up->flags) ||
- get_user(kp->time, &up->time) ||
- get_user(kp->size, &up->size) ||
- get_user(kp->addr, &up->addr) ||
- get_user(kp->priority, &up->priority)) {
- vision_err("get_user is fail1\n");
- ret = -EFAULT;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-static void put_vs4l_graph32(struct vs4l_graph *kp, struct vs4l_graph32 __user *up)
-{
-}
-
-static int get_vs4l_format32(struct vs4l_format_list *kp, struct vs4l_format_list32 __user *up)
-{
- int ret = 0;
- u32 index, size;
- compat_caddr_t p;
- struct vs4l_format32 __user *uformats32;
- struct vs4l_format *kformats;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct vs4l_format_list32)) ||
- get_user(kp->direction, &up->direction) ||
- get_user(kp->count, &up->count)) {
- vision_err("get_user is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- if (get_user(p, &up->formats)) {
- vision_err("get_user(formats) is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = kp->count * sizeof(struct vs4l_format32);
- uformats32 = compat_ptr(p);
- if (!access_ok(VERIFY_READ, uformats32, size)) {
- vision_err("acesss is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = kp->count * sizeof(struct vs4l_format);
- kformats = kmalloc(size, GFP_KERNEL);
- if (!kformats) {
- vision_err("kmalloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
- }
-
- for (index = 0; index < kp->count; ++index) {
- if (get_user(kformats[index].target, &uformats32[index].target)) {
- vision_err("get_user(target) is fail\n");
- ret = -EFAULT;
- goto p_err_format_alloc;
- }
-
- if (get_user(kformats[index].format, &uformats32[index].format)) {
- vision_err("get_user(format) is fail\n");
- ret = -EFAULT;
- goto p_err_format_alloc;
- }
-
- if (get_user(kformats[index].plane, &uformats32[index].plane)) {
- vision_err("get_user(plane) is fail\n");
- ret = -EFAULT;
- goto p_err_format_alloc;
- }
-
- if (get_user(kformats[index].width, &uformats32[index].width)) {
- vision_err("get_user(width) is fail\n");
- ret = -EFAULT;
- goto p_err_format_alloc;
- }
-
- if (get_user(kformats[index].height, &uformats32[index].height)) {
- vision_err("get_user(height) is fail\n");
- ret = -EFAULT;
- goto p_err_format_alloc;
- }
- }
-
- kp->formats = kformats;
- return ret;
-
-p_err_format_alloc:
- kfree(kformats);
- kp->formats = NULL;
-
-p_err:
- vision_err("Return with failure (%d)\n", ret);
- return ret;
-}
-
-static void put_vs4l_format32(struct vs4l_format_list *kp, struct vs4l_format_list32 __user *up)
-{
- kfree(kp->formats);
-}
-
-static int get_vs4l_param32(struct vs4l_param_list *kp, struct vs4l_param_list32 __user *up)
-{
- int ret = 0;
- u32 index, size;
- compat_caddr_t p;
- struct vs4l_param32 __user *uparams32;
- struct vs4l_param *kparams;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct vs4l_param_list32)) ||
- get_user(kp->count, &up->count)) {
- vision_err("get_user is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- if (get_user(p, &up->params)) {
- vision_err("get_user(params) is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = kp->count * sizeof(struct vs4l_param32);
- uparams32 = compat_ptr(p);
- if (!access_ok(VERIFY_READ, uparams32, size)) {
- vision_err("access is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = kp->count * sizeof(struct vs4l_param);
- kparams = kmalloc(size, GFP_KERNEL);
- if (!kparams) {
- vision_err("kmalloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
- }
-
- for (index = 0; index < kp->count; ++index) {
- if (get_user(kparams[index].target, &uparams32[index].target)) {
- vision_err("get_user(target) is fail\n");
- ret = -EFAULT;
- goto p_err_alloc;
- }
-
- if (get_user(kparams[index].addr, &uparams32[index].addr)) {
- vision_err("get_user(addr) is fail\n");
- ret = -EFAULT;
- goto p_err_alloc;
- }
-
- if (get_user(kparams[index].offset, &uparams32[index].offset)) {
- vision_err("get_user(offset) is fail\n");
- ret = -EFAULT;
- goto p_err_alloc;
- }
-
- if (get_user(kparams[index].size, &uparams32[index].size)) {
- vision_err("get_user(size) is fail\n");
- ret = -EFAULT;
- goto p_err_alloc;
- }
- }
-
- kp->params = kparams;
- return ret;
-
-p_err_alloc:
- kfree(kparams);
- kp->params = NULL;
-
-p_err:
- vision_err("Return with failure (%d)\n", ret);
- return ret;
-}
-
-static void put_vs4l_param32(struct vs4l_param_list *kp, struct vs4l_param_list32 __user *up)
-{
- kfree(kp->params);
-}
-
-static int get_vs4l_ctrl32(struct vs4l_ctrl *kp, struct vs4l_ctrl32 __user *up)
-{
- int ret = 0;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct vs4l_ctrl32)) ||
- get_user(kp->ctrl, &up->ctrl) ||
- get_user(kp->value, &up->value)) {
- vision_err("get_user is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-static void put_vs4l_ctrl32(struct vs4l_ctrl *kp, struct vs4l_ctrl32 __user *up)
-{
-}
-
-static int get_vs4l_container32(struct vs4l_container_list *kp, struct vs4l_container_list32 __user *up)
-{
- int ret = 0;
- u32 i, j, size;
- compat_caddr_t p;
- struct vs4l_container32 *ucontainer32;
- struct vs4l_container *kcontainer;
- struct vs4l_buffer32 *ubuffer32;
- struct vs4l_buffer *kbuffer;
-
- if (!access_ok(VERIFY_READ, up, sizeof(struct vs4l_container_list32)) ||
- get_user(kp->direction, &up->direction) ||
- get_user(kp->id, &up->id) ||
- get_user(kp->index, &up->index) ||
- get_user(kp->flags, &up->flags) ||
- get_user(kp->count, &up->count) ||
- copy_from_user((void *)kp->user_params, (void *)&up->user_params, sizeof(__u32)*_MAX_NUM_OF_USER_PARAMS)) {
- vision_err("get_user is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- if (get_user(p, &up->containers)) {
- vision_err("get_user(containers) is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = sizeof(struct vs4l_container32) * kp->count;
- ucontainer32 = compat_ptr(p);
- if (!access_ok(VERIFY_READ, ucontainer32, size)) {
- vision_err("access is fail\n");
- ret = -EFAULT;
- goto p_err;
- }
-
- size = sizeof(struct vs4l_container) * kp->count;
- kcontainer = kzalloc(size, GFP_KERNEL);
- if (!kcontainer) {
- vision_err("kmalloc is fail(%zu, %u)\n", size, kp->count);
- ret = -ENOMEM;
- goto p_err;
- }
-
- kp->containers = kcontainer;
-
- for (i = 0; i < kp->count; ++i) {
- if (get_user(kcontainer[i].type, &ucontainer32[i].type)) {
- vision_err("get_user(type) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].target, &ucontainer32[i].target)) {
- vision_err("get_user(target) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].memory, &ucontainer32[i].memory)) {
- vision_err("get_user(memory) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].reserved[0], &ucontainer32[i].reserved[0])) {
- vision_err("get_user(reserved[0]) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].reserved[1], &ucontainer32[i].reserved[1])) {
- vision_err("get_user(reserved[1]) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].reserved[2], &ucontainer32[i].reserved[2])) {
- vision_err("get_user(reserved[2]) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].reserved[3], &ucontainer32[i].reserved[3])) {
- vision_err("get_user(reserved[3]) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kcontainer[i].count, &ucontainer32[i].count)) {
- vision_err("get_user(count) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(p, &ucontainer32[i].buffers)) {
- vision_err("get_user(buffers) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
-
- size = sizeof(struct vs4l_buffer32) * kcontainer[i].count;
- ubuffer32 = compat_ptr(p);
- if (!access_ok(VERIFY_READ, ubuffer32, size)) {
- vision_err("access is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- size = sizeof(struct vs4l_buffer) * kcontainer[i].count;
- kbuffer = kmalloc(size, GFP_KERNEL);
- if (!kbuffer) {
- vision_err("kmalloc is fail(size : %zu)\n", size);
- ret = -ENOMEM;
- goto p_err_container_alloc;
- }
-
- kcontainer[i].buffers = kbuffer;
-
- for (j = 0; j < kcontainer[i].count; ++j) {
- if (get_user(kbuffer[j].roi.x, &ubuffer32[j].roi.x)) {
- vision_err("get_user(roi.x) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kbuffer[j].roi.y, &ubuffer32[j].roi.y)) {
- vision_err("get_user(roi.y) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kbuffer[j].roi.w, &ubuffer32[j].roi.w)) {
- vision_err("get_user(roi.w) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (get_user(kbuffer[j].roi.h, &ubuffer32[j].roi.h)) {
- vision_err("get_user(roi.h) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
-
- if (kcontainer[i].memory == VS4L_MEMORY_DMABUF) {
- if (get_user(kbuffer[j].m.fd, &ubuffer32[j].m.fd)) {
- vision_err("get_user(fd) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
- } else {
- if (get_user(kbuffer[j].m.userptr, &ubuffer32[j].m.userptr)) {
- vision_err("get_user(userptr) is fail\n");
- ret = -EFAULT;
- goto p_err_container_alloc;
- }
- }
- }
- }
- return ret;
-
-p_err_container_alloc:
- for (i = 0; i < kp->count; ++i) {
- if (kcontainer[i].buffers) {
- kfree(kcontainer[i].buffers);
- kcontainer[i].buffers = NULL;
- }
- }
-
- if (kp->containers) {
- kfree(kp->containers);
- kp->containers = NULL;
- }
-
-p_err:
- return ret;
-}
-
-static void put_vs4l_container32(struct vs4l_container_list *kp, struct vs4l_container_list32 __user *up)
-{
- u32 i;
-
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct vs4l_container_list32)) ||
- put_user(kp->flags, &up->flags) ||
- put_user(kp->index, &up->index) ||
- put_user(kp->id, &up->id) ||
- put_user(kp->timestamp[0].tv_sec, &up->timestamp[0].tv_sec) ||
- put_user(kp->timestamp[0].tv_usec, &up->timestamp[0].tv_usec) ||
- put_user(kp->timestamp[1].tv_sec, &up->timestamp[1].tv_sec) ||
- put_user(kp->timestamp[1].tv_usec, &up->timestamp[1].tv_usec) ||
- put_user(kp->timestamp[2].tv_sec, &up->timestamp[2].tv_sec) ||
- put_user(kp->timestamp[2].tv_usec, &up->timestamp[2].tv_usec) ||
- put_user(kp->timestamp[3].tv_sec, &up->timestamp[3].tv_sec) ||
- put_user(kp->timestamp[3].tv_usec, &up->timestamp[3].tv_usec) ||
- put_user(kp->timestamp[4].tv_sec, &up->timestamp[4].tv_sec) ||
- put_user(kp->timestamp[4].tv_usec, &up->timestamp[4].tv_usec) ||
- put_user(kp->timestamp[5].tv_sec, &up->timestamp[5].tv_sec) ||
- put_user(kp->timestamp[5].tv_usec, &up->timestamp[5].tv_usec) ||
- copy_to_user(up->user_params, kp->user_params, sizeof(__u32)*_MAX_NUM_OF_USER_PARAMS)) {
- vision_err("write access error\n");
- BUG();
- }
-
- for (i = 0; i < kp->count; ++i)
- kfree(kp->containers[i].buffers);
-
- kfree(kp->containers);
-}
-
-long vertex_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret = 0;
- union {
- struct vs4l_graph vsgp;
- struct vs4l_format_list vsfl;
- struct vs4l_param_list vspl;
- struct vs4l_ctrl vsct;
- struct vs4l_container_list vscl;
- } karg;
- void __user *up = compat_ptr(arg);
- struct vision_device *vdev = vision_devdata(file);
- const struct vertex_ioctl_ops *ops = vdev->ioctl_ops;
-
- switch (cmd) {
- case VS4L_VERTEXIOC_S_GRAPH32:
- ret = get_vs4l_graph32(&karg.vsgp, up);
- if (ret) {
- vision_err("get_vs4l_graph32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_s_graph(file, &karg.vsgp);
- put_vs4l_graph32(&karg.vsgp, up);
- break;
- case VS4L_VERTEXIOC_S_FORMAT32:
- ret = get_vs4l_format32(&karg.vsfl, up);
- if (ret) {
- vision_err("get_vs4l_format32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_s_format(file, &karg.vsfl);
- put_vs4l_format32(&karg.vsfl, up);
- break;
- case VS4L_VERTEXIOC_S_PARAM32:
- ret = get_vs4l_param32(&karg.vspl, up);
- if (ret) {
- vision_err("get_vs4l_param32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_s_param(file, &karg.vspl);
- put_vs4l_param32(&karg.vspl, up);
- break;
- case VS4L_VERTEXIOC_S_CTRL32:
- ret = get_vs4l_ctrl32(&karg.vsct, up);
- if (ret) {
- vision_err("get_vs4l_ctrl32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_s_ctrl(file, &karg.vsct);
- put_vs4l_ctrl32(&karg.vsct, up);
- break;
- case VS4L_VERTEXIOC_STREAM_ON:
- ret = ops->vertexioc_streamon(file);
- break;
- case VS4L_VERTEXIOC_STREAM_OFF:
- ret = ops->vertexioc_streamoff(file);
- break;
- case VS4L_VERTEXIOC_QBUF32:
- ret = get_vs4l_container32(&karg.vscl, up);
- if (ret) {
- vision_err("qbuf, get_vs4l_container32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_qbuf(file, &karg.vscl);
- put_vs4l_container32(&karg.vscl, up);
- break;
- case VS4L_VERTEXIOC_DQBUF32:
- ret = get_vs4l_container32(&karg.vscl, up);
- if (ret) {
- vision_err("dqbuf, get_vs4l_container32 is fail(%ld)\n", ret);
- goto p_err;
- }
-
- ret = ops->vertexioc_dqbuf(file, &karg.vscl);
- put_vs4l_container32(&karg.vscl, up);
- break;
- default:
- vision_err("%x iocontrol is not supported(%lx, %zd)\n", cmd, VS4L_VERTEXIOC_S_FORMAT, sizeof(struct vs4l_format_list));
- break;
- }
-
-p_err:
- return ret;
-}
+++ /dev/null
-/*
- * Samsung Exynos SoC series VPU driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/poll.h>
-#include <asm/uaccess.h>
-
-#include "vision-config.h"
-#include "vision-dev.h"
-
-static struct vision_device *vision_device[VISION_NUM_DEVICES];
-static DEFINE_MUTEX(visiondev_lock);
-
-struct vision_device *vision_devdata(struct file *file)
-{
- return vision_device[iminor(file_inode(file))];
-}
-
-static int vision_open(struct inode *inode, struct file *file)
-{
- int ret = 0;
- struct vision_device *vdev;
-
- mutex_lock(&visiondev_lock);
-
- vdev = vision_devdata(file);
- if (!vdev) {
- mutex_unlock(&visiondev_lock);
- vision_err("vision device is NULL\n");
- ret = -ENODEV;
- goto p_err;
- }
-
- if (!(vdev->flags & (1 << VISION_DEVICE_TYPE_VERTEX))) {
- mutex_unlock(&visiondev_lock);
- vision_err("[V] vision device is not registered\n");
- ret = -ENODEV;
- goto p_err;
- }
-
- get_device(&vdev->dev);
- mutex_unlock(&visiondev_lock);
-
- ret = (vdev->fops->open ? vdev->fops->open(file) : -EINVAL);
- if (ret) {
- put_device(&vdev->dev);
- goto p_err;
- }
-
-p_err:
- return ret;
-}
-
-static int vision_release(struct inode *inode, struct file *file)
-{
- int ret = 0;
- struct vision_device *vdev = vision_devdata(file);
-
- ret = (vdev->fops->release ? vdev->fops->release(file) : -EINVAL);
- put_device(&vdev->dev);
-
- return 0;
-}
-
-static long vision_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- struct vision_device *vdev = vision_devdata(file);
- struct mutex *lock = vdev->lock;
-
- if (!vdev->fops->ioctl) {
- vision_err("ioctl is not registered\n");
- ret = -ENOTTY;
- goto p_err;
- }
-
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
-
- ret = vdev->fops->ioctl(file, cmd, arg);
-
- if (lock)
- mutex_unlock(lock);
-
-p_err:
- return ret;
-}
-
-static long vision_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- struct vision_device *vdev = vision_devdata(file);
- struct mutex *lock = vdev->lock;
-
- if (!vdev->fops->compat_ioctl) {
- vision_err("compat_ioctl is not registered\n");
- ret = -ENOTTY;
- goto p_err;
- }
-
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
-
- ret = vdev->fops->compat_ioctl(file, cmd, arg);
-
- if (lock)
- mutex_unlock(lock);
-
-p_err:
- return ret;
-}
-
-static unsigned int vision_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct vision_device *vdev = vision_devdata(file);
-
- if (!vdev->fops->poll)
- return DEFAULT_POLLMASK;
-
- return vdev->fops->poll(file, poll);
-}
-
-static const struct file_operations vision_fops = {
- .owner = THIS_MODULE,
- .open = vision_open,
- .release = vision_release,
- .unlocked_ioctl = vision_ioctl,
- .compat_ioctl = vision_compat_ioctl32,
- .poll = vision_poll,
- .llseek = no_llseek,
-};
-
-static ssize_t index_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct vision_device *vdev = container_of(dev, struct vision_device, dev);
-
- return sprintf(buf, "%i\n", vdev->index);
-}
-static DEVICE_ATTR_RO(index);
-
-static ssize_t debug_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct vision_device *vdev = container_of(dev, struct vision_device, dev);
-
- return sprintf(buf, "%i\n", vdev->debug);
-}
-
-static ssize_t debug_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct vision_device *vdev = container_of(dev, struct vision_device, dev);
- int res = 0;
- u16 value;
-
- res = kstrtou16(buf, 0, &value);
- if (res)
- return res;
-
- vdev->debug = value;
- return len;
-}
-static DEVICE_ATTR_RW(debug);
-
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct vision_device *vdev = container_of(dev, struct vision_device, dev);
-
- return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct attribute *vision_device_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_debug.attr,
- &dev_attr_index.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(vision_device);
-
-static struct class vision_class = {
- .name = VISION_NAME,
- .dev_groups = vision_device_groups,
-};
-
-static void vision_device_release(struct device *dev)
-{
- struct vision_device *vdev = container_of(dev, struct vision_device, dev);
-
- mutex_lock(&visiondev_lock);
- if (WARN_ON(vision_device[vdev->minor] != vdev)) {
- /* should not happen */
- mutex_unlock(&visiondev_lock);
- return;
- }
-
- /* Free up this device for reuse */
- vision_device[vdev->minor] = NULL;
-
- /* Delete the cdev on this minor as well */
- cdev_del(vdev->cdev);
- /* Just in case some driver tries to access this from
- the release() callback. */
- vdev->cdev = NULL;
-
- mutex_unlock(&visiondev_lock);
-
- /* Release video_device and perform other
- cleanups as needed. */
- vdev->release(vdev);
-}
-
-int vision_register_device(struct vision_device *vdev, int minor, struct module *owner)
-{
- int ret = 0;
- const char *name_base;
-
- BUG_ON(!vdev->parent);
- WARN_ON(vision_device[minor] != NULL);
-
- vdev->cdev = NULL;
-
- switch (vdev->type) {
- case VISION_DEVICE_TYPE_VERTEX:
- name_base = "vertex";
- break;
- default:
- printk(KERN_ERR "%s called with unknown type: %d\n", __func__, vdev->type);
- return -EINVAL;
- }
-
- /* Part 3: Initialize the character device */
- vdev->cdev = cdev_alloc();
- if (vdev->cdev == NULL) {
- ret = -ENOMEM;
- goto cleanup;
- }
- vdev->cdev->ops = &vision_fops;
- vdev->cdev->owner = owner;
- ret = cdev_add(vdev->cdev, MKDEV(VISION_MAJOR, minor), 1);
- if (ret < 0) {
- printk(KERN_ERR "%s: cdev_add failed\n", __func__);
- kfree(vdev->cdev);
- vdev->cdev = NULL;
- goto cleanup;
- }
-
- /* Part 4: register the device with sysfs */
- vdev->dev.class = &vision_class;
- vdev->dev.parent = vdev->parent;
- vdev->dev.devt = MKDEV(VISION_MAJOR, minor);
- dev_set_name(&vdev->dev, "%s%d", name_base, minor);
- ret = device_register(&vdev->dev);
- if (ret < 0) {
- printk(KERN_ERR "%s: device_register failed\n", __func__);
- goto cleanup;
- }
- /* Register the release callback that will be called when the last
- reference to the device goes away. */
- vdev->dev.release = vision_device_release;
-
- /* Part 6: Activate this minor. The char device can now be used. */
- set_bit(VISION_FL_REGISTERED, &vdev->flags);
-
- mutex_lock(&visiondev_lock);
- vdev->minor = minor;
- vision_device[minor] = vdev;
- mutex_unlock(&visiondev_lock);
-
- return 0;
-
-cleanup:
- mutex_lock(&visiondev_lock);
- if (vdev->cdev)
- cdev_del(vdev->cdev);
- mutex_unlock(&visiondev_lock);
- /* Mark this video device as never having been registered. */
- vdev->minor = -1;
- return ret;
-}
-EXPORT_SYMBOL(vision_register_device);
-
-static int __init visiondev_init(void)
-{
- int ret;
- dev_t dev = MKDEV(VISION_MAJOR, 0);
-
- printk(KERN_INFO "Linux vision interface: v1.00\n");
- ret = register_chrdev_region(dev, VISION_NUM_DEVICES, VISION_NAME);
- if (ret < 0) {
- printk(KERN_WARNING "videodev: unable to get major %d\n", VISION_MAJOR);
- return ret;
- }
-
- ret = class_register(&vision_class);
- if (ret < 0) {
- unregister_chrdev_region(dev, VISION_NUM_DEVICES);
- printk(KERN_WARNING "video_dev: class_register failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void __exit visiondev_exit(void)
-{
- dev_t dev = MKDEV(VISION_MAJOR, 0);
-
- class_unregister(&vision_class);
- unregister_chrdev_region(dev, VISION_NUM_DEVICES);
-}
-
-subsys_initcall(visiondev_init);
-module_exit(visiondev_exit)
-
-MODULE_AUTHOR("Gilyeon Lim <kilyeon.im@samsung.com>");
-MODULE_DESCRIPTION("Device registrar for Vision4Linux drivers v1");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(VISION_MAJOR);
+++ /dev/null
-/*
- * Samsung Exynos SoC series VISION driver
- *
- * Copyright (c) 2015 Samsung Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <asm/uaccess.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-
-#include "vision-config.h"
-#include "vision-dev.h"
-#include "vision-ioctl.h"
-
-long vertex_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- struct vision_device *vdev = vision_devdata(file);
- const struct vertex_ioctl_ops *ops = vdev->ioctl_ops;
- char sbuf[128];
- void *mbuf = NULL;
- void *parg = (void *)arg;
- unsigned int cmdsize = _IOC_SIZE(cmd);
-
- if (cmdsize <= sizeof(sbuf)) {
- parg = sbuf;
- } else {
- /* too big to allocate from stack */
- mbuf = kmalloc(cmdsize, GFP_KERNEL);
- if (NULL == mbuf) {
- vision_err("kmalloc is fail\n");
- ret = -ENOMEM;
- goto p_err;
- }
-
- parg = mbuf;
- }
-
- ret = copy_from_user(parg, (void __user *)arg, cmdsize);
- if (ret) {
- memcpy(parg, (void __user *)arg, cmdsize);
- ret = 0;
- //goto p_err;
- }
-
- switch (cmd) {
- case VS4L_VERTEXIOC_S_GRAPH:
- {
- ret = ops->vertexioc_s_graph(file, parg);
- if (ret) {
- vision_err("vertexioc_s_graph is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_S_FORMAT:
- {
- ret = ops->vertexioc_s_format(file, parg);
- if (ret) {
- vision_err("vertexioc_s_format is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_S_PARAM:
- {
- ret = ops->vertexioc_s_param(file, parg);
- if (ret) {
- vision_err("vertexioc_s_param is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_S_CTRL:
- {
- ret = ops->vertexioc_s_ctrl(file, parg);
- if (ret) {
- vision_err("vertexioc_s_ctrl is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_STREAM_ON:
- {
- ret = ops->vertexioc_streamon(file);
- if (ret) {
- vision_err("vertexioc_streamon is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_STREAM_OFF:
- {
- ret = ops->vertexioc_streamoff(file);
- if (ret) {
- vision_err("vertexioc_streamoff is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_QBUF:
- {
- ret = ops->vertexioc_qbuf(file, parg);
- if (ret) {
- vision_err("vertexioc_qbuf is fail(%d)\n", ret);
- goto p_err;
- }
- }
- break;
- case VS4L_VERTEXIOC_DQBUF:
- {
- ret = ops->vertexioc_dqbuf(file, parg);
- if (ret) {
- vision_err("vertexioc_dqbuf is fail(%d)\n", ret);
- goto p_err;
- }
- ret = copy_to_user((void __user *)arg, parg, cmdsize);
- if (ret) {
- vision_err("copy_to_user is fail(%d)\n", ret);
- memcpy((void __user *)arg, parg, cmdsize);
- ret = 0;
- goto p_err;
- }
- }
- break;
- default:
- vision_err("%x iocontrol is not supported(%lx, %zd)\n", cmd, VS4L_VERTEXIOC_S_FORMAT, sizeof(struct vs4l_format_list));
- break;
- }
-
-p_err:
- kfree(mbuf);
- return ret;
-}