ifeq ($(TARGET_BOARD_PLATFORM),exynos4)
-common_exynos4_dirs := libgralloc_ump libhdmi libhwcomposer libhwconverter libsecion
+common_exynos4_dirs := libgralloc_ump libhdmi libhwcomposer libhwconverter libsecion libUMP
ifneq ($(BOARD_USES_PROPRIETARY_LIBCAMERA),true)
common_exynos4_dirs += libcamera
+++ /dev/null
-#ifndef _LIB_ION_H_
-#define _LIB_ION_H_
-
-#include <unistd.h> /* size_t */
-
-#define ION_HEAP_SYSTEM_MASK (1 << 0)
-#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << 1)
-#define ION_HEAP_EXYNOS_MASK (1 << 4)
-#define ION_HEAP_EXYNOS_CONTIG_MASK (1 << 5)
-
-/* ION_MSYNC_FLAGS
- * values of @flags parameter to ion_msync()
- *
- * IMSYNC_DEV_TO_READ: Device only reads the buffer
- * IMSYNC_DEV_TO_WRITE: Device may writes to the buffer
- * IMSYNC_DEV_TO_RW: Device reads and writes to the buffer
- *
- * IMSYNC_SYNC_FOR_DEV: ion_msync() for device to access the buffer
- * IMSYNC_SYNC_FOR_CPU: ion_msync() for CPU to access the buffer after device
- * has accessed it.
- *
- * The values must be ORed with one of IMSYNC_DEV_* and one of IMSYNC_SYNC_*.
- * Otherwise, ion_msync() will not effect.
- */
-enum ION_MSYNC_FLAGS {
- IMSYNC_DEV_TO_READ = 0,
- IMSYNC_DEV_TO_WRITE = 1,
- IMSYNC_DEV_TO_RW = 2,
- IMSYNC_SYNC_FOR_DEV = 0x10000,
- IMSYNC_SYNC_FOR_CPU = 0x20000,
-};
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* ion_client
- * An ION client is an object or an entity that needs to use the service of
- * ION and has unique address space. ion_client is an identifier of an ION
- * client and it represents the ION client.
- * All operations on ION needs a valid ion_client value and it can be obtained
- * by ion_client_create().
- */
-typedef int ion_client;
-
-/* ion_buffer
- * An identifier of a buffer allocated from ION. You must obtain to access
- * a buffer allocated from ION. If you have an effective ion_buffer, you have
- * three options to work with it.
- * - To access the buffer, you can request an address (user virtual address)
- * of the buffer with ion_map().
- * - To pass the buffer to the kernel, you can pass the ion_buffer to the
- * kernel driver directly, if the kernel driver can work with ION.
- * - To pass the buffer to other processes, you can pass the ion_buffer to
- * other processes through RPC machanism such as socket communication or
- * Android Binder because ion_buffer is actually an open file descripotor
- * of the current process.
- */
-typedef int ion_buffer;
-
-/* ion_client_create()
- * @RETURN: new ion_client.
- * netative value if creating new ion_client is failed.
- *
- * A call to ion_client_create() must be paired with ion_client_destroy(),
- * symmetrically. ion_client_destroy() needs a valid ion_client that
- * is returned by ion_client_create().
- */
-ion_client ion_client_create(void);
-
-/* ion_client_destroy()
- * @client: An ion_client value to remove.
- */
-void ion_client_destroy(ion_client client);
-
-/* ion_alloc() - Allocates new buffer from ION.
- * @client: A valid ion_client value returned by ion_client_create().
- * @len: Size of a buffer required in bytes.
- * @align: Alignment requirements of @len and the start address of the allocated
- * buffer. If the @len is not aligned by @align, ION allocates a buffer
- * that is aligned by @align and the size of the buffer will be larger
- * than @len.
- * @flags: Additional requirements about buffer. ION_HEAP_SYSTEM_CONTIG_MASK
- * for allocating physically contiguous buffer and ION_HEAP_SYSTEM_MASK
- * for virtually contiguous buffer. You can combine those flags or
- * simply give -1(0xFFFFFFFF) if you do not care about the contiguouty
- * of the buffer.
- * @RETURN: An ion_buffer that represents the buffer allocated. It is only
- * unique in the context of the given client, @client.
- * -error if the allocation failed.
- * See the description of ion_buffer above for detailed information.
- */
-ion_buffer ion_alloc(ion_client client, size_t len, size_t align,
- unsigned int flags);
-
-/* ion_free() - Frees an existing buffer that is allocated by ION
- * @buffer: An ion_buffer of the buffer to be released.
- */
-void ion_free(ion_buffer buffer);
-
-/* ion_map() - Obtains a virtual address of the buffer identied by @buffer
- * @buffer: The buffer to map. The virtual address returned is allocated by the
- * kernel.
- * @len: The size of the buffer to map. This must not exceed the size of the
- * buffer represented by @fd_buf. Thus you need to know the size of it
- * before calling this function. If @len is less than the size of the
- * buffer, this function just map just the size requested (@len) not the
- * entire buffer.
- * @offset: How many pages will be ignored while mapping.@offset number of
- * pages from the start of the buffer will not be mapped.
- * @RETURN: The start virtual addres mapped.
- * MAP_FAILED if mapping fails.
- *
- * Note that @len + (@offset * PAGE_SIZE) must not exceed the size of the
- * buffer.
- */
-void *ion_map(ion_buffer buffer, size_t len, off_t offset);
-
-/* ion_unmap() - Frees the buffer mapped by ion_map()
- * @addr: The address returned by ion_map().
- * @len: The size of the buffer mapped by ion_map().
- * @RETURN: 0 on success, and -1 on failure.
- * errno is also set on failure.
- */
-int ion_unmap(void *addr, size_t len);
-
-/* ion_msync() - Makes sure that data in the buffer are visible to H/W peri.
- * @client: A valid ion_client value returned by ion_client_create().
- * @buffer: The buffer to perform ion_msync().
- * @flags: Direction of access of H/W peri and CPU. See the description of
- * ION_MSYNC_FLAGS.
- * @size: Size to ion_msync() in bytes.
- * @offset: Where ion_msync() start in @buffer, size in bytes.
- * @RETURN: 0 if successful. -error, otherwise.
- *
- * Note that @offset + @size must not exceed the size of @buffer.
- */
-int ion_msync(ion_client client, ion_buffer buffer, long flags,
- size_t size, off_t offset);
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* _LIB_ION_H_ */
#ifndef _LIB_SECION_H_
#define _LIB_SECION_H_
-#include <unistd.h>
+#include <unistd.h> /* size_t */
-typedef unsigned long ion_phys_addr_t;
+
+/* ion_client
+ * An ION client is an object or an entity that needs to use the service of
+ * ION and has unique address space. ion_client is an identifier of an ION
+ * client and it represents the ION client.
+ * All operations on ION needs a valid ion_client value and it can be obtained
+ * by ion_client_create().
+ */
typedef int ion_client;
+
+/* ion_buffer
+ * An identifier of a buffer allocated from ION. You must obtain to access
+ * a buffer allocated from ION. If you have an effective ion_buffer, you have
+ * three options to work with it.
+ * - To access the buffer, you can request an address (user virtual address)
+ * of the buffer with ion_map().
+ * - To pass the buffer to the kernel, you can pass the ion_buffer to the
+ * kernel driver directly, if the kernel driver can work with ION.
+ * - To pass the buffer to other processes, you can pass the ion_buffer to
+ * other processes through RPC mechanism such as socket communication or
+ * Android Binder because ion_buffer is actually an open file descriptor
+ * of the current process.
+ */
typedef int ion_buffer;
+typedef unsigned long ion_phys_addr_t;
+
+
+#define ION_HEAP_SYSTEM_MASK (1 << 0)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << 1)
+#define ION_HEAP_EXYNOS_CONTIG_MASK (1 << 4)
+#define ION_HEAP_EXYNOS_MASK (1 << 5)
+#define ION_HEAP_EXYNOS_USER_MASK (1 << 6)
+#define ION_EXYNOS_NONCACHE_MASK (1 << (32 - 2)) /* it was BITS_PER_LONG */
+#define ION_EXYNOS_WRITE_MASK (1 << (32 - 1)) /* it was BITS_PER_LONG */
+
+/* ION_MSYNC_FLAGS
+ * values of @flags parameter to ion_msync()
+ *
+ * IMSYNC_DEV_TO_READ: Device only reads the buffer
+ * IMSYNC_DEV_TO_WRITE: Device may writes to the buffer
+ * IMSYNC_DEV_TO_RW: Device reads and writes to the buffer
+ *
+ * IMSYNC_SYNC_FOR_DEV: ion_msync() for device to access the buffer
+ * IMSYNC_SYNC_FOR_CPU: ion_msync() for CPU to access the buffer after device
+ * has accessed it.
+ *
+ * The values must be ORed with one of IMSYNC_DEV_* and one of IMSYNC_SYNC_*.
+ * Otherwise, ion_msync() will have no effect.
+ */
enum ION_MSYNC_FLAGS {
IMSYNC_DEV_TO_READ = 0,
IMSYNC_DEV_TO_WRITE = 1,
extern "C" {
#endif
+/* ion_client_create()
+ * @RETURN: new ion_client.
+ * negative value if creating a new ion_client failed.
+ *
+ * A call to ion_client_create() must be paired with ion_client_destroy(),
+ * symmetrically. ion_client_destroy() needs a valid ion_client that
+ * is returned by ion_client_create().
+ */
ion_client ion_client_create(void);
+
+/* ion_client_destroy()
+ * @client: An ion_client value to remove.
+ */
void ion_client_destroy(ion_client client);
+
+/* ion_alloc() - Allocates new buffer from ION.
+ * @client: A valid ion_client value returned by ion_client_create().
+ * @len: Size of a buffer required in bytes.
+ * @align: Alignment requirements of @len and the start address of the allocated
+ * buffer. If the @len is not aligned by @align, ION allocates a buffer
+ * that is aligned by @align and the size of the buffer will be larger
+ * than @len.
+ * @flags: Additional requirements about buffer. ION_HEAP_SYSTEM_CONTIG_MASK
+ * for allocating physically contiguous buffer and ION_HEAP_SYSTEM_MASK
+ * for virtually contiguous buffer. You can combine those flags or
+ * simply give -1(0xFFFFFFFF) if you do not care about the contiguity
+ * of the buffer.
+ * @RETURN: An ion_buffer that represents the buffer allocated. It is only
+ * unique in the context of the given client, @client.
+ * -error if the allocation failed.
+ * See the description of ion_buffer above for detailed information.
+ */
ion_buffer ion_alloc(ion_client client, size_t len, size_t align, unsigned int flags);
+
+/* ion_free() - Frees an existing buffer that is allocated by ION
+ * @buffer: An ion_buffer of the buffer to be released.
+ */
void ion_free(ion_buffer buffer);
+
+/* ion_map() - Obtains a virtual address of the buffer identified by @buffer
+ * @buffer: The buffer to map. The virtual address returned is allocated by the
+ * kernel.
+ * @len: The size of the buffer to map. This must not exceed the size of the
+ * buffer represented by @buffer. Thus you need to know the size of it
+ * before calling this function. If @len is less than the size of the
+ * buffer, this function maps just the size requested (@len), not the
+ * entire buffer.
+ * @offset: How many pages will be ignored while mapping. @offset number of
+ * pages from the start of the buffer will not be mapped.
+ * @RETURN: The starting virtual address mapped.
+ * MAP_FAILED if mapping fails.
+ *
+ * Note that @len + (@offset * PAGE_SIZE) must not exceed the size of the
+ * buffer.
+ */
void *ion_map(ion_buffer buffer, size_t len, off_t offset);
+
+/* ion_unmap() - Frees the buffer mapped by ion_map()
+ * @addr: The address returned by ion_map().
+ * @len: The size of the buffer mapped by ion_map().
+ * @RETURN: 0 on success, and -1 on failure.
+ * errno is also set on failure.
+ */
int ion_unmap(void *addr, size_t len);
-int ion_msync(ion_client client, ion_buffer buffer, enum ION_MSYNC_FLAGS flags, size_t size, off_t offset);
+
+/* ion_msync() - Makes sure that data in the buffer are visible to H/W peri.
+ * @client: A valid ion_client value returned by ion_client_create().
+ * @buffer: The buffer to perform ion_msync().
+ * @flags: Direction of access of H/W peri and CPU. See the description of
+ * ION_MSYNC_FLAGS.
+ * @size: Size to ion_msync() in bytes.
+ * @offset: Where ion_msync() start in @buffer, size in bytes.
+ * @RETURN: 0 if successful. -error, otherwise.
+ *
+ * Note that @offset + @size must not exceed the size of @buffer.
+ */
+int ion_msync(ion_client client, ion_buffer buffer, long flags, size_t size, off_t offset);
+
+
+
+
ion_phys_addr_t ion_getphys(ion_client client, ion_buffer buffer);
int createIONMem(struct secion_param *param, size_t size, unsigned int flags);
int destroyIONMem(struct secion_param *param);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
/**
#include "ump_platform.h"
-#include "ion.h"
#ifdef __cplusplus
extern "C" {
#endif
*/
typedef enum
{
- UMP_OK, /**< indicates success */
- UMP_ERROR, /**< indicates failure */
+ UMP_OK = 0, /**< indicates success */
+ UMP_ERROR, /**< indicates failure */
} ump_result;
* This function retrieves a memory mapped pointer to the specified UMP memory,
* that can be used by the CPU. Every successful call to
* @ref ump_mapped_pointer_get "ump_mapped_pointer_get" is reference counted,
- * and must therefor be followed by a call to
+ * and must therefore be followed by a call to
* @ref ump_mapped_pointer_release "ump_mapped_pointer_release " when the
* memory mapping is no longer needed.
*
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
/**
#else
-#define UMP_API_EXPORT
+#if defined(__GNUC__)
+#if __GNUC__ >= 4
+# define MALI_VISIBLE __attribute__ ((visibility ("default"))) /**< Function should be visible from outside the dll */
+#else
+# define MALI_VISIBLE
+#endif
+
+#elif defined(__ARMCC_VERSION)
+/* ARMCC specific */
+# define MALI_VISIBLE __declspec(dllexport)
+
+#else
+# define MALI_VISIBLE
+
+#endif
+
+#define UMP_API_EXPORT MALI_VISIBLE
#endif
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
/**
extern "C" {
#endif
-typedef enum {
- /* This enum must match with the IOCTL enum in ump_ioctl.h */
- UMP_REF_DRV_CONSTRAINT_NONE = 0,
- UMP_REF_DRV_CONSTRAINT_PHYSICALLY_LINEAR = 1,
- /* This enum is included by samsung */
- UMP_REF_DRV_CONSTRAINT_USE_CACHE = 128,
+typedef enum
+{
+ /* This enum must match with the enum found in
+ * <kernel_directory>/drivers/gpu/mali400/<ver>/ump/include/ump_ref_drv.h */
+ UMP_REF_DRV_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_CONSTRAINT_USE_CACHE = 128,
} ump_alloc_constraints;
/** Allocate an UMP handle containing a memory buffer.
UMP_API_EXPORT ump_handle ump_ref_drv_allocate(unsigned long size, ump_alloc_constraints usage);
UMP_API_EXPORT ump_handle ump_ref_drv_ion_import(int ion_fd, ump_alloc_constraints constraints);
-typedef enum {
+
+typedef enum
+{
UMP_MSYNC_CLEAN = 0 ,
UMP_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ UMP_MSYNC_INVALIDATE = 2,
UMP_MSYNC_READOUT_CACHE_ENABLED = 128,
} ump_cpu_msync_op;
+typedef enum
+{
+ UMP_READ = 1,
+ UMP_READ_WRITE = 3,
+} ump_lock_usage;
+
/** Flushing cache for an ump_handle.
* The function will always CLEAN_AND_INVALIDATE as long as the \a op is not UMP_MSYNC_READOUT_CACHE_ENABLED.
* If so it will only report back if the given ump_handle is cacheable.
* Return value is 1 if cache is enabled, and 0 if it is disabled for the given allocation.*/
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle mem, ump_cpu_msync_op op, void* address, int size);
+
+typedef enum
+{
+ UMP_USED_BY_CPU = 0,
+ UMP_USED_BY_MALI = 1,
+ UMP_USED_BY_UNKNOWN_DEVICE = 100,
+} ump_hw_usage;
+
+typedef enum
+{
+ UMP_CACHE_OP_START = 0,
+ UMP_CACHE_OP_FINISH = 1,
+} ump_cache_op_control;
+
+/** Cache operation control. Tell when cache maintenance operations start and end.
+This will allow the kernel to merge cache operations together, thus making them faster */
+UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op);
+
+/** Memory synchronization - cache flushing if previous user was different hardware */
+UMP_API_EXPORT int ump_switch_hw_usage( ump_handle mem, ump_hw_usage new_user );
+
+/** Memory synchronization - cache flushing if previous user was different hardware */
+UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user );
+
+/** Locking buffer. Blocking call if the buffer is already locked. */
+UMP_API_EXPORT int ump_lock( ump_handle mem, ump_lock_usage lock_usage );
+
+/** Locking buffer. Blocking call if the buffer is already locked. */
+UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage );
+
+/** Unlocking buffer. Let other users lock the buffer for their usage */
+UMP_API_EXPORT int ump_unlock( ump_handle mem );
+
+/** Unlocking buffer. Let other users lock the buffer for their usage */
+UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id );
+
+
#ifdef __cplusplus
}
#endif
--- /dev/null
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(TARGET_BOARD_PLATFORM),exynos4)
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_PRELINK_MODULE := false
+LOCAL_SHARED_LIBRARIES := liblog libcutils
+
+LOCAL_CFLAGS:= -DLOG_TAG=\"UMP\"
+
+LOCAL_C_INCLUDES:= \
+ $(LOCAL_PATH)/../include \
+ $(LOCAL_PATH)/include
+
+UMP_SRCS := \
+ arch_011_udd/ump_frontend.c \
+ arch_011_udd/ump_ref_drv.c \
+ arch_011_udd/ump_arch.c \
+ os/linux/ump_uku.c \
+ os/linux/ump_osu_memory.c \
+ os/linux/ump_osu_locks.c
+
+LOCAL_SRC_FILES := $(UMP_SRCS)
+
+LOCAL_MODULE := libUMP
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
+
+endif
--- /dev/null
+/*
+ * Copyright (C) 2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+Building the UMP user space library for Linux
+---------------------------------------------
+
+A simple Makefile is provided, and the UMP user space library can be built
+simply by issuing make. This Makefile is setup to use the ARM GCC compiler
+from CodeSourcery, and it builds for ARMv6. Modification to this Makefile
+is needed in order to build for other configurations.
+
+In order to use this library from the Mali GPU driver, invoke the Mali GPU
+driver build system with the following two make variables set;
+- UMP_INCLUDE_DIR should point to the include folder inside this package
+- UMP_LIB should point to the built library (libUMP.so)
+
+This does not apply to Android builds, where the Android.mk file for the
+Mali GPU driver needs to be manually edited in order to add the correct
+include path and link against the correct library.
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_arch.c
+ *
+ * UMP arch layer for UMP-UDD
+ */
+
+#include "ump.h"
+#include "ump_arch.h"
+#include <ump/ump_debug.h>
+
+#include <ump/ump_uk_types.h>
+#include "../os/ump_uku.h"
+
+#include <cutils/log.h>
+
+/** Pointer to an OS-Specific context that we should pass in _uku_ calls */
+void *ump_uk_ctx = NULL;
+
+/** Reference counting of ump_arch_open() and ump_arch_close(). */
+static volatile int ump_ref_count = 0;
+
+/** Lock for critical section in open/close */
+_ump_osu_lock_t * ump_lock_arch = NULL;
+
+ump_result ump_arch_open(void)
+{
+ ump_result retval = UMP_OK;
+
+ _ump_osu_lock_auto_init( &ump_lock_arch, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0 );
+
+ /* Check that the lock was initialized */
+ if (NULL == ump_lock_arch)
+ {
+ UMP_DEBUG_PRINT(1, ("UMP: ump_arch_open() failed to init lock\n"));
+ return UMP_ERROR;
+ }
+
+ /* Attempt to obtain a lock */
+ if( _UMP_OSU_ERR_OK != _ump_osu_lock_wait( ump_lock_arch, _UMP_OSU_LOCKMODE_RW ) )
+ {
+ UMP_DEBUG_PRINT(1, ("UMP: ump_arch_open() failed to acquire lock\n"));
+ return UMP_ERROR;
+ }
+
+ /* ASSERT NEEDED */
+ UMP_DEBUG_ASSERT(0 <= ump_ref_count, ("UMP: Reference count invalid at _ump_base_arch_open()"));
+ ump_ref_count++;
+
+ if (1 == ump_ref_count)
+ {
+ /* We are the first, open the UMP device driver */
+
+ if (_UMP_OSU_ERR_OK != _ump_uku_open( &ump_uk_ctx ))
+ {
+ UMP_DEBUG_PRINT(1, ("UMP: ump_arch_open() failed to open UMP device driver\n"));
+ retval = UMP_ERROR;
+ ump_ref_count--;
+ }
+ }
+
+ /* Signal the lock so someone else can use it */
+ _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
+
+ return retval;
+}
+
+
+
+void ump_arch_close(void)
+{
+ _ump_osu_lock_auto_init( &ump_lock_arch, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0 );
+
+ /* Check that the lock was initialized */
+ if(NULL == ump_lock_arch)
+ {
+ UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to init lock\n"));
+ return;
+ }
+
+ /* Attempt to obtain a lock */
+ if( _UMP_OSU_ERR_OK != _ump_osu_lock_wait( ump_lock_arch, _UMP_OSU_LOCKMODE_RW ) )
+ {
+ UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to acquire lock\n"));
+ return;
+ }
+
+ UMP_DEBUG_ASSERT(0 < ump_ref_count, ("UMP: ump_arch_close() called while no references exist"));
+ if (ump_ref_count > 0)
+ {
+ ump_ref_count--;
+ if (0 == ump_ref_count)
+ {
+ _ump_osu_errcode_t retval = _ump_uku_close(&ump_uk_ctx);
+ UMP_DEBUG_ASSERT(retval == _UMP_OSU_ERR_OK, ("UMP: Failed to close UMP interface"));
+ UMP_IGNORE(retval);
+ ump_uk_ctx = NULL;
+ _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
+ _ump_osu_lock_term( ump_lock_arch ); /* Not 100% thread safe, since another thread can already be waiting for this lock in ump_arch_open() */
+ ump_lock_arch = NULL;
+ return;
+ }
+ }
+
+ /* Signal the lock so someone else can use it */
+ _ump_osu_lock_signal( ump_lock_arch, _UMP_OSU_LOCKMODE_RW );
+}
+
+
+
+ump_secure_id ump_arch_allocate(unsigned long * size, ump_alloc_constraints constraints)
+{
+ _ump_uk_allocate_s call_arg;
+
+ if ( NULL == size )
+ {
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ call_arg.ctx = ump_uk_ctx;
+ call_arg.secure_id = UMP_INVALID_SECURE_ID;
+ call_arg.size = *size;
+#ifdef UMP_DEBUG_SKIP_CODE
+ /** Run-time ASSERTing that _ump_uk_api_version_s and ump_alloc_constraints are
+ * interchangable */
+ switch (constraints)
+ {
+ case UMP_REF_DRV_CONSTRAINT_NONE:
+ UMP_DEBUG_ASSERT( UMP_REF_DRV_UK_CONSTRAINT_NONE == constraints, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints") );
+ break;
+ case UMP_REF_DRV_CONSTRAINT_PHYSICALLY_LINEAR:
+ UMP_DEBUG_ASSERT( UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR == constraints, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints") );
+ break;
+ default:
+ UMP_DEBUG_ASSERT( 1, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints: %d unrecognized", constraints) );
+ break;
+ }
+#endif
+ call_arg.constraints = (ump_uk_alloc_constraints)constraints;
+
+ if ( _UMP_OSU_ERR_OK != _ump_uku_allocate(&call_arg) )
+ {
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ *size = call_arg.size;
+
+ UMP_DEBUG_PRINT(4, ("UMP: Allocated ID %u, size %ul", call_arg.secure_id, call_arg.size));
+
+ return call_arg.secure_id;
+}
+
+
+ump_secure_id ump_arch_ion_import(int ion_fd, unsigned long *size, ump_alloc_constraints constraints)
+{
+ _ump_uk_ion_import_s call_arg;
+
+ call_arg.ctx = ump_uk_ctx;
+ call_arg.ion_fd = ion_fd;
+ call_arg.secure_id = UMP_INVALID_SECURE_ID;
+ call_arg.size = *size;
+ call_arg.constraints = (ump_uk_alloc_constraints)constraints;
+
+ UMP_DEBUG_PRINT(4, ("%s ion_fd=%d size=%x constraints=x", __func__, ion_fd, size, constraints));
+
+ if ( _UMP_OSU_ERR_OK != _ump_uku_ion_import(&call_arg) )
+ {
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ *size = call_arg.size;
+
+ UMP_DEBUG_PRINT(4, ("%s Allocated ID %u, size %ul", __func__, call_arg.secure_id, call_arg.size));
+
+ return call_arg.secure_id;
+}
+
+
+unsigned long ump_arch_size_get(ump_secure_id secure_id)
+{
+ _ump_uk_size_get_s dd_size_call_arg;
+
+ dd_size_call_arg.ctx = ump_uk_ctx;
+ dd_size_call_arg.secure_id = secure_id;
+ dd_size_call_arg.size = 0;
+
+ if (_UMP_OSU_ERR_OK == _ump_uku_size_get( &dd_size_call_arg ) )
+ {
+ return dd_size_call_arg.size;
+ }
+
+ return 0;
+}
+
+
+void ump_arch_reference_release(ump_secure_id secure_id)
+{
+ _ump_uk_release_s dd_release_call_arg;
+ _ump_osu_errcode_t retval;
+
+ dd_release_call_arg.ctx = ump_uk_ctx;
+ dd_release_call_arg.secure_id = secure_id;
+
+ UMP_DEBUG_PRINT(4, ("UMP: Releasing ID %u", secure_id));
+
+ retval = _ump_uku_release( &dd_release_call_arg );
+ UMP_DEBUG_ASSERT(retval == _UMP_OSU_ERR_OK, ("UMP: Failed to release reference to UMP memory"));
+ UMP_IGNORE(retval);
+}
+
+
+void* ump_arch_map(ump_secure_id secure_id, unsigned long size, ump_cache_enabled cache, unsigned long *cookie_out)
+{
+ _ump_uk_map_mem_s dd_map_call_arg;
+
+ UMP_DEBUG_ASSERT_POINTER( cookie_out );
+
+ dd_map_call_arg.ctx = ump_uk_ctx;
+ dd_map_call_arg.secure_id = secure_id;
+ dd_map_call_arg.size = size;
+ dd_map_call_arg.is_cached = (u32) (UMP_CACHE_ENABLE==cache);
+
+ if ( -1 == _ump_uku_map_mem( &dd_map_call_arg ) )
+ {
+ UMP_DEBUG_PRINT(4, ("UMP: Mapping failed for ID %u", secure_id));
+ return NULL;
+ }
+
+ UMP_DEBUG_PRINT(4, ("Mapped %u at 0x%08lx", secure_id, (unsigned long)dd_map_call_arg.mapping));
+
+ *cookie_out = dd_map_call_arg.cookie;
+ return dd_map_call_arg.mapping;
+}
+
+
+
+void ump_arch_unmap(void* mapping, unsigned long size, unsigned long cookie)
+{
+ _ump_uk_unmap_mem_s dd_unmap_call_arg;
+
+ dd_unmap_call_arg.ctx = ump_uk_ctx;
+ dd_unmap_call_arg.mapping = mapping;
+ dd_unmap_call_arg.size = size;
+ dd_unmap_call_arg.cookie = cookie;
+
+ UMP_DEBUG_PRINT(4, ("Unmapping 0x%08lx", (unsigned long)mapping));
+ _ump_uku_unmap_mem( &dd_unmap_call_arg );
+}
+
+/** Memory synchronization - cache flushing of mapped memory */
+ump_cache_enabled ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie, void * address, unsigned long size, ump_cpu_msync_op op)
+{
+ _ump_uk_msync_s dd_msync_call_arg;
+
+ dd_msync_call_arg.ctx = ump_uk_ctx;
+ dd_msync_call_arg.mapping = mapping;
+ dd_msync_call_arg.address = address;
+ dd_msync_call_arg.size = size;
+ dd_msync_call_arg.op = (ump_uk_msync_op)op;
+ dd_msync_call_arg.cookie = cookie;
+ dd_msync_call_arg.secure_id = secure_id;
+ dd_msync_call_arg.is_cached = 0;
+
+ UMP_DEBUG_PRINT(4, ("Msync 0x%08lx", (unsigned long)mapping));
+ _ump_uku_msynch( &dd_msync_call_arg );
+ if ( 0==dd_msync_call_arg.is_cached )
+ {
+ UMP_DEBUG_PRINT(4, ("Trying to flush uncached UMP mem ID: %d", secure_id));
+ }
+ return (ump_cache_enabled)(dd_msync_call_arg.is_cached);
+}
+
+/** Cache operation control. Tell when cache maintenance operations start and end.
+This will allow the kernel to merge cache operations together, thus making them faster */
+int ump_arch_cache_operations_control(ump_cache_op_control op)
+{
+ _ump_uk_cache_operations_control_s dd_cache_control_arg;
+
+ dd_cache_control_arg.op = (ump_uk_cache_op_control)op;
+ dd_cache_control_arg.ctx = ump_uk_ctx;
+
+ UMP_DEBUG_PRINT(4, ("Cache control op:%d",(u32)op ));
+ _ump_uku_cache_operations_control( &dd_cache_control_arg );
+ return 1; /* Always success */
+}
+
+int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user )
+{
+ _ump_uk_switch_hw_usage_s dd_sitch_user_arg;
+
+ dd_sitch_user_arg.secure_id = secure_id;
+ dd_sitch_user_arg.new_user = (ump_uk_user)new_user;
+ dd_sitch_user_arg.ctx = ump_uk_ctx;
+
+ UMP_DEBUG_PRINT(4, ("Switch user UMP:%d User:%d",secure_id, (u32)new_user ));
+ _ump_uku_switch_hw_usage( &dd_sitch_user_arg );
+ return 1; /* Always success */
+}
+
+int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage )
+{
+ _ump_uk_lock_s dd_lock_arg;
+
+ dd_lock_arg.ctx = ump_uk_ctx;
+ dd_lock_arg.secure_id = secure_id;
+ dd_lock_arg.lock_usage = (ump_uk_lock_usage) lock_usage;
+
+ UMP_DEBUG_PRINT(4, ("Lock UMP:%d ",secure_id));
+ _ump_uku_lock( &dd_lock_arg );
+ return 1; /* Always success */
+}
+
+int ump_arch_unlock( ump_secure_id secure_id )
+{
+ _ump_uk_unlock_s dd_unlock_arg;
+
+ dd_unlock_arg.ctx = ump_uk_ctx;
+ dd_unlock_arg.secure_id = secure_id;
+
+ UMP_DEBUG_PRINT(4, ("Lock UMP:%d ",secure_id));
+ _ump_uku_unlock( &dd_unlock_arg );
+ return 1; /* Always success */
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_arch.h
+ *
+ * Header file for the arch dependent backend, which will do the communication with the UMP device driver.
+ */
+
+#ifndef _UNIFIED_MEMORY_PROVIDER_ARCH_H_
+#define _UNIFIED_MEMORY_PROVIDER_ARCH_H_
+
+#include "ump.h"
+#include "ump_ref_drv.h"
+#include "ump_internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+/** Open UMP interface. */
+ump_result ump_arch_open(void);
+
+/** Close UMP interface. */
+void ump_arch_close(void);
+
+/** Allocate UMP memory. */
+ump_secure_id ump_arch_allocate(unsigned long * size, ump_alloc_constraints constraints);
+
+/** Query size of specified UMP memory, in bytes. */
+unsigned long ump_arch_size_get(ump_secure_id secure_id);
+
+/** Release a reference from specified UMP memory. */
+void ump_arch_reference_release(ump_secure_id secure_id);
+
+/** Map specified UMP memory into CPU address space */
+void* ump_arch_map(ump_secure_id secure_id, unsigned long size, ump_cache_enabled cache, unsigned long *cookie_out);
+
+/** Unmap specified UMP memory from CPU address space */
+void ump_arch_unmap(void* mapping, unsigned long size, unsigned long cookie);
+
+/** Memory synchronization - cache flushing of mapped memory
+ * @return Is_cached: 1==True 0==NonCached */
+ump_cache_enabled ump_arch_msync(ump_secure_id secure_id, void* mapping, unsigned long cookie, void * address, unsigned long size, ump_cpu_msync_op op);
+
+/** Cache operation control. Tell when cache maintenance operations start and end.
+This will allow the kernel to merge cache operations together, thus making them faster */
+int ump_arch_cache_operations_control(ump_cache_op_control op);
+
+/** Memory synchronization - cache flushing if previous user was different hardware */
+int ump_arch_switch_hw_usage( ump_secure_id secure_id, ump_hw_usage new_user );
+
+/** Locking buffer. Blocking call if the buffer is already locked. */
+int ump_arch_lock( ump_secure_id secure_id, ump_lock_usage lock_usage );
+
+/** Unlocking buffer. Let other users lock the buffer for their usage */
+int ump_arch_unlock( ump_secure_id secure_id );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _UNIFIED_MEMORY_PROVIDER_ARCH_H_ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_frontend.c
+ *
+ * This file implements the user space API of the UMP API.
+ * It relies heavily on a arch backend to do the communication with the UMP device driver.
+ */
+
+#include "ump.h"
+#include "ump_internal.h"
+#include "ump_arch.h"
+#include <ump/ump_debug.h>
+#include <ump/ump_osu.h>
+
+/** Open the connection to the UMP device driver (thin wrapper around the arch backend). */
+UMP_API_EXPORT ump_result ump_open(void)
+{
+ return ump_arch_open();
+}
+
+/** Close the connection to the UMP device driver (thin wrapper around the arch backend). */
+UMP_API_EXPORT void ump_close(void)
+{
+ ump_arch_close();
+}
+
+/** Return the secure ID stored in the handle.
+ * Debug builds assert on obviously invalid handle state. */
+UMP_API_EXPORT ump_secure_id ump_secure_id_get(ump_handle memh)
+{
+ ump_mem * mem = (ump_mem*)memh;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != mem->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < mem->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < mem->size, ("Memory size of passed handle too low"));
+
+ return mem->secure_id;
+}
+
+/** Create a local handle for an existing UMP allocation identified by secure_id.
+ * On success the buffer is mapped (initially requested uncached; the real cache
+ * state is then read back from the driver via ump_cpu_msync_now()) and the
+ * userspace reference count starts at 1.
+ * @return New handle, or UMP_INVALID_MEMORY_HANDLE on failure. */
+UMP_API_EXPORT ump_handle ump_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ unsigned long size;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != secure_id, ("Secure ID is invalid"));
+
+ size = ump_arch_size_get(secure_id);
+ if (0 != size)
+ {
+ unsigned long cookie;
+ /*
+ * The UMP memory which the secure_id refers to could now be deleted and re-created
+ * since we don't have any references to it yet. The mapping below will however fail if
+ * we have supplied incorrect size, so we are safe.
+ */
+ void * mapping = ump_arch_map(secure_id, size, UMP_CACHE_DISABLE, &cookie);
+ if (NULL != mapping)
+ {
+ ump_mem * mem = _ump_osu_calloc(1, sizeof(*mem));
+ if (NULL != mem)
+ {
+ mem->secure_id = secure_id;
+ mem->mapped_mem = mapping;
+ mem->size = size;
+ mem->cookie = cookie;
+ mem->is_cached = UMP_CACHE_ENABLE; /* Is set to actually check in the ump_cpu_msync_now() function */
+
+ _ump_osu_lock_auto_init(&mem->ref_lock, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);
+ UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
+ mem->ref_count = 1;
+
+ /* This is called only to set the cache settings in this handle */
+ ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);
+
+ UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));
+
+ return (ump_handle)mem;
+ }
+
+ /* Handle allocation failed: drop the mapping (and with it our driver reference). */
+ ump_arch_unmap(mapping, size, cookie);
+ }
+ }
+
+ UMP_DEBUG_PRINT(2, ("UMP handle creation failed for ID %u", secure_id));
+
+ return UMP_INVALID_MEMORY_HANDLE;
+}
+
+/** Return the size in bytes of the memory the handle refers to. */
+UMP_API_EXPORT unsigned long ump_size_get(ump_handle memh)
+{
+ ump_mem * mem = (ump_mem*)memh;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != mem->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < mem->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < mem->size, ("Memory size of passed handle too low"));
+
+ return mem->size;
+}
+
+/** Copy 'length' bytes starting at 'offset' inside the UMP buffer into dst.
+ * Debug builds assert that the requested range lies inside the buffer. */
+UMP_API_EXPORT void ump_read(void *dst, ump_handle srch, unsigned long offset, unsigned long length)
+{
+ ump_mem * src = (ump_mem*)srch;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != srch, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != src->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < src->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < src->size, ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != src->mapped_mem, ("UMP Memory is not mapped"));
+ UMP_DEBUG_ASSERT((src->size) >= (offset + length), ("Requested read beyond end of UMP memory"));
+
+ _ump_osu_memcpy(dst,(char*)(src->mapped_mem) + offset, length);
+}
+
+/** Copy 'length' bytes from src into the UMP buffer starting at 'offset'.
+ * Debug builds assert that the requested range lies inside the buffer. */
+UMP_API_EXPORT void ump_write(ump_handle dsth, unsigned long offset, const void *src, unsigned long length)
+{
+ ump_mem * dst = (ump_mem*)dsth;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != dsth, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != dst->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < dst->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < dst->size, ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != dst->mapped_mem, ("UMP Memory is not mapped"));
+ UMP_DEBUG_ASSERT((dst->size) >= (offset + length), ("Requested write beyond end of UMP memory"));
+
+ _ump_osu_memcpy((char*)(dst->mapped_mem) + offset, src, length);
+}
+
+
+
+/** Return the CPU-side mapping of the buffer. The mapping is created when the
+ * handle is created, so this never maps anything new. */
+UMP_API_EXPORT void* ump_mapped_pointer_get(ump_handle memh)
+{
+ ump_mem * mem = (ump_mem*)memh;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != mem->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < mem->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < mem->size, ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != mem->mapped_mem, ("Error in mapping pointer (not mapped)"));
+
+ return mem->mapped_mem;
+}
+
+
+
+/** Release a pointer obtained via ump_mapped_pointer_get().
+ * Intentionally a no-op in release builds: the mapping lives for the whole
+ * lifetime of the handle (created with it, removed when it is destroyed).
+ * Only the sanity asserts remain in debug builds. */
+UMP_API_EXPORT void ump_mapped_pointer_release(ump_handle memh)
+{
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != ((ump_mem*)memh)->secure_id, ("Secure ID is inavlid"));
+ UMP_DEBUG_ASSERT(0 < ((ump_mem*)memh)->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < ((ump_mem*)memh)->size, ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != ((ump_mem*)memh)->mapped_mem, ("Error in mapping pointer (not mapped)"));
+
+ /* noop, cos we map in the pointer when handle is created, and unmap it when handle is destroyed */
+}
+
+
+
+/** Add one userspace reference to the handle.
+ * The increment is protected by the handle's ref_lock; this counts local
+ * (userspace) references only, not the kernel driver's reference count. */
+UMP_API_EXPORT void ump_reference_add(ump_handle memh)
+{
+ ump_mem * mem = (ump_mem*)memh;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != mem->secure_id, ("Secure ID is inavlid"));
+ UMP_DEBUG_ASSERT(0 < mem->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < mem->size, ("Memory size of passed handle too low"));
+
+ _ump_osu_lock_wait(mem->ref_lock, _UMP_OSU_LOCKMODE_RW);
+ mem->ref_count += 1;
+ _ump_osu_lock_signal(mem->ref_lock, _UMP_OSU_LOCKMODE_RW);
+}
+
+
+
+/** Drop one userspace reference to the handle. When the count reaches zero,
+ * the CPU mapping (our only remaining kernel-side reference) is removed and
+ * the handle itself is freed. Thread-safe via the handle's ref_lock. */
+UMP_API_EXPORT void ump_reference_release(ump_handle memh)
+{
+ ump_mem * mem = (ump_mem*)memh;
+
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != mem->secure_id, ("Secure ID is invalid")); /* typo "inavlid" fixed */
+ UMP_DEBUG_ASSERT(0 < mem->ref_count, ("Reference count too low"));
+ UMP_DEBUG_ASSERT(0 < mem->size, ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != mem->mapped_mem, ("Error in mapping pointer (not mapped)"));
+
+ _ump_osu_lock_wait(mem->ref_lock, _UMP_OSU_LOCKMODE_RW);
+ mem->ref_count -= 1;
+ if (0 == mem->ref_count)
+ {
+ /* Remove memory mapping, which holds our only reference towards the UMP kernel space driver */
+ ump_arch_unmap(mem->mapped_mem, mem->size, mem->cookie);
+
+ _ump_osu_lock_signal(mem->ref_lock, _UMP_OSU_LOCKMODE_RW);
+
+ /* Free the lock protecting the reference count */
+ _ump_osu_lock_term(mem->ref_lock);
+
+ /* Free the memory for this handle */
+ _ump_osu_free(mem);
+ }
+ else
+ {
+ _ump_osu_lock_signal(mem->ref_lock, _UMP_OSU_LOCKMODE_RW);
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_internal.h
+ *
+ * Internal definitions and debugging macros for the UMP implementation.
+ */
+
+#ifndef _UNIFIED_MEMORY_PROVIDER_INTERNAL_H_
+#define _UNIFIED_MEMORY_PROVIDER_INTERNAL_H_
+
+#include "ump.h"
+#include <ump/ump_osu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache state of a mapping, as requested at map time and as reported back by
+ * the kernel driver through ump_arch_msync(). */
+typedef enum ump_cache_enabled
+{
+ UMP_CACHE_DISABLE = 0,
+ UMP_CACHE_ENABLE = 1
+} ump_cache_enabled;
+
+/**
+ * The actual (hidden) definition of ump_handles.
+ */
+typedef struct ump_mem
+{
+ ump_secure_id secure_id; /**< UMP device driver cookie */
+ void * mapped_mem; /**< Mapped memory; all read and write use this */
+ unsigned long size; /**< Size of allocated memory */
+ _ump_osu_lock_t* ref_lock; /**< Lock protection ref_count */
+ int ref_count; /**< The reference count of the ump_handle in userspace. It is used for finding out
+ when to free the memory used by this userspace handle. It is NOT the same as the
+ real ump_mem reference count in the devicedriver which do reference counting
+ for the memory that this handle reveals. */
+ unsigned long cookie; /**< cookie for use in arch_unmap calls */
+ ump_cache_enabled is_cached; /**< Last cache state reported by the driver (updated by ump_cpu_msync_now()) */
+} ump_mem;
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* _UNIFIED_MEMORY_PROVIDER_INTERNAL_H_ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_ref_drv.c
+ *
+ * Implementation of the user space API extensions provided by the reference implementation.
+ */
+
+#include "ump_ref_drv.h"
+#include "ump.h"
+#include "ump_internal.h"
+#include "ump_arch.h"
+#include <ump/ump_debug.h>
+#include <ump/ump_osu.h>
+
+/* Allocate a buffer which can be used directly by hardware, 4kb aligned */
+static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache);
+static ump_handle ump_ref_drv_ion_import_internal(int ion_fd, ump_alloc_constraints constraints, ump_cache_enabled cache);
+
+
+/* Allocate a buffer which can be used directly by hardware, 4kb aligned */
+ump_handle ump_ref_drv_allocate(unsigned long size, ump_alloc_constraints constraints)
+{
+ /* Map the caller's cache constraint bit onto the internal cache mode. */
+ ump_cache_enabled cache = (0 != (constraints & UMP_REF_DRV_CONSTRAINT_USE_CACHE))
+ ? UMP_CACHE_ENABLE
+ : UMP_CACHE_DISABLE;
+
+ return ump_ref_drv_allocate_internal(size, constraints, cache);
+}
+
+/* Import an existing ION buffer into UMP; honours the cache constraint bit. */
+ump_handle ump_ref_drv_ion_import(int ion_fd, ump_alloc_constraints constraints)
+{
+ /* Map the caller's cache constraint bit onto the internal cache mode. */
+ ump_cache_enabled cache = (0 != (constraints & UMP_REF_DRV_CONSTRAINT_USE_CACHE))
+ ? UMP_CACHE_ENABLE
+ : UMP_CACHE_DISABLE;
+
+ return ump_ref_drv_ion_import_internal(ion_fd, constraints, cache);
+}
+
+/** CPU-side cache synchronization of (a sub-range of) a UMP buffer.
+ * For op == UMP_MSYNC_READOUT_CACHE_ENABLED this only queries the driver for
+ * the buffer's cache state; for other ops it performs the requested sync,
+ * skipping entirely when the handle is already known to be uncached.
+ * @param address Start of the range; NULL means the start of the mapping.
+ * @param size Length in bytes; 0 means the whole mapping.
+ * @return Cache state reported by the driver: 1 == cached, 0 == uncached.
+ * Also stored into mem->is_cached as a side effect. */
+UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void* address, int size)
+{
+ int offset;
+ ump_mem * mem = (ump_mem*)memh;
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+
+ /* If the op is readout, we do the readout from DD.
+ Else we skip flushing if the userspace handle says that it is uncached */
+ if ((UMP_MSYNC_READOUT_CACHE_ENABLED!=op) && (0 == mem->is_cached) ) return 0;
+
+ /* Default to the whole mapping when no sub-range is supplied. */
+ if ( NULL == address )
+ {
+ address = ((ump_mem*)mem)->mapped_mem;
+ }
+ offset = (int) ((unsigned long)address - (unsigned long)((ump_mem*)mem)->mapped_mem);
+
+ if ( 0 == size )
+ {
+ size = (int)((ump_mem*)mem)->size;
+ }
+
+ UMP_DEBUG_ASSERT(0 < (((ump_mem*)mem)->ref_count), ("Reference count too low"));
+ UMP_DEBUG_ASSERT((size>=0) && (size <= (int)((ump_mem*)mem)->size), ("Memory size of passed handle too low"));
+ UMP_DEBUG_ASSERT(NULL != ((ump_mem*)mem)->mapped_mem, ("Error in mapping pointer (not mapped)"));
+
+ /* Clamp so that offset+size never runs past the end of the buffer. */
+ if ( (offset+size) > (int)mem->size)
+ {
+ size = mem->size - offset;
+ }
+
+ mem->is_cached = ump_arch_msync(mem->secure_id, mem->mapped_mem, mem->cookie, address, size, op);
+ return mem->is_cached ;
+}
+
+/** Start/stop marker for a batch of cache maintenance operations; forwarded
+ * directly to the arch backend. */
+UMP_API_EXPORT int ump_cache_operations_control(ump_cache_op_control op)
+{
+ return ump_arch_cache_operations_control(op);
+}
+
+/** Report that the buffer switches to a new hardware user, allowing the
+ * driver to flush caches when the previous user differs. */
+UMP_API_EXPORT int ump_switch_hw_usage( ump_handle memh, ump_hw_usage new_user )
+{
+ ump_mem * mem = (ump_mem*)memh;
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ return ump_arch_switch_hw_usage(mem->secure_id, new_user);
+}
+
+/** Locking buffer. Blocking call if the buffer is already locked. */
+UMP_API_EXPORT int ump_lock( ump_handle memh, ump_lock_usage lock_usage)
+{
+ ump_mem * mem = (ump_mem*)memh;
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ return ump_arch_lock(mem->secure_id, lock_usage);
+}
+
+/** Unlocking buffer. Let other users lock the buffer for their usage. */
+UMP_API_EXPORT int ump_unlock( ump_handle memh )
+{
+ ump_mem * mem = (ump_mem*)memh;
+ UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
+ return ump_arch_unlock(mem->secure_id);
+}
+
+/** Same as ump_switch_hw_usage(), but takes a secure ID instead of a handle. */
+UMP_API_EXPORT int ump_switch_hw_usage_secure_id( ump_secure_id ump_id, ump_hw_usage new_user )
+{
+ return ump_arch_switch_hw_usage(ump_id, new_user);
+}
+
+/** Locking buffer. Blocking call if the buffer is already locked. */
+UMP_API_EXPORT int ump_lock_secure_id( ump_secure_id ump_id, ump_lock_usage lock_usage )
+{
+ return ump_arch_lock(ump_id, lock_usage);
+}
+
+/** Unlocking buffer. Let other users lock the buffer for their usage */
+UMP_API_EXPORT int ump_unlock_secure_id( ump_secure_id ump_id )
+{
+ return ump_arch_unlock(ump_id);
+}
+
+/* Allocate a buffer which can be used directly by hardware, 4kb aligned.
+ * On success the handle holds exactly one kernel reference (the mapping's);
+ * on every failure path all kernel references taken here are released once. */
+static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache)
+{
+ ump_secure_id secure_id;
+ unsigned long allocated_size = size;
+
+ UMP_DEBUG_PRINT(4, ("%s Allocating UMP memory of size %lu cache=%d", __func__, size, cache));
+
+ secure_id = ump_arch_allocate(&allocated_size, constraints);
+ if (secure_id != UMP_INVALID_SECURE_ID)
+ {
+ unsigned long cookie;
+ void * mapping;
+
+ mapping = ump_arch_map(secure_id, allocated_size, cache, &cookie);
+ if (NULL != mapping)
+ {
+ /*
+ * PS: By now we have actually increased the ref count in the device driver by 2,
+ * one for the allocation itself, and one for the mapping.
+ */
+ ump_mem * mem;
+ mem = _ump_osu_calloc(1, sizeof(*mem));
+ if (NULL != mem)
+ {
+ mem->secure_id = secure_id;
+ mem->mapped_mem = mapping;
+ mem->size = allocated_size;
+ mem->cookie = cookie;
+ mem->is_cached = UMP_CACHE_ENABLE; /* Default to ON, is disabled later if not */
+
+ _ump_osu_lock_auto_init(&mem->ref_lock, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);
+ UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
+ mem->ref_count = 1;
+
+ /*
+ * ump_arch_allocate() gave us a kernel space reference, and the same did ump_arch_map()
+ * We release the one from ump_arch_allocate(), and rely solely on the one from the ump_arch_map()
+ * That is, ump_arch_unmap() should now do the final release towards the UMP kernel space driver.
+ */
+ ump_arch_reference_release(secure_id);
+
+ /* This is called only to set the cache settings in this handle */
+ ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);
+
+ UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));
+
+ return (ump_handle)mem;
+ }
+
+ ump_arch_unmap(mapping, allocated_size, cookie); /* Unmap the memory */
+ /* BUGFIX: the allocation reference is released once, below. Releasing it
+ * here as well over-decremented the kernel refcount when _ump_osu_calloc()
+ * failed, potentially freeing a buffer still referenced elsewhere. */
+ }
+
+ ump_arch_reference_release(secure_id); /* Release reference added when we allocated the UMP memory */
+ }
+
+ UMP_DEBUG_PRINT(4, ("Allocation of UMP memory failed"));
+ return UMP_INVALID_MEMORY_HANDLE;
+}
+
+/** Import an ION buffer into UMP and wrap it in a new handle.
+ * Mirrors ump_ref_drv_allocate_internal(), but obtains the secure ID from an
+ * existing ION allocation (via ump_arch_ion_import) instead of allocating. */
+static ump_handle ump_ref_drv_ion_import_internal(int ion_fd, ump_alloc_constraints constraints, ump_cache_enabled cache)
+{
+ ump_secure_id secure_id;
+ unsigned long allocated_size = 0;
+
+ UMP_DEBUG_PRINT(4, ("%s iond_fd=%d constraints=%x cache=%x, Allocating ION memory\n", __func__, ion_fd, constraints, cache));
+
+ secure_id = ump_arch_ion_import(ion_fd, &allocated_size, constraints);
+ if (secure_id != UMP_INVALID_SECURE_ID)
+ {
+ unsigned long cookie;
+ void * mapping;
+
+ UMP_DEBUG_PRINT(4, ("%s secure_id=%x allocated_size=%lu\n", __func__, secure_id, allocated_size));
+
+ mapping = ump_arch_map(secure_id, allocated_size, cache, &cookie);
+ if (NULL != mapping)
+ {
+ /*
+ * PS: By now we have actually increased the ref count in the device driver by 2,
+ * one for the import itself, and one for the mapping.
+ */
+ ump_mem * mem;
+ mem = _ump_osu_calloc(1, sizeof(*mem));
+ if (NULL != mem)
+ {
+ mem->secure_id = secure_id;
+ mem->mapped_mem = mapping;
+ mem->size = allocated_size;
+ mem->cookie = cookie;
+ mem->is_cached = UMP_CACHE_ENABLE; /* Default to ON, is disabled later if not */
+
+ _ump_osu_lock_auto_init(&mem->ref_lock, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);
+ UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
+ mem->ref_count = 1;
+
+ /*
+ * ump_arch_ion_import() gave us a kernel space reference, and the same did ump_arch_map()
+ * We release the one from the import, and rely solely on the one from the ump_arch_map()
+ * That is, ump_arch_unmap() should now do the final release towards the UMP kernel space driver.
+ */
+ ump_arch_reference_release(secure_id);
+
+ /* This is called only to set the cache settings in this handle */
+ ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);
+
+ UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));
+
+ return (ump_handle)mem;
+ }
+
+ ump_arch_unmap(mapping, allocated_size, cookie); /* Unmap the memory */
+ /* BUGFIX: the import reference is released once, below. Releasing it here
+ * as well over-decremented the kernel refcount on this error path. */
+ }
+
+ ump_arch_reference_release(secure_id); /* Release reference added when we imported the ION memory */
+ }
+
+ UMP_DEBUG_PRINT(4, ("Allocation of UMP memory failed"));
+ return UMP_INVALID_MEMORY_HANDLE;
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_debug.h
+ *
+ * The file include several useful macros for debugging and printing.
+ * - UMP_PRINTF(...) Do not use this function: Will be included in Release builds.
+ * - UMP_DEBUG_TRACE() Prints current location in code.
+ * - UMP_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=UMP_DEBUG_LEVEL.
+ * - UMP_DEBUG_TPRINT(nr, X ) Prints the source trace and second argument if nr<=UMP_DEBUG_LEVEL.
+ * - UMP_DEBUG_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - UMP_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - UMP_DEBUG_ASSERT_RANGE(x, min, max) Triggers if variable x is not between or equal to max and min.
+ * - UMP_DEBUG_ASSERT_LEQ(x, max) Triggers if variable x is not less than or equal to max.
+ * - UMP_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - UMP_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function: UMP_PRINTF(...) is routed to _ump_sys_printf
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ *
+ */
+#ifndef _UMP_DEBUG_H_
+#define _UMP_DEBUG_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <cutils/log.h> /* ALOGE, used by UMP_DEBUG_PRINT on Android */
+
+/* START: Configuration - each macro may be pre-defined by the build to override the default */
+#ifndef UMP_PRINTF
+ #define UMP_PRINTF printf
+#endif /* UMP_PRINTF */
+
+#ifndef UMP_PRINT_FLUSH
+ #define UMP_PRINT_FLUSH do {} while (0)
+#endif /* UMP_PRINT_FLUSH */
+
+#ifndef UMP_DEBUG_LEVEL
+ #define UMP_DEBUG_LEVEL 1
+#endif /* UMP_DEBUG_LEVEL */
+
+#ifndef UMP_DEBUG_ERROR_START_MSG
+ #define UMP_DEBUG_ERROR_START_MSG do {\
+ UMP_PRINTF("*********************************************************************\n");\
+ UMP_PRINT_FLUSH; } while (0)
+#endif /* UMP_DEBUG_ERROR_START_MSG */
+
+#ifndef UMP_DEBUG_ERROR_STOP_MSG
+ #define UMP_DEBUG_ERROR_STOP_MSG do { UMP_PRINTF("\n"); UMP_PRINT_FLUSH; } while (0)
+#endif /* UMP_DEBUG_ERROR_STOP_MSG */
+
+#ifndef UMP_ASSERT_QUIT_CMD
+ #define UMP_ASSERT_QUIT_CMD abort()
+#endif /* UMP_ASSERT_QUIT_CMD */
+/* STOP: Configuration */
+
+/**
+ * The macro UMP_FUNCTION evaluates to the name of the function enclosing
+ * this macro's usage, or "<unknown>" if not supported.
+ */
+#if (defined(__SYMBIAN32__) && defined(__ARMCC__)) || defined(_MSC_VER)
+# define UMP_FUNCTION __FUNCTION__
+#elif __STDC__ && __STDC_VERSION__ >= 199901L
+# define UMP_FUNCTION __FUNCTION__
+#elif defined(__GNUC__) && __GNUC__ >= 2
+# define UMP_FUNCTION __FUNCTION__
+#elif defined(__func__)
+# define UMP_FUNCTION __func__
+#else
+# define UMP_FUNCTION "<unknown>"
+#endif
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define UMP_IGNORE(x) (void)x
+
+/**
+ * @def UMP_DEBUG_TRACE()
+ * @brief Prints current location in code.
+ * Can be turned off by defining UMP_DEBUG_SKIP_TRACE
+ */
+
+#ifndef UMP_DEBUG_SKIP_TRACE
+ #ifndef UMP_DEBUG_SKIP_PRINT_FUNCTION_NAME
+ #define UMP_DEBUG_TRACE() do { UMP_PRINTF( "In file: "__FILE__ \
+ " function: %s() line:%4d\n" , UMP_FUNCTION, __LINE__); UMP_PRINT_FLUSH; } while (0)
+ #else
+ #define UMP_DEBUG_TRACE() do { UMP_PRINTF( "In file: "__FILE__ " line:%4d\n" , __LINE__); UMP_PRINT_FLUSH; } while (0)
+ #endif /* UMP_DEBUG_SKIP_PRINT_FUNCTION_NAME */
+#else
+ #define UMP_DEBUG_TRACE()
+#endif /* UMP_DEBUG_SKIP_TRACE */
+
+/**
+ * @def UMP_DEBUG_PRINT(nr, (X) )
+ * @brief Prints the second argument if nr<=UMP_DEBUG_LEVEL.
+ * Can be turned off by defining UMP_DEBUG_SKIP_PRINT
+ * @param nr If nr <= UMP_DEBUG_LEVEL, we print the text.
+ * @param X A parantese with the contents to be sent to UMP_PRINTF
+ */
+#ifndef UMP_DEBUG_SKIP_PRINT
+ #define UMP_DEBUG_PRINT(nr, X ) if ( nr<=UMP_DEBUG_LEVEL ) ALOGE X ;
+#else
+ #define UMP_DEBUG_PRINT(nr, X )
+#endif /* UMP_DEBUG_SKIP_PRINT */
+
+/**
+ * @def UMP_DEBUG_TPRINT(nr, (X) )
+ * @brief Prints the second argument if nr<=UMP_DEBUG_LEVEL.
+ * Can be turned off by defining UMP_DEBUG_SKIP_TPRINT.
+ * Can be shortened by defining UMP_DEBUG_TPRINT_SKIP_FUNCTION.
+ * @param nr If nr <= UMP_DEBUG_LEVEL, we print the text.
+ * @param X Parentheses with the contents to be sent to UMP_PRINTF
+ */
+
+/* helper to handle if the function name should be included or not */
+#ifndef UMP_DEBUG_TPRINT_SKIP_FUNCTION
+ #define UMP_DEBUG_TPRINT_INTERN do {UMP_PRINTF( ""__FILE__" %s()%4d " , UMP_FUNCTION, __LINE__); UMP_PRINT_FLUSH; } while (0)
+#else
+ #define UMP_DEBUG_TPRINT_INTERN do {UMP_PRINTF( ""__FILE__ "%4d " , __LINE__); UMP_PRINT_FLUSH; } while (0)
+#endif /* UMP_DEBUG_TPRINT_SKIP_FUNCTION */
+
+#ifndef UMP_DEBUG_SKIP_TPRINT
+ #define UMP_DEBUG_TPRINT(nr, X ) \
+ do{\
+ if ( nr<=UMP_DEBUG_LEVEL )\
+ {\
+ UMP_DEBUG_TPRINT_INTERN;\
+ UMP_PRINTF X ;\
+ UMP_PRINT_FLUSH;\
+ }\
+ } while (0)
+#else
+ #define UMP_DEBUG_TPRINT(nr, X )
+#endif /* UMP_DEBUG_SKIP_TPRINT */
+
+/**
+ * @def UMP_DEBUG_ERROR( (X) )
+ * @brief Prints an error text, a source trace, and the given error message.
+ * Prints filename, function, linenr, and the given error message.
+ * The error message must be inside a second set of parentheses.
+ * The error message is written on a separate line, and a NL char is added.
+ * Can be turned off by defining UMP_DEBUG_SKIP_ERROR;
+ * You do not need to type the words ERROR in the message, since it will
+ * be added anyway.
+ *
+ * @note You should not end the text with a newline, since it is added by the macro.
+ * @note You should not write "ERROR" in the text, since it is added by the macro.
+ * @param X Parentheses with the contents to be sent to UMP_PRINTF
+ */
+
+#ifndef UMP_DEBUG_SKIP_ERROR
+ #define UMP_DEBUG_ERROR( X ) \
+ do{ \
+ UMP_DEBUG_ERROR_START_MSG;\
+ UMP_PRINTF("ERROR: ");\
+ UMP_PRINT_FLUSH;\
+ UMP_DEBUG_TRACE(); \
+ UMP_PRINTF X ; \
+ UMP_PRINT_FLUSH;\
+ UMP_DEBUG_ERROR_STOP_MSG;\
+ } while (0)
+#else
+ #define UMP_DEBUG_ERROR( X ) do{ ; } while ( 0 )
+#endif /* UMP_DEBUG_SKIP_ERROR */
+
+/**
+ * @def UMP_DEBUG_ASSERT(expr, (X) )
+ * @brief If the asserted expr is false, the program will exit.
+ * Prints filename, function, linenr, and the given error message.
+ * The error message must be inside a second set of parentheses.
+ * The error message is written on a separate line, and a NL char is added.
+ * Can be turned off by defining UMP_DEBUG_SKIP_ASSERT;
+ * You do not need to type the words ASSERT in the message, since it will
+ * be added anyway.
+ *
+ * @param X Parentheses with the contents to be sent to UMP_PRINTF
+ * Prints filename, function, linenr, and the error message
+ * on a separate line. A newline char is added at the end.
+ * Can be turned off by defining UMP_DEBUG_SKIP_ASSERT
+ * @param expr Will exit program if \a expr is false;
+ * @param (X) Text that will be written if the assertion toggles.
+ */
+
+#ifndef UMP_DEBUG_SKIP_ASSERT
+ #define UMP_DEBUG_ASSERT(expr, X ) \
+ do{\
+ if ( !(expr) ) \
+ { \
+ UMP_DEBUG_ERROR_START_MSG;\
+ UMP_PRINTF("ASSERT EXIT: ");\
+ UMP_PRINT_FLUSH;\
+ UMP_DEBUG_TRACE(); \
+ UMP_PRINTF X ; \
+ UMP_PRINT_FLUSH;\
+ UMP_DEBUG_ERROR_STOP_MSG;\
+ UMP_ASSERT_QUIT_CMD;\
+ }\
+ } while (0)
+#else
+ #define UMP_DEBUG_ASSERT(expr, X)
+#endif /* UMP_DEBUG_SKIP_ASSERT */
+
+
+/**
+ * @def UMP_DEBUG_ASSERT_POINTER(pointer)
+ * @brief If the asserted pointer is NULL, the program terminates and TRACE info is printed
+ * The checking is disabled if "UMP_DEBUG_SKIP_ASSERT" is defined.
+ */
+#define UMP_DEBUG_ASSERT_POINTER(pointer) UMP_DEBUG_ASSERT(pointer, ("Null pointer " #pointer) )
+
+/**
+ * @def UMP_DEBUG_ASSERT_HANDLE(handle)
+ * @brief If the asserted handle is not a valid handle, the program terminates and TRACE info is printed
+ * The checking is disabled if "UMP_DEBUG_SKIP_ASSERT" is defined.
+ */
+#define UMP_DEBUG_ASSERT_HANDLE(handle) UMP_DEBUG_ASSERT(UMP_NO_HANDLE != (handle), ("Invalid handle" #handle) )
+
+/**
+ * @def UMP_DEBUG_ASSERT_ALIGNMENT(ptr, align)
+ * @brief If the asserted pointer is not aligned to align, the program terminates with trace info printed.
+ * The checking is disabled if "UMP_DEBUG_SKIP_ASSERT" is defined.
+ */
+#ifndef UMP_DEBUG_SKIP_ASSERT
+ #define UMP_DEBUG_ASSERT_ALIGNMENT(ptr, align) do { \
+ UMP_DEBUG_ASSERT(0 == (align & (align - 1)), ("align %d is not a power-of-two", align)); \
+ UMP_DEBUG_ASSERT(0 == (((u32)(ptr)) & (align - 1)), ("ptr %p not aligned to %d bytes", (void*)ptr, align)); \
+ } while (0)
+#else
+ #define UMP_DEBUG_ASSERT_ALIGNMENT(ptr, align)
+#endif /* UMP_DEBUG_SKIP_ASSERT */
+
+/**
+ * @def UMP_DEBUG_ASSERT_RANGE(x,min,max)
+ * @brief If variable x is not between or equal to max and min, the assertion triggers.
+ * The checking is disabled if "UMP_DEBUG_SKIP_ASSERT" is defined.
+ */
+#define UMP_DEBUG_ASSERT_RANGE(x, min, max) \
+ UMP_DEBUG_ASSERT( (x) >= (min) && (x) <= (max), \
+ (#x " out of range (%2.2f)", (double)x ) \
+ )
+
+/**
+ * @def UMP_DEBUG_ASSERT_LEQ(x,max)
+ * @brief If variable x is greater than max, the assertion triggers.
+ * The checking is disabled if "UMP_DEBUG_SKIP_ASSERT" is defined.
+ */
+#define UMP_DEBUG_ASSERT_LEQ(x, max) \
+ UMP_DEBUG_ASSERT( (x) <= (max), \
+ (#x " out of range (%2.2f)", (double)x ) \
+ )
+
+/**
+ * @def UMP_DEBUG_CODE( X )
+ * @brief Run the code X on debug builds.
+ * The code will not be used if UMP_DEBUG_SKIP_CODE is defined .
+ *
+ */
+#ifdef UMP_DEBUG_SKIP_CODE
+ #define UMP_DEBUG_CODE( X )
+#else
+ #define UMP_DEBUG_CODE( X ) X
+#endif /* UMP_DEBUG_SKIP_CODE */
+
+#endif /* _UMP_DEBUG_H_ */
+
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_osu.h
+ * Defines the OS abstraction layer for the base driver
+ */
+
+#ifndef __UMP_OSU_H__
+#define __UMP_OSU_H__
+
+#include <stdarg.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/* Fixed-width integer aliases.
+ * NOTE(review): assumes 'unsigned int' is 32 bits on every supported
+ * target/compiler — confirm for any new toolchain. */
+typedef unsigned int u32;
+#ifdef _MSC_VER
+	typedef unsigned __int64 u64;
+	typedef signed __int64 s64;
+#else
+	typedef unsigned long long u64;
+	typedef signed long long s64;
+#endif
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+/* Boolean type used throughout the OSU layer; see UMP_TRUE / UMP_FALSE. */
+typedef unsigned long ump_bool;
+
+#ifndef UMP_TRUE
+#define UMP_TRUE ((ump_bool)1)
+#endif
+
+#ifndef UMP_FALSE
+#define UMP_FALSE ((ump_bool)0)
+#endif
+
+/* Marker for file-local (internal linkage) definitions. */
+#define UMP_STATIC static
+
+/**
+ * @addtogroup ump_user_space_api Unified Device Driver (UDD) APIs used by UMP
+ *
+ * @{
+ */
+
+/**
+ * @defgroup ump_osuapi UDD OS Abstraction for User-side (OSU) APIs for UMP
+ *
+ * @{
+ */
+
+/* The following is necessary to prevent the _ump_osk_errcode_t doxygen from
+ * becoming unreadable: */
+/** @cond OSU_COPY_OF__UMP_OSU_ERRCODE_T */
+
+/**
+ * @brief OSU/OSK Error codes.
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK/OSU to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK/OSU layer must translate native OS error codes to
+ * _ump_osk/u_errcode_t codes. Similarly, the U/K layer must translate from
+ * _ump_osk/u_errcode_t codes to native OS error codes.
+ *
+ */
+typedef enum
+{
+	_UMP_OSK_ERR_OK = 0, /**< Success. */
+	_UMP_OSK_ERR_FAULT = -1, /**< General non-success */
+	_UMP_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+	_UMP_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+	_UMP_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+	_UMP_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+	_UMP_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptible mutex is interrupted. Ignore otherwise. */
+	_UMP_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+	_UMP_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+	_UMP_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _ump_osk_errcode_t;
+
+/** @endcond */ /* end cond OSU_COPY_OF__UMP_OSU_ERRCODE_T */
+
+/**
+ * @brief OSU Error codes.
+ *
+ * OSU error codes - enum values intentionally same as OSK
+ */
+typedef enum
+{
+	_UMP_OSU_ERR_OK = 0, /**< Success. */
+	_UMP_OSU_ERR_FAULT = -1, /**< General non-success */
+	_UMP_OSU_ERR_TIMEOUT = -2, /**< Timeout occurred */
+} _ump_osu_errcode_t;
+
+/** @brief Translate OSU error code to base driver error code.
+ *
+ * The _UMP_OSU_TRANSLATE_ERROR macro translates an OSU error code to the
+ * error codes in use by the base driver.
+ */
+#define _UMP_OSU_TRANSLATE_ERROR(_ump_osu_errcode) ( ( _UMP_OSU_ERR_OK == (_ump_osu_errcode) ) ? UMP_ERR_NO_ERROR : UMP_ERR_FUNCTION_FAILED)
+
+/** @defgroup _ump_osu_lock OSU Mutual Exclusion Locks
+ * @{ */
+
+/** @brief OSU Mutual Exclusion Lock flags type.
+ *
+ * This is made to look like and function identically to the OSK locks (refer
+ * to \ref _ump_osk_lock). However, please note the following \b important
+ * differences:
+ * - the OSU default lock is a Sleeping, non-interruptible mutex.
+ * - the OSU adds the ANYUNLOCK type of lock which allows a thread which doesn't
+ * own the lock to release the lock.
+ * - the order parameter when creating a lock is currently unused
+ *
+ * @note Pay careful attention to the difference in default locks for OSU and
+ * OSK locks; OSU locks are always non-interruptible, but OSK locks are by
+ * default, interruptible. This has implications for systems that do not
+ * distinguish between user and kernel mode.
+ */
+typedef enum
+{
+ _UMP_OSU_LOCKFLAG_DEFAULT = 0, /**< Default lock type. */
+ /** @enum _ump_osu_lock_flags_t
+ *
+ * Flags from 0x0--0x8000 are RESERVED for Kernel-mode
+ */
+ _UMP_OSU_LOCKFLAG_ANYUNLOCK = 0x10000, /**< Mutex that guarantees that any thread can unlock it when locked. Otherwise, this will not be possible. */
+ /** @enum _ump_osu_lock_flags_t
+ *
+ * Flags from 0x10000 are RESERVED for User-mode
+ */
+ _UMP_OSU_LOCKFLAG_STATIC = 0x20000, /* Flag in OSU reserved range to identify lock as a statically initialized lock */
+
+ } _ump_osu_lock_flags_t;
+
+typedef enum
+{
+ _UMP_OSU_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _UMP_OSU_LOCKMODE_RW = 0x0, /**< Default. Lock is used to protect data that is read from and written to */
+ /** @enum _ump_osu_lock_mode_t
+ *
+ * Lock modes 0x1--0x3F are RESERVED for Kernel-mode */
+} _ump_osu_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects (defined in ump_osu_locks.c). */
+typedef struct _ump_osu_lock_t_struct _ump_osu_lock_t;
+
+/** @brief The number of static locks supported in _ump_osu_lock_static().
+ * NOTE(review): this sizeof-based expression can only be evaluated in the
+ * translation unit that defines the _ump_osu_static_locks array — confirm no
+ * other user of this header expands the macro. */
+#define UMP_OSU_STATIC_LOCK_COUNT (sizeof(_ump_osu_static_locks) / sizeof(_ump_osu_lock_t))
+
+/** @} */ /* end group _ump_osu_lock */
+
+/** @defgroup _ump_osu_memory OSU Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _ump_osu_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_ump_osu_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _ump_osu_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _ump_osu_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_ump_osu_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _ump_osu_malloc(), _ump_osu_calloc() and
+ * _ump_osu_realloc() must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _ump_osu_free( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_ump_osu_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a size bytes of the block of memory pointed to by \a ptr to
+ * the specified value
+ * @param ptr Pointer to the block of memory to fill.
+ * @param chr Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param size Number of bytes to be set to the value.
+ * @return \a ptr is always passed through unmodified
+ */
+void *_ump_osu_memset( void *ptr, u32 chr, u32 size );
+
+/** @} */ /* end group _ump_osu_memory */
+
+
+/** @addtogroup _ump_osu_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock.
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * The parameter \a initial must be zero.
+ *
+ * At present, the parameter \a order must be zero. It remains for future
+ * expansion for mutex order checking.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, see \ref _ump_osu_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. SBZ.
+ * @return On success, a pointer to a \ref _ump_osu_lock_t object. NULL on failure.
+ */
+_ump_osu_lock_t *_ump_osu_lock_init( _ump_osu_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Obtain a statically initialized Mutual Exclusion Lock.
+ *
+ * Retrieves a reference to a statically initialized lock. Up to
+ * UMP_OSU_STATIC_LOCK_COUNT statically initialized locks are
+ * available. Only _ump_osu_lock_wait(), _ump_osu_lock_trywait(),
+ * _ump_osu_lock_signal() can be used with statically initialized locks.
+ * _UMP_OSU_LOCKMODE_RW mode should be used when waiting and signalling
+ * statically initialized locks.
+ *
+ * For the same \a nr a pointer to the same statically initialized lock is
+ * returned. That is, given the following code:
+ * @code
+ * extern u32 n;
+ *
+ * _ump_osu_lock_t *locka = _ump_osu_lock_static(n);
+ * _ump_osu_lock_t *lockb = _ump_osu_lock_static(n);
+ * @endcode
+ * Then (locka == lockb), for all 0 <= n < UMP_OSU_STATIC_LOCK_COUNT.
+ *
+ * @param nr index of a statically initialized lock [0..UMP_OSU_STATIC_LOCK_COUNT-1]
+ * @return On success, a pointer to a _ump_osu_lock_t object. NULL on failure.
+ */
+_ump_osu_lock_t *_ump_osu_lock_static( u32 nr );
+
+/** @brief Initialize a Mutual Exclusion Lock safely across multiple threads.
+ *
+ * The _ump_osu_lock_auto_init() function guarantees that the given lock will
+ * be initialized once and precisely once, even in a situation involving
+ * multiple threads.
+ *
+ * This is necessary because the first call to certain Public API functions must
+ * initialize the API. However, there can be a race involved to call the first
+ * library function in multi-threaded applications. To resolve this race, a
+ * mutex can be used. This mutex must be initialized, but initialized only once
+ * by any thread that might compete for its initialization. This function
+ * guarantees the initialization to happen correctly, even when there is an
+ * initialization race between multiple threads.
+ *
+ * Otherwise, the operation is identical to the _ump_osu_lock_init() function.
+ * For more details, refer to _ump_osu_lock_init().
+ *
+ * @param pplock pointer to storage for a _ump_osu_lock_t pointer. This
+ * _ump_osu_lock_t pointer may point to a _ump_osu_lock_t that has been
+ * initialized already
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined. Refer to
+ * \ref _ump_osu_lock_flags_t for more information.
+ * The absence of any flags (the value 0) results in a sleeping-mutex,
+ * which is non-interruptible.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. SBZ.
+ * @return On success, _UMP_OSU_ERR_OK is returned and a pointer to an
+ * initialized \ref _ump_osu_lock_t object is written into \a *pplock.
+ * _UMP_OSU_ERR_FAULT is returned on failure.
+ */
+_ump_osu_errcode_t _ump_osu_lock_auto_init( _ump_osu_lock_t **pplock, _ump_osu_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained).
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Currently this
+ * must be _UMP_OSU_LOCKMODE_RW.
+ * @return On success, _UMP_OSU_ERR_OK, _UMP_OSU_ERR_FAULT on error.
+ */
+_ump_osu_errcode_t _ump_osu_lock_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode);
+
+/** @brief Wait for a lock to be signalled (obtained) with timeout
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * This version can return early if it cannot obtain the lock within the given timeout.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Currently this
+ * must be _UMP_OSU_LOCKMODE_RW.
+ * @param timeout Relative time in microseconds for the timeout
+ * @return _UMP_OSU_ERR_OK if the lock was obtained, _UMP_OSU_ERR_TIMEOUT if the timeout expired or _UMP_OSU_ERR_FAULT on error.
+ */
+_ump_osu_errcode_t _ump_osu_lock_timed_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode, u64 timeout);
+
+/** @brief Test for a lock to be signalled and obtains the lock when so.
+ *
+ * Obtains the lock only when it is in signalled state. The lock is then
+ * marked as unsignalled. The lock is released again by signalling
+ * it by _ump_osu_lock_signal().
+ *
+ * If the lock could not be obtained immediately (that is, another thread
+ * currently holds the lock), then this function \b does \b not wait for the
+ * lock to be in a signalled state. Instead, an error code is immediately
+ * returned to indicate that the thread could not obtain the lock.
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Currently this
+ * must be _UMP_OSU_LOCKMODE_RW.
+ * @return When the lock was obtained, _UMP_OSU_ERR_OK. If the lock could not
+ * be obtained, _UMP_OSU_ERR_FAULT.
+ */
+_ump_osu_errcode_t _ump_osu_lock_trywait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode);
+
+/** @brief Signal (release) a lock.
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock, unless the lock was created using the _UMP_OSU_LOCKFLAG_ANYUNLOCK flag.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _ump_osu_lock_signal( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode );
+
+/** @brief Terminate a lock.
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _ump_osu_lock_term( _ump_osu_lock_t *lock );
+/** @} */ /* end group _ump_osu_lock */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_OSU_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+ _UMP_IOC_CACHE_OPERATIONS_CONTROL,
+ _UMP_IOC_SWITCH_HW_USAGE,
+ _UMP_IOC_LOCK,
+ _UMP_IOC_UNLOCK,
+ /* as defined in kernel drivers/gpu/mali400/r3p2/ump/include/ump_uk_types.h */
+ _UMP_IOC_ION_IMPORT,
+ /*_UMP_IOC_DMABUF_IMPORT,*/
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_INVALIDATE = 2,
+ _UMP_UK_MSYNC_FLUSH_L1 = 3,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+typedef enum
+{
+ _UMP_UK_CACHE_OP_START = 0,
+ _UMP_UK_CACHE_OP_FINISH = 1,
+} ump_uk_cache_op_control;
+
+typedef enum
+{
+ _UMP_UK_READ = 1,
+ _UMP_UK_READ_WRITE = 3,
+} ump_uk_lock_usage;
+
+typedef enum
+{
+ _UMP_UK_USED_BY_CPU = 0,
+ _UMP_UK_USED_BY_MALI = 1,
+ _UMP_UK_USED_BY_UNKNOWN_DEVICE= 100,
+} ump_uk_user;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+	void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
+	u32 secure_id;                          /**< Return value from DD to Userdriver */
+	u32 size;                               /**< Input and output. Requested size; input. Returned size; output */
+	ump_uk_alloc_constraints constraints;   /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+typedef struct _ump_uk_ion_import_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ int ion_fd; /**< ion_fd */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_ion_import_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+typedef struct _ump_uk_cache_operations_control_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ ump_uk_cache_op_control op; /**< [in] cache operations start/stop */
+} _ump_uk_cache_operations_control_s;
+
+
+typedef struct _ump_uk_switch_hw_usage_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_user new_user; /**< [in] cookie stored with reference to the kernel mapping internals */
+
+} _ump_uk_switch_hw_usage_s;
+
+typedef struct _ump_uk_lock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_lock_usage lock_usage;
+} _ump_uk_lock_s;
+
+typedef struct _ump_uk_unlock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+} _ump_uk_unlock_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include <ump/ump_uk_types.h>
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file UMP_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userpace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
+/* MALI_SEC */
+#define UMP_IOC_ION_IMPORT _IOW(UMP_IOCTL_NR, _UMP_IOC_ION_IMPORT, _ump_uk_ion_import_s)
+/* MALI_SEC */
+/*#define UMP_IOC_DMABUF_IMPORT _IOW(UMP_IOCTL_NR, _UMP_IOC_DMABUF_IMPORT,\
+ struct ump_uk_dmabuf) */
+#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR, _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
+
+#define UMP_IOC_SWITCH_HW_USAGE _IOW(UMP_IOCTL_NR, _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
+#define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s)
+#define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ((!defined _XOPEN_SOURCE) || ((_XOPEN_SOURCE - 0) < 600))
+#undef _XOPEN_SOURCE
+#define _XOPEN_SOURCE 600
+#endif
+
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112L
+#elif _POSIX_C_SOURCE < 200112L
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112L
+#endif
+
+#include <ump/ump_osu.h>
+#include <ump/ump_debug.h>
+
+#include <pthread.h>
+#include <time.h>
+#include <sys/time.h>
+#include <errno.h>
+
+/**
+ * @file ump_osu_locks.c
+ * File implements the user side of the OS interface
+ */
+
+/** @opt Most of the time, we use the plain mutex type of osu_lock, and so
+ * only require the flags and mutex members. This costs 2 extra DWORDS, but
+ * most of the time we don't use those DWORDS.
+ * Therefore, ANY_UNLOCK type osu_locks can be implemented as a second
+ * structure containing the member _ump_osu_lock_t lock_t, plus the extra
+ * state required. Then, we use &container->lock_t when passing out of the
+ * OSU api, and CONTAINER_OF() when passing back in to recover the original
+ * structure. */
+
+/** Private declaration of the OSU lock type */
+struct _ump_osu_lock_t_struct
+{
+	/** At present, only two types of mutex, so we store this information as
+	 * the flags supplied at init time */
+	_ump_osu_lock_flags_t flags;
+
+	pthread_mutex_t mutex; /**< Used in both plain and ANY_UNLOCK osu_locks */
+
+	/* Extra State for ANY_UNLOCK osu_locks. These are UNINITIALIZED when
+	 * flags does not contain _UMP_OSU_LOCKFLAG_ANYUNLOCK: */
+	pthread_cond_t condition; /**< The condition object to use while blocking */
+	ump_bool state; /**< The boolean which indicates the event's state; UMP_TRUE == locked, UMP_FALSE == unlocked */
+
+	UMP_DEBUG_CODE(
+		/** debug checking of locks: mode the lock is currently held in, or _UMP_OSU_LOCKMODE_UNDEF when free */
+		_ump_osu_lock_mode_t locked_as;
+	) /* UMP_DEBUG_CODE */
+
+};
+
+/* Provide four statically initialized locks; the entry count here determines
+ * UMP_OSU_STATIC_LOCK_COUNT (the previous comment said "two", which did not
+ * match the array). */
+UMP_STATIC _ump_osu_lock_t _ump_osu_static_locks[] =
+{
+	{
+		_UMP_OSU_LOCKFLAG_STATIC,
+		PTHREAD_MUTEX_INITIALIZER,
+		PTHREAD_COND_INITIALIZER,
+		UMP_FALSE,
+		UMP_DEBUG_CODE( _UMP_OSU_LOCKMODE_UNDEF )
+	},
+	{
+		_UMP_OSU_LOCKFLAG_STATIC,
+		PTHREAD_MUTEX_INITIALIZER,
+		PTHREAD_COND_INITIALIZER,
+		UMP_FALSE,
+		UMP_DEBUG_CODE( _UMP_OSU_LOCKMODE_UNDEF )
+	},
+	{
+		_UMP_OSU_LOCKFLAG_STATIC,
+		PTHREAD_MUTEX_INITIALIZER,
+		PTHREAD_COND_INITIALIZER,
+		UMP_FALSE,
+		UMP_DEBUG_CODE( _UMP_OSU_LOCKMODE_UNDEF )
+	},
+	{
+		_UMP_OSU_LOCKFLAG_STATIC,
+		PTHREAD_MUTEX_INITIALIZER,
+		PTHREAD_COND_INITIALIZER,
+		UMP_FALSE,
+		UMP_DEBUG_CODE( _UMP_OSU_LOCKMODE_UNDEF )
+	},
+};
+
+/* Critical section for auto_init */
+UMP_STATIC pthread_mutex_t static_auto_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+
+/* Thread-safe, once-only initialization of *pplock, serialized on
+ * static_auto_init_mutex. Returns _UMP_OSU_ERR_OK when *pplock is valid
+ * (whether it was already initialized or this call created it), and
+ * _UMP_OSU_ERR_FAULT when lock creation failed.
+ * NOTE(review): the first, unlocked read of *pplock below is the classic
+ * double-checked locking pattern; it relies on pointer stores being atomic
+ * and visible without explicit barriers — confirm this is acceptable on all
+ * supported targets. */
+_ump_osu_errcode_t _ump_osu_lock_auto_init( _ump_osu_lock_t **pplock, _ump_osu_lock_flags_t flags, u32 initial, u32 order )
+{
+	int call_result;
+	/* Validate parameters: */
+	UMP_DEBUG_ASSERT_POINTER( pplock );
+
+	/** @opt We don't lock the Critical Section or do anything if this is already non-null */
+	if ( NULL != *pplock)
+	{
+		return _UMP_OSU_ERR_OK;
+	}
+
+	/* We MIGHT need to initialize it, lock the Critical Section and check again */
+	call_result = pthread_mutex_lock(&static_auto_init_mutex);
+	/* It would be a programming error for this to fail: */
+	UMP_DEBUG_ASSERT( 0 == call_result,
+						  ("failed to lock critical section\n") );
+
+	if ( NULL != *pplock )
+	{
+		/*
+		   We caught a race condition to initialize this osu_lock.
+		   The other thread won the race, so the osu_lock is now initialized.
+		*/
+		call_result = pthread_mutex_unlock(&static_auto_init_mutex);
+
+		UMP_DEBUG_ASSERT(0 == call_result,
+						 ("failed to unlock critical section\n"));
+
+		return _UMP_OSU_ERR_OK;
+	}
+
+	/* We're the first thread in: initialize the osu_lock */
+	*pplock = _ump_osu_lock_init( flags, initial, order );
+
+	if ( NULL == *pplock )
+	{
+		/* osu_lock creation failed */
+		call_result = pthread_mutex_unlock(&static_auto_init_mutex);
+		UMP_DEBUG_ASSERT(0 == call_result,
+						 ("failed to unlock critical section\n"));
+
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+
+	/* osu_lock created OK */
+	call_result = pthread_mutex_unlock(&static_auto_init_mutex);
+
+	UMP_DEBUG_ASSERT(0 == call_result,
+					 ("failed to unlock critical section\n"));
+
+	UMP_IGNORE( call_result );
+
+	return _UMP_OSU_ERR_OK;
+}
+
+
+/* Allocate and initialize a new osu_lock. The mutex is created in the
+ * unlocked state. For _UMP_OSU_LOCKFLAG_ANYUNLOCK locks a condition variable
+ * plus boolean state are additionally initialized. All failure paths release
+ * every resource acquired so far and return NULL; on success the caller owns
+ * the returned lock and must release it with _ump_osu_lock_term(). */
+_ump_osu_lock_t *_ump_osu_lock_init( _ump_osu_lock_flags_t flags, u32 initial, u32 order )
+{
+	_ump_osu_lock_t * lock;
+	pthread_mutexattr_t mutex_attributes;
+
+	UMP_IGNORE(order); /* order isn't implemented yet, for now callers should set it to zero. */
+
+	/* Validate parameters: */
+	/* Flags acceptable */
+	UMP_DEBUG_ASSERT( 0 == ( flags & ~( _UMP_OSU_LOCKFLAG_ANYUNLOCK)),
+			("incorrect flags or trying to initialise a statically initialized lock, %.8X\n", flags) );
+
+	/* Parameter initial SBZ - for future expansion */
+	UMP_DEBUG_ASSERT( 0 == initial,
+			("initial must be zero\n") );
+
+	if (0 != pthread_mutexattr_init(&mutex_attributes))
+	{
+		return NULL;
+	}
+
+/* Use error-checking mutexes on debug builds so misuse is reported by pthreads. */
+#if UMP_DEBUG_EXTENDED_MUTEX_LOCK_CHECKING
+#define UMP_PTHREADS_MUTEX_TYPE PTHREAD_MUTEX_ERRORCHECK
+#else
+#define UMP_PTHREADS_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+#endif
+
+	if (0 != pthread_mutexattr_settype(&mutex_attributes, UMP_PTHREADS_MUTEX_TYPE))
+	{
+		/** Return NULL on failure */
+		pthread_mutexattr_destroy(&mutex_attributes);
+		return NULL;
+
+	}
+
+#undef UMP_PTHREADS_MUTEX_TYPE
+
+	/** @opt use containing structures for the ANY_UNLOCK type, to
+	 * save 2 DWORDS when not in use */
+	lock = _ump_osu_malloc( sizeof(_ump_osu_lock_t) );
+
+	if( NULL == lock )
+	{
+		/** Return NULL on failure */
+		pthread_mutexattr_destroy(&mutex_attributes);
+		return NULL;
+	}
+
+	if (0 != pthread_mutex_init( &lock->mutex, &mutex_attributes ))
+	{
+		pthread_mutexattr_destroy(&mutex_attributes);
+		_ump_osu_free( lock );
+		return NULL;
+	}
+
+	/* done with the mutexattr object */
+	pthread_mutexattr_destroy(&mutex_attributes);
+
+	/* ANY_UNLOCK type: also needs the condition variable and state flag */
+	if ( flags & _UMP_OSU_LOCKFLAG_ANYUNLOCK )
+	{
+		if (0 != pthread_cond_init( &lock->condition, NULL ))
+		{
+			/* cleanup */
+			pthread_mutex_destroy( &lock->mutex );
+			_ump_osu_free( lock );
+			return NULL;
+		}
+		lock->state = UMP_FALSE; /* mark as unlocked by default */
+	}
+
+	lock->flags = flags;
+
+	/** Debug lock checking */
+	UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );
+
+	return lock;
+}
+
+/**
+ * Acquire an ANYUNLOCK-type lock, giving up after @a timeout microseconds.
+ *
+ * @param lock    the lock; must have been created with _UMP_OSU_LOCKFLAG_ANYUNLOCK
+ *                (timed waits are only implemented for that type).
+ * @param mode    must be _UMP_OSU_LOCKMODE_RW.
+ * @param timeout relative timeout in microseconds.
+ * @return _UMP_OSU_ERR_OK on acquisition, _UMP_OSU_ERR_TIMEOUT on expiry,
+ *         _UMP_OSU_ERR_FAULT on unexpected failure.
+ */
+_ump_osu_errcode_t _ump_osu_lock_timed_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode, u64 timeout)
+{
+	/* absolute time specifier */
+	struct timespec ts;
+	struct timeval tv;
+
+	/* Parameter validation */
+	UMP_DEBUG_ASSERT_POINTER( lock );
+
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode,
+		("unrecognised mode, %.8X\n", mode) );
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKFLAG_ANYUNLOCK == lock->flags, ("Timed operations only implemented for ANYUNLOCK type locks"));
+
+	/* calculate the realtime timeout value; pthread_cond_timedwait measures
+	 * against CLOCK_REALTIME by default, matching gettimeofday here. */
+
+	if (0 != gettimeofday(&tv, NULL))
+	{
+		UMP_DEBUG_PRINT(1,("Could not get the current realtime value to calculate the absolute value for a timed mutex lock with a timeout"));
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+#define UMP_USECS_PER_SECOND 1000000LL
+#define UMP_NANOSECS_PER_USEC 1000LL
+
+	/* Split the u64 microsecond timeout into whole seconds and a sub-second
+	 * remainder with division, instead of adding it all into the (long)
+	 * tv_usec field. The previous code overflowed tv_usec on 32-bit systems
+	 * for timeouts over ~35 minutes and normalised with an O(n) loop. */
+	ts.tv_sec = tv.tv_sec + (time_t)(timeout / UMP_USECS_PER_SECOND);
+	tv.tv_usec += (long)(timeout % UMP_USECS_PER_SECOND);
+
+	/* did we overflow a second in the usec part? (at most once now) */
+	if (tv.tv_usec >= UMP_USECS_PER_SECOND)
+	{
+		tv.tv_usec -= UMP_USECS_PER_SECOND;
+		ts.tv_sec++;
+	}
+
+	/* copy the sub-second part to the correct struct, converting to ns */
+	ts.tv_nsec = (tv.tv_usec * UMP_NANOSECS_PER_USEC);
+
+#undef UMP_USECS_PER_SECOND
+#undef UMP_NANOSECS_PER_USEC
+
+	/* lock the mutex protecting access to the state field */
+	pthread_mutex_lock( &lock->mutex );
+	/* loop while locked (state is UMP_TRUE) */
+	/* pthread_cond_timedwait unlocks the mutex, wait, and locks the mutex once unblocked (either due to the event or the timeout) */
+	while ( UMP_TRUE == lock->state )
+	{
+		int res;
+		res = pthread_cond_timedwait( &lock->condition, &lock->mutex, &ts );
+		if (0 == res) continue; /* test the state variable again (loop condition) */
+		else if (ETIMEDOUT == res)
+		{
+			/* timeout, need to clean up and return the correct error code */
+			pthread_mutex_unlock(&lock->mutex);
+			return _UMP_OSU_ERR_TIMEOUT;
+		}
+		else
+		{
+			UMP_DEBUG_PRINT(1, ("Unexpected return from pthread_cond_timedwait 0x%08X\n", res));
+
+			pthread_mutex_unlock(&lock->mutex);
+			return _UMP_OSU_ERR_FAULT;
+		}
+
+	}
+
+	/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
+		("This lock was already locked\n") );
+	UMP_DEBUG_CODE( lock->locked_as = mode );
+
+	/* the state is UMP_FALSE (unlocked), so we set it to UMP_TRUE to indicate that it's locked and can return knowing that we own the lock */
+	lock->state = UMP_TRUE;
+	/* final unlock of the mutex */
+	pthread_mutex_unlock(&lock->mutex);
+
+	return _UMP_OSU_ERR_OK;
+
+}
+
+/**
+ * Acquire @a lock, blocking indefinitely until it becomes available.
+ *
+ * @param lock the lock to acquire.
+ * @param mode must be _UMP_OSU_LOCKMODE_RW (the only recognised mode).
+ * @return _UMP_OSU_ERR_OK always; incorrect flags are reported only via
+ *         UMP_DEBUG_ERROR in debug builds.
+ */
+_ump_osu_errcode_t _ump_osu_lock_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode)
+{
+	/* Parameter validation */
+	UMP_DEBUG_ASSERT_POINTER( lock );
+
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode,
+		("unrecognised mode, %.8X\n", mode) );
+
+	/** @note since only one flag can be set, we use a switch statement here.
+	 * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
+	 * implemented lock type */
+	switch ( lock->flags )
+	{
+	case _UMP_OSU_LOCKFLAG_STATIC:
+	case _UMP_OSU_LOCKFLAG_DEFAULT:
+		/* Usual Mutex type: the pthread mutex itself IS the lock */
+		{
+			int call_result;
+			call_result = pthread_mutex_lock( &lock->mutex );
+			UMP_DEBUG_ASSERT( 0 == call_result,
+				("pthread_mutex_lock call failed with error code %d\n", call_result));
+			UMP_IGNORE( call_result );
+		}
+
+		/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+		UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
+			("This lock was already locked\n") );
+		UMP_DEBUG_CODE( lock->locked_as = mode );
+		break;
+
+	case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
+		/** @note Use of bitflags in a case statement ONLY works because this
+		 * is the ONLY flag that is supported */
+
+		/* ANYUNLOCK: the mutex only guards lock->state; the logical lock is
+		 * the state flag, so any thread may later clear it in _lock_signal. */
+		/* lock the mutex protecting access to the state field */
+		pthread_mutex_lock( &lock->mutex );
+		/* loop while locked (state is UMP_TRUE) */
+		/* pthread_cond_wait unlocks the mutex, wait, and locks the mutex once unblocked */
+		while ( UMP_TRUE == lock->state ) pthread_cond_wait( &lock->condition, &lock->mutex );
+
+		/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+		UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
+			("This lock was already locked\n") );
+		UMP_DEBUG_CODE( lock->locked_as = mode );
+
+		/* the state is UMP_FALSE (unlocked), so we set it to UMP_TRUE to indicate that it's locked and can return knowing that we own the lock */
+		lock->state = UMP_TRUE;
+		/* final unlock of the mutex */
+		pthread_mutex_unlock(&lock->mutex);
+		break;
+
+	default:
+		UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
+		break;
+	}
+
+	return _UMP_OSU_ERR_OK;
+}
+
+/**
+ * Attempt to acquire @a lock without blocking.
+ *
+ * @param lock the lock to try.
+ * @param mode must be _UMP_OSU_LOCKMODE_RW.
+ * @return _UMP_OSU_ERR_OK if the lock was taken, _UMP_OSU_ERR_FAULT if it
+ *         was already held (or flags were invalid).
+ */
+_ump_osu_errcode_t _ump_osu_lock_trywait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode)
+{
+	_ump_osu_errcode_t err = _UMP_OSU_ERR_FAULT;
+	/* Parameter validation */
+	UMP_DEBUG_ASSERT_POINTER( lock );
+
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode,
+		("unrecognised mode, %.8X\n", mode) );
+
+	/** @note since only one flag can be set, we use a switch statement here.
+	 * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
+	 * implemented lock type */
+	switch ( lock->flags )
+	{
+	case _UMP_OSU_LOCKFLAG_STATIC:
+	case _UMP_OSU_LOCKFLAG_DEFAULT:
+		/* Usual Mutex type */
+		{
+			/* This is not subject to UMP_CHECK - overriding the result would cause a programming error */
+			if ( 0 == pthread_mutex_trylock( &lock->mutex ) )
+			{
+				err = _UMP_OSU_ERR_OK;
+
+				/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+				UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as
+					|| mode == lock->locked_as,
+					("tried as mode==%.8X, but was locked as %.8X\n", mode, lock->locked_as) );
+				UMP_DEBUG_CODE( lock->locked_as = mode );
+			}
+		}
+		break;
+
+	case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
+		/** @note Use of bitflags in a case statement ONLY works because this
+		 * is the ONLY flag that is supported */
+
+		/* lock the mutex protecting access to the state field */
+		pthread_mutex_lock(&lock->mutex);
+
+		if ( UMP_FALSE == lock->state)
+		{
+			/* unlocked, take the lock */
+			lock->state = UMP_TRUE;
+			err = _UMP_OSU_ERR_OK;
+		}
+
+		/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+		/* Can do this regardless of whether we obtained ANYUNLOCK: */
+
+
+		UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as
+			|| mode == lock->locked_as,
+			("tried as mode==%.8X, but was locked as %.8X\n", mode, lock->locked_as) );
+		/* If we were already locked, this does no harm, because of the above assert: */
+		UMP_DEBUG_CODE( lock->locked_as = mode );
+
+		pthread_mutex_unlock(&lock->mutex);
+		break;
+
+	default:
+		UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
+		break;
+	}
+
+	return err;
+}
+
+
+/**
+ * Release @a lock. For plain mutexes the calling thread must be the holder;
+ * for ANYUNLOCK-type locks any thread may release, and exactly one waiter
+ * is woken via pthread_cond_signal.
+ *
+ * @param lock the held lock to release.
+ * @param mode must be _UMP_OSU_LOCKMODE_RW (debug-checked against the mode
+ *             the lock was taken with).
+ */
+void _ump_osu_lock_signal( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode )
+{
+	/* Parameter validation */
+	UMP_DEBUG_ASSERT_POINTER( lock );
+
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode,
+		("unrecognised mode, %.8X\n", mode) );
+
+	/** @note since only one flag can be set, we use a switch statement here.
+	 * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
+	 * implemented lock type */
+	switch ( lock->flags )
+	{
+	case _UMP_OSU_LOCKFLAG_STATIC:
+	case _UMP_OSU_LOCKFLAG_DEFAULT:
+		/* Usual Mutex type */
+
+		/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+		UMP_DEBUG_ASSERT( mode == lock->locked_as,
+			("This lock was locked as==%.8X, but tried to unlock as mode==%.8X\n", lock->locked_as, mode));
+		UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );
+
+		{
+			int call_result;
+			call_result = pthread_mutex_unlock( &lock->mutex );
+			/* Fixed: message previously blamed pthread_mutex_lock for an
+			 * unlock failure. */
+			UMP_DEBUG_ASSERT( 0 == call_result,
+				("pthread_mutex_unlock call failed with error code %d\n", call_result));
+			UMP_IGNORE( call_result );
+		}
+		break;
+
+	case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
+		/** @note Use of bitflags in a case statement ONLY works because this
+		 * is the ONLY flag that is supported */
+
+		pthread_mutex_lock(&lock->mutex);
+		UMP_DEBUG_ASSERT( UMP_TRUE == lock->state, ("Unlocking a _ump_osu_lock_t %p which is not locked\n", lock));
+
+		/* DEBUG tracking of previously locked state - occurs while lock is obtained */
+		UMP_DEBUG_ASSERT( mode == lock->locked_as,
+			("This lock was locked as==%.8X, but tried to unlock as %.8X\n", lock->locked_as, mode ));
+		UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );
+
+		/* mark as unlocked */
+		lock->state = UMP_FALSE;
+
+		/* signal the condition, only wake a single thread */
+		pthread_cond_signal(&lock->condition);
+
+		pthread_mutex_unlock(&lock->mutex);
+		break;
+
+	default:
+		UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
+		break;
+	}
+}
+
+/**
+ * Destroy a lock created by _ump_osu_lock_init and free its memory.
+ * The lock must not be held; debug builds assert this. Must not be called
+ * on locks obtained from _ump_osu_lock_static (those are not heap-allocated;
+ * NOTE(review): not checked here - verify callers).
+ *
+ * @param lock the lock to terminate.
+ */
+void _ump_osu_lock_term( _ump_osu_lock_t *lock )
+{
+	int call_result;
+	UMP_DEBUG_ASSERT_POINTER( lock );
+
+	/** Debug lock checking: */
+	/* Lock is signalled on terminate - not a guarantee, since we could be locked immediately beforehand */
+	UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
+		("cannot terminate held lock\n") );
+
+	call_result = pthread_mutex_destroy( &lock->mutex );
+	UMP_DEBUG_ASSERT( 0 == call_result,
+		("Incorrect mutex use detected: pthread_mutex_destroy call failed with error code %d\n", call_result) );
+
+	/* Destroy extra state for ANY_UNLOCK type osu_locks */
+	if ( lock->flags & _UMP_OSU_LOCKFLAG_ANYUNLOCK )
+	{
+		UMP_DEBUG_ASSERT( UMP_FALSE == lock->state, ("terminate called on locked object %p\n", lock));
+		call_result = pthread_cond_destroy(&lock->condition);
+		UMP_DEBUG_ASSERT( 0 == call_result,
+			("Incorrect condition-variable use detected: pthread_cond_destroy call failed with error code %d\n", call_result) );
+	}
+
+	UMP_IGNORE(call_result);
+
+	_ump_osu_free( lock );
+}
+
+/**
+ * Return one of the process-global, statically allocated locks.
+ * Do not pass the result to _ump_osu_lock_term.
+ *
+ * @param nr index into the static lock table; valid range is
+ *           0 <= nr < UMP_OSU_STATIC_LOCK_COUNT (debug-asserted).
+ * @return pointer to the static lock at index @a nr.
+ */
+_ump_osu_lock_t *_ump_osu_lock_static( u32 nr )
+{
+	/* Fixed bounds text: index 0 is valid, so the range is 0 <= nr < count */
+	UMP_DEBUG_ASSERT( nr < UMP_OSU_STATIC_LOCK_COUNT,
+		("provided static lock index (%d) out of bounds (0 <= nr < %d)\n", nr, UMP_OSU_STATIC_LOCK_COUNT) );
+	return &_ump_osu_static_locks[nr];
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ump/ump_osu.h>
+
+#include <stdlib.h>
+#include <string.h> /* memcmp, memchr, memset */
+
+/**
+ * @file ump_osu_memory.c
+ * File implements the user side of the OS interface
+ */
+
+/* Thin OS-abstraction wrappers around the C standard allocator and memory
+ * routines. Sizes are u32 by the UMP OSU interface; they are passed straight
+ * through to the libc functions. No NULL-checking is added here - callers
+ * own error handling and ownership of returned buffers. */
+
+/** Allocate zero-initialised space for @a n objects of @a size bytes. */
+void *_ump_osu_calloc( u32 n, u32 size )
+{
+	return calloc( n, size );
+}
+
+/** Allocate @a size bytes, uninitialised. */
+void *_ump_osu_malloc( u32 size )
+{
+	return malloc( size );
+}
+
+/** Resize @a ptr to @a size bytes; standard realloc semantics. */
+void *_ump_osu_realloc( void *ptr, u32 size )
+{
+	return realloc( ptr, size );
+}
+
+/** Free memory obtained from the wrappers above; free(NULL) is a no-op. */
+void _ump_osu_free( void *ptr )
+{
+	free( ptr );
+}
+
+/** Copy @a len bytes from @a src to @a dst; regions must not overlap. */
+void *_ump_osu_memcpy( void *dst, const void *src, u32 len )
+{
+	return memcpy( dst, src, len );
+}
+
+/** Fill @a size bytes at @a ptr with the byte value of @a chr. */
+void *_ump_osu_memset( void *ptr, u32 chr, u32 size )
+{
+	return memset( ptr, chr, size );
+}
+
+/** Byte-wise comparison of two buffers; memcmp semantics. */
+int _ump_osu_memcmp( const void *ptr1, const void *ptr2, u32 size )
+{
+	return memcmp( ptr1, ptr2, size );
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_uku.c
+ * File implements the user side of the user-kernel interface
+ */
+
+#include "../ump_uku.h"
+#include <stdio.h>
+#include "ump_ioctl.h"
+
+#include <sys/mman.h>
+
+/* Needed for file operations on the device file*/
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+
+static _ump_osu_errcode_t ump_driver_ioctl(void *context, u32 command, void *args);
+
+static int ump_ioctl_api_version_used = UMP_IOCTL_API_VERSION;
+
+/**
+ * The device file to access the UMP device driver
+ * This is a character special file giving access to the device driver.
+ * Usually created using the mknod command line utility.
+ */
+static const char ump_device_file_name[] = "/dev/ump";
+
+/**
+ * Open a session with the UMP device driver (/dev/ump) and negotiate the
+ * ioctl API version.
+ *
+ * On success *context holds the open file descriptor cast to void*
+ * (the convention used throughout this file; -1 cast to void* means closed).
+ * If the driver only speaks API version 1, the module-global
+ * ump_ioctl_api_version_used is downgraded and cached UMP is unavailable.
+ *
+ * @param context out-parameter receiving the session handle.
+ * @return _UMP_OSU_ERR_OK on success, _UMP_OSU_ERR_FAULT on NULL context,
+ *         open failure, or incompatible driver version.
+ */
+_ump_osu_errcode_t _ump_uku_open( void **context )
+{
+	int ump_device_file;
+	if(NULL == context)
+	{
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+	ump_device_file = open(ump_device_file_name, O_RDWR);
+
+	if (-1 == ump_device_file)
+	{
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+	{
+		struct _ump_uk_api_version_s args;
+		args.ctx = (void*)ump_device_file;
+		args.version = UMP_IOCTL_API_VERSION;
+		args.compatible = 3;
+		ump_driver_ioctl(args.ctx, UMP_IOC_QUERY_API_VERSION, &args);
+		/* driver sets compatible to 1 when it accepts our API version */
+		if ( 1 != args.compatible )
+		{
+			if (IS_API_MATCH(MAKE_VERSION_ID(1), args.version))
+			{
+				/* fall back to the v1 API: works, but without cache support */
+				ump_ioctl_api_version_used = MAKE_VERSION_ID(1);
+				UMP_PRINTF("The UMP devicedriver does not support cached UMP. Update it if this is needed.\n");
+			}
+			else
+			{
+				UMP_PRINTF("The UMP devicedriver is version: %d, UMP libraries is version: %d.\n", GET_VERSION(args.version), GET_VERSION(UMP_IOCTL_API_VERSION) );
+				close(ump_device_file);
+				return _UMP_OSU_ERR_FAULT;
+			}
+		}
+	}
+
+	*context = (void *) ump_device_file;
+	return _UMP_OSU_ERR_OK;
+}
+
+/**
+ * Close a session opened with _ump_uku_open and invalidate the handle.
+ *
+ * @param context session handle holding the fd cast to void*; set to
+ *                (void*)-1 after closing so double-close is rejected.
+ * @return _UMP_OSU_ERR_OK on success, _UMP_OSU_ERR_FAULT if @a context is
+ *         NULL or already closed.
+ */
+_ump_osu_errcode_t _ump_uku_close( void **context )
+{
+	if(NULL == context)
+	{
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+	/* (int) cast of the stored pointer recovers the fd; -1 marks "closed".
+	 * NOTE(review): this truncates on LP64 - relies on fds fitting in int. */
+	if(-1 == (int)*context)
+	{
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+	close((int)*context);
+	*context = (void *)-1;
+
+	return _UMP_OSU_ERR_OK;
+}
+
+/** Request a new UMP allocation from the driver (UMP_IOC_ALLOCATE). */
+int _ump_uku_allocate(_ump_uk_allocate_s *args)
+{
+	return ump_driver_ioctl(args->ctx, UMP_IOC_ALLOCATE, args);
+}
+
+/** Import an ION buffer into UMP (UMP_IOC_ION_IMPORT). */
+_ump_osu_errcode_t _ump_uku_ion_import(_ump_uk_ion_import_s *args)
+{
+	UMP_DEBUG_PRINT(3, ("%s UMP_IOC_ION_IMPORT=%x\n", __func__, UMP_IOC_ION_IMPORT));
+
+	return ump_driver_ioctl(args->ctx, UMP_IOC_ION_IMPORT, args);
+}
+
+/** Release a reference on a UMP allocation (UMP_IOC_RELEASE). */
+_ump_osu_errcode_t _ump_uku_release(_ump_uk_release_s *args)
+{
+	return ump_driver_ioctl(args->ctx, UMP_IOC_RELEASE, args);
+}
+
+/** Query the size of a UMP allocation (UMP_IOC_SIZE_GET). */
+_ump_osu_errcode_t _ump_uku_size_get(_ump_uk_size_get_s *args)
+{
+	return ump_driver_ioctl(args->ctx, UMP_IOC_SIZE_GET, args);
+}
+
+
+/** Cache maintenance (msync) on a UMP buffer; no-op on v1-API drivers
+ * that have no cached-UMP support. Ignores the ioctl result. */
+void _ump_uku_msynch(_ump_uk_msync_s *args)
+{
+	/* This is for backwards compatibility with v1-API drivers */
+	if ( MAKE_VERSION_ID(1) == ump_ioctl_api_version_used)
+	{
+		args->is_cached = 0;
+		if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED != args->op )
+		{
+			UMP_DEBUG_PRINT(3, ("Warning: Doing UMP cache flush operations on a Device Driver that does not support cached UMP mem.\n"));
+		}
+		return;
+	}
+	ump_driver_ioctl(args->ctx, UMP_IOC_MSYNC, args);
+}
+
+/** Enable/disable grouped cache operations (UMP_IOC_CACHE_OPERATIONS_CONTROL). */
+void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args )
+{
+	ump_driver_ioctl(args->ctx, UMP_IOC_CACHE_OPERATIONS_CONTROL, args);
+}
+
+/** Notify the driver that HW usage of a buffer is switching (UMP_IOC_SWITCH_HW_USAGE). */
+void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *args )
+{
+	ump_driver_ioctl(args->ctx, UMP_IOC_SWITCH_HW_USAGE, args);
+}
+
+/** Lock a UMP buffer in the driver (UMP_IOC_LOCK). */
+void _ump_uku_lock( _ump_uk_lock_s *args )
+{
+	ump_driver_ioctl(args->ctx, UMP_IOC_LOCK, args);
+}
+
+/** Unlock a UMP buffer in the driver (UMP_IOC_UNLOCK). */
+void _ump_uku_unlock( _ump_uk_unlock_s *args )
+{
+	ump_driver_ioctl(args->ctx, UMP_IOC_UNLOCK, args);
+}
+
+/**
+ * Map a UMP buffer into this process with mmap on the driver fd.
+ * The buffer is selected by encoding its secure_id into the mmap offset
+ * (secure_id * page size) - the driver decodes this on the other side.
+ * Cached mappings are requested via MAP_PRIVATE (driver-defined convention).
+ *
+ * @param args in/out: ctx (driver fd as void*), size, secure_id, is_cached;
+ *             on success args->mapping holds the address and cookie is 0.
+ * @return 0 on success, -1 on closed context or mmap failure.
+ */
+int _ump_uku_map_mem(_ump_uk_map_mem_s *args)
+{
+	int flags;
+	if( -1 == (int)args->ctx )
+	{
+		return -1;
+	}
+
+	flags = MAP_SHARED;
+
+	/* This is for backwards compatibility: v1-API drivers have no cache support */
+	if ( MAKE_VERSION_ID(1) == ump_ioctl_api_version_used)
+	{
+		args->is_cached = 0;
+	}
+
+	/* If we want the Caching to be enabled we set the flags to be PRIVATE. The UMP DD reads this and do proper handling
+	   Note: this enforces the user to use proper invalidation*/
+	if ( args->is_cached ) flags = MAP_PRIVATE;
+
+	args->mapping = mmap(NULL, args->size, PROT_READ | PROT_WRITE ,flags , (int)args->ctx, (off_t)args->secure_id * sysconf(_SC_PAGE_SIZE));
+	if (MAP_FAILED == args->mapping)
+	{
+		return -1;
+	}
+
+	args->cookie = 0; /* Cookie is not used in linux _ump_uku_unmap_mem */
+
+	return 0;
+}
+
+/**
+ * Unmap a mapping created by _ump_uku_map_mem. munmap result is ignored.
+ *
+ * @param args mapping address and size previously filled by _ump_uku_map_mem.
+ */
+void _ump_uku_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+	/*
+	 * If a smaller size is used Linux will just remove the requested range but don't tell
+	 * the ump driver before all of it is unmapped, either via another unmap request or upon process shutdown.
+	 * Unmapping too much will just ignore the overhead or hit undefined behavior,
+	 * only affecting the calling process which could mess itself up in other ways anyway.
+	 * So we don't need any security checks here.
+	 */
+	munmap(args->mapping, args->size);
+}
+
+/**
+ * Issue one ioctl to the UMP driver.
+ *
+ * @param context the driver fd stored as void* (-1 means no open session).
+ * @param command ioctl request code (UMP_IOC_*).
+ * @param args    command-specific argument struct, read/written by the driver.
+ * @return _UMP_OSU_ERR_OK on success, _UMP_OSU_ERR_FAULT on bad context or
+ *         non-zero ioctl return.
+ */
+static _ump_osu_errcode_t ump_driver_ioctl(void *context, u32 command, void *args)
+{
+	/*UMP_CHECK_NON_NULL(args, _UMP_OSK_ERR_INVALID_ARGS);*/
+
+	/* check for a valid file descriptor */
+	/** @note manual type safety check-point */
+	if( -1 == (int)context )
+	{
+		return _UMP_OSU_ERR_FAULT;
+	}
+
+	/* call ioctl handler of driver */
+	if (0 != ioctl((int)context, command, args)) return _UMP_OSU_ERR_FAULT;
+	return _UMP_OSU_ERR_OK;
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file ump_uku.h
+ * Defines the user-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKU_H__
+#define __UMP_UKU_H__
+
+#include <ump/ump_osu.h>
+#include <ump/ump_debug.h>
+#include <ump/ump_uk_types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Open/close a session with the UMP device driver. On success *context
+ * holds the session handle; pass it back to every other call via args->ctx. */
+_ump_osu_errcode_t _ump_uku_open( void **context );
+
+_ump_osu_errcode_t _ump_uku_close( void **context );
+
+/* NOTE(review): the definition in ump_uku.c returns int - assumed
+ * compatible with the _ump_osu_errcode_t typedef; confirm and align. */
+_ump_osu_errcode_t _ump_uku_allocate( _ump_uk_allocate_s *args );
+
+_ump_osu_errcode_t _ump_uku_ion_import(_ump_uk_ion_import_s *args);
+
+_ump_osu_errcode_t _ump_uku_release( _ump_uk_release_s *args );
+
+_ump_osu_errcode_t _ump_uku_size_get( _ump_uk_size_get_s *args );
+
+_ump_osu_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+/* Map/unmap a UMP buffer into this process. (The former second, duplicate
+ * declaration of _ump_uku_map_mem has been removed.) */
+int _ump_uku_map_mem( _ump_uk_map_mem_s *args );
+
+void _ump_uku_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+/* Cache maintenance and driver-side lock operations. */
+void _ump_uku_msynch(_ump_uk_msync_s *args);
+
+void _ump_uku_cache_operations_control( _ump_uk_cache_operations_control_s *args );
+void _ump_uku_switch_hw_usage( _ump_uk_switch_hw_usage_s *dd_msync_call_arg );
+void _ump_uku_lock( _ump_uk_lock_s *dd_msync_call_arg );
+void _ump_uku_unlock( _ump_uk_unlock_s *dd_msync_call_arg );
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKU_H__ */
#include "ump.h"
#include "ump_ref_drv.h"
+#include "secion.h"
/*****************************************************************************/
#include <limits.h>
#include "ump.h"
#include "ump_ref_drv.h"
+#include "secion.h"
#include "s5p_fimc.h"
#include "exynos_mem.h"
static pthread_mutex_t s_map_lock = PTHREAD_MUTEX_INITIALIZER;
}
#endif
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
- ion_msync(hnd->ion_client, hnd->fd, IMSYNC_DEV_TO_RW | IMSYNC_SYNC_FOR_DEV, hnd->size, hnd->offset);
+ ion_msync(hnd->ion_client, hnd->fd, (ION_MSYNC_FLAGS) (IMSYNC_DEV_TO_RW | IMSYNC_SYNC_FOR_DEV), hnd->size, hnd->offset);
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_IOCTL) {
int ret;
return munmap(addr, len);
}
-int ion_msync(ion_client client, ion_buffer buffer, enum ION_MSYNC_FLAGS flags, size_t size, off_t offset)
+int ion_msync(ion_client client, ion_buffer buffer, long flags, size_t size, off_t offset)
{
struct ion_msync_data arg_cdata;
arg_cdata.size = size;
- arg_cdata.dir = flags;
+ arg_cdata.dir = (ION_MSYNC_FLAGS) flags;
arg_cdata.fd = buffer;
arg_cdata.offset = offset;