*/
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
+ int memflags = call_vb_qop(vb, mem_flags, vb);
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
int plane;
mem_priv = call_ptr_memop(vb, alloc,
q->alloc_devs[plane] ? : q->dev,
- q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+ q->dma_attrs, size, q->dma_dir, q->gfp_flags,
+ memflags);
if (IS_ERR_OR_NULL(mem_priv)) {
if (mem_priv)
ret = PTR_ERR(mem_priv);
static void vb2_process_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
+ int memflags = call_vb_qop(vb, mem_flags, vb);
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
unsigned int plane;
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv,
- vb->planes[plane].bytesused);
+ vb->planes[plane].bytesused, memflags);
}
spin_lock_irqsave(&q->done_lock, flags);
unsigned int plane;
int ret = 0;
bool reacquired = vb->planes[0].mem_priv == NULL;
+ int memflags = call_vb_qop(vb, mem_flags, vb);
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
mem_priv = call_ptr_memop(vb, get_userptr,
q->alloc_devs[plane] ? : q->dev,
planes[plane].m.userptr,
- planes[plane].length, q->dma_dir);
+ planes[plane].length, q->dma_dir, memflags);
if (IS_ERR(mem_priv)) {
dprintk(1, "failed acquiring userspace memory for plane %d\n",
plane);
unsigned int plane;
int ret = 0;
bool reacquired = vb->planes[0].mem_priv == NULL;
+ int memflags = call_vb_qop(vb, mem_flags, vb);
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv,
- planes[plane].bytesused);
+ planes[plane].bytesused, memflags);
if (ret) {
dprintk(1, "failed to map dmabuf for plane %d\n",
static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
+ int memflags = call_vb_qop(vb, mem_flags, vb);
struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
int ret;
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, prepare, vb->planes[plane].mem_priv,
- vb->planes[plane].bytesused);
+ vb->planes[plane].bytesused, memflags);
vb->state = VB2_BUF_STATE_PREPARED;
*/
for (i = 0; i < q->num_buffers; ++i) {
struct vb2_buffer *vb = q->bufs[i];
+ int memflags = call_vb_qop(vb, mem_flags, vb);
if (vb->state == VB2_BUF_STATE_PREPARED ||
vb->state == VB2_BUF_STATE_QUEUED) {
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish,
vb->planes[plane].mem_priv,
- 0);
+ 0, memflags);
}
if (vb->state != VB2_BUF_STATE_DEQUEUED) {
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+ gfp_t gfp_flags, int memflags)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
}
}
-static void vb2_dma_sg_prepare(void *buf_priv, size_t size)
+static void vb2_dma_sg_prepare(void *buf_priv, size_t size, int memflags)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
}
}
-static void vb2_dma_sg_finish(void *buf_priv, size_t size)
+static void vb2_dma_sg_finish(void *buf_priv, size_t size, int memflags)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size,
- enum dma_data_direction dma_dir)
+ enum dma_data_direction dma_dir,
+ int memflags)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
/* callbacks for DMABUF buffers */
/*********************************************/
-static int vb2_dma_sg_map_dmabuf(void *mem_priv, size_t size)
+static int vb2_dma_sg_map_dmabuf(void *mem_priv, size_t size, int memflags)
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct sg_table *sgt;
void *(*alloc)(struct device *dev, unsigned long attrs,
unsigned long size,
enum dma_data_direction dma_dir,
- gfp_t gfp_flags);
+ gfp_t gfp_flags, int memflags);
void (*put)(void *buf_priv);
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(struct device *dev, unsigned long vaddr,
unsigned long size,
- enum dma_data_direction dma_dir);
+ enum dma_data_direction dma_dir,
+ int memflags);
void (*put_userptr)(void *buf_priv);
- void (*prepare)(void *buf_priv, size_t size);
- void (*finish)(void *buf_priv, size_t size);
+ void (*prepare)(void *buf_priv, size_t size, int memflags);
+ void (*finish)(void *buf_priv, size_t size, int memflags);
void *(*attach_dmabuf)(struct device *dev,
struct dma_buf *dbuf,
unsigned long size,
enum dma_data_direction dma_dir);
void (*detach_dmabuf)(void *buf_priv);
- int (*map_dmabuf)(void *buf_priv, size_t size);
+ int (*map_dmabuf)(void *buf_priv, size_t size,
+ int memflags);
void (*unmap_dmabuf)(void *buf_priv, size_t size);
void *(*vaddr)(void *buf_priv);
* ioctl; might be called before @start_streaming callback
* if user pre-queued buffers before calling
* VIDIOC_STREAMON().
+ * @mem_flags: called before buffer manipulation events, including
+ * buffer acquisition and mapping, to obtain extra information
+ * for the vb2 mem implementations. The return value of
+ * @mem_flags is implementation specific.
*/
struct vb2_ops {
int (*queue_setup)(struct vb2_queue *q,
bool (*is_unordered)(struct vb2_queue *q);
void (*buf_queue)(struct vb2_buffer *vb);
+ int (*mem_flags)(struct vb2_buffer *vb);
};
/**
u32 cnt_start_streaming;
u32 cnt_stop_streaming;
u32 cnt_is_unordered;
+ u32 cnt_mem_flags;
#endif
};