dev->ts1.ts_packet_count = mpeglines;
*num_planes = 1;
sizes[0] = mpeglinesize * mpeglines;
+ alloc_ctxs[0] = dev->alloc_ctx;
*num_buffers = mpegbufs;
return 0;
}
if (!pci_dma_supported(pci_dev, 0xffffffff)) {
printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
err = -EIO;
- goto fail_irq;
+ goto fail_context;
}
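+ /* Create the dma-sg allocation context that queue_setup hands to vb2 */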
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail_context;
+ }
err = request_irq(pci_dev->irq, cx23885_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
	printk(KERN_ERR "%s: can't get IRQ %d\n",
		dev->name, pci_dev->irq);
	goto fail_irq;
}
return 0;
fail_irq:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+fail_context:
cx23885_dev_unregister(dev);
fail_ctrl:
v4l2_ctrl_handler_free(hdl);
free_irq(pci_dev->irq, dev);
cx23885_dev_unregister(dev);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
port->ts_packet_count = 32;
*num_planes = 1;
sizes[0] = port->ts_packet_size * port->ts_packet_count;
+ alloc_ctxs[0] = port->dev->alloc_ctx;
*num_buffers = 32;
return 0;
}
lines = VBI_NTSC_LINE_COUNT;
*num_planes = 1;
sizes[0] = lines * VBI_LINE_LENGTH * 2;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
*num_planes = 1;
sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
struct vb2_queue vb2_vidq;
struct cx23885_dmaqueue vbiq;
struct vb2_queue vb2_vbiq;
+ void *alloc_ctx;
spinlock_t slock;
saa7134_board_init1(dev);
saa7134_hwinit1(dev);
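+ /* Set up the dma-sg allocation context before requesting the IRQ */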
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail3;
+ }
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
- goto fail3;
+ goto fail4;
}
/* wait a bit, register i2c bus */
if (err < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
- goto fail4;
+ goto fail5;
}
printk(KERN_INFO "%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto fail5;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
- goto fail4;
+ goto fail5;
printk(KERN_INFO "%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
request_submodules(dev);
return 0;
- fail4:
+ fail5:
saa7134_unregister_video(dev);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
+ fail4:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail3:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
/* release resources */
free_irq(pci_dev->irq, dev);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
iounmap(dev->lmmio);
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
*nbuffers = 3;
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
EXPORT_SYMBOL_GPL(saa7134_ts_queue_setup);
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
+ alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
/* video+ts+vbi capture */
+ void *alloc_ctx;
struct saa7134_dmaqueue video_q;
struct vb2_queue video_vbq;
struct saa7134_dmaqueue vbi_q;
unsigned int *num_planes, unsigned int sizes[],
void *alloc_ctxs[])
{
+ struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
+
sizes[0] = FRAME_BUF_SIZE;
+ alloc_ctxs[0] = solo_enc->alloc_ctx;
*num_planes = 1;
if (*num_buffers < MIN_VID_BUFFERS)
	*num_buffers = MIN_VID_BUFFERS;
return 0;
}
return ERR_PTR(-ENOMEM);
hdl = &solo_enc->hdl;
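+ /* Per-encoder allocation context; released on all error paths below */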
+ solo_enc->alloc_ctx = vb2_dma_sg_init_ctx(&solo_dev->pdev->dev);
+ if (IS_ERR(solo_enc->alloc_ctx)) {
+ ret = PTR_ERR(solo_enc->alloc_ctx);
+ goto hdl_free;
+ }
v4l2_ctrl_handler_init(hdl, 10);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
solo_enc->desc_items, solo_enc->desc_dma);
hdl_free:
v4l2_ctrl_handler_free(hdl);
+ vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
return ERR_PTR(ret);
}
solo_enc->desc_items, solo_enc->desc_dma);
video_unregister_device(solo_enc->vfd);
v4l2_ctrl_handler_free(&solo_enc->hdl);
+ vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
}
u32 sequence;
struct vb2_queue vidq;
struct list_head vidq_active;
+ void *alloc_ctx;
int desc_count;
int desc_nelts;
struct solo_p2m_desc *desc_items;
/* Then do any initialisation wanted before interrupts are on */
tw68_hw_init1(dev);
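+ /* Create the dma-sg allocation context used by the queue_setup callback */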
+ dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ err = PTR_ERR(dev->alloc_ctx);
+ goto fail3;
+ }
+
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
- goto fail3;
+ goto fail4;
}
/*
if (err < 0) {
pr_err("%s: can't register video device\n",
dev->name);
- goto fail4;
+ goto fail5;
}
tw_setl(TW68_INTMASK, dev->pci_irqmask);
return 0;
-fail4:
+fail5:
video_unregister_device(&dev->vdev);
+fail4:
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail3:
iounmap(dev->lmmio);
fail2:
/* unregister */
video_unregister_device(&dev->vdev);
v4l2_ctrl_handler_free(&dev->hdl);
+ vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
/* release resources */
iounmap(dev->lmmio);
unsigned tot_bufs = q->num_buffers + *num_buffers;
sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
+ alloc_ctxs[0] = dev->alloc_ctx;
/*
* We allow create_bufs, but only if the sizeimage is the same as the
* current sizeimage. The tw68_buffer_count calculation becomes quite
unsigned field;
struct vb2_queue vidq;
struct list_head active;
+ void *alloc_ctx;
/* various v4l controls */
const struct tw68_tvnorm *tvnorm; /* video */
*nbufs = minbufs;
if (cam->buffer_mode == B_DMA_contig)
alloc_ctxs[0] = cam->vb_alloc_ctx;
+ else if (cam->buffer_mode == B_DMA_sg)
+ alloc_ctxs[0] = cam->vb_alloc_ctx_sg;
return 0;
}
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
- cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_contig;
cam->frame_complete = mcam_dma_contig_done;
+ cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+ if (IS_ERR(cam->vb_alloc_ctx))
+ return PTR_ERR(cam->vb_alloc_ctx);
#endif
break;
case B_DMA_sg:
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_sg;
cam->frame_complete = mcam_dma_sg_done;
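+ /* dma-sg mode gets its own allocation context, separate from dma-contig */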
+ cam->vb_alloc_ctx_sg = vb2_dma_sg_init_ctx(cam->dev);
+ if (IS_ERR(cam->vb_alloc_ctx_sg))
+ return PTR_ERR(cam->vb_alloc_ctx_sg);
#endif
break;
case B_vmalloc:
if (cam->buffer_mode == B_DMA_contig)
vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
#endif
+#ifdef MCAM_MODE_DMA_SG
+ if (cam->buffer_mode == B_DMA_sg)
+ vb2_dma_sg_cleanup_ctx(cam->vb_alloc_ctx_sg);
+#endif
}
/* DMA buffers - DMA modes */
struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
struct vb2_alloc_ctx *vb_alloc_ctx;
+ struct vb2_alloc_ctx *vb_alloc_ctx_sg;
/* Mode-specific ops, set at open time */
void (*dma_setup)(struct mcam_camera *cam);
printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
} while (0)
+struct vb2_dma_sg_conf {
+ struct device *dev;
+};
+
struct vb2_dma_sg_buf {
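+ /* device from the allocation context; a reference is held per buffer */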
+ struct device *dev;
void *vaddr;
struct page **pages;
int offset;
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
+ struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
int ret;
int num_pages;
+ if (WARN_ON(alloc_ctx == NULL))
+ return NULL;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
if (ret)
goto fail_table_alloc;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(conf->dev);
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
+ put_device(buf->dev);
kfree(buf);
}
}
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
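+
+/**
+ * vb2_dma_sg_init_ctx() - create an allocation context for @dev
+ * @dev: device the allocated buffers will be used with
+ *
+ * The returned context is passed to vb2 through alloc_ctxs[] in a
+ * driver's queue_setup; every buffer allocated from it holds a
+ * reference to @dev until the buffer is freed.
+ */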
+void *vb2_dma_sg_init_ctx(struct device *dev)
+{
+ struct vb2_dma_sg_conf *conf;
+
+ conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return ERR_PTR(-ENOMEM);
+
+ conf->dev = dev;
+
+ return conf;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
+
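+/**
+ * vb2_dma_sg_cleanup_ctx() - free a context created by vb2_dma_sg_init_ctx()
+ * @alloc_ctx: the context; NULL and ERR_PTR values are ignored, so error
+ * paths may call this unconditionally
+ */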
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
+{
+ if (!IS_ERR_OR_NULL(alloc_ctx))
+ kfree(alloc_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
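+
+/*
+ * Typical usage, as in the driver conversions in this patch:
+ *
+ *	probe:       dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
+ *	             if (IS_ERR(dev->alloc_ctx))
+ *	                     return PTR_ERR(dev->alloc_ctx);
+ *	queue_setup: alloc_ctxs[0] = dev->alloc_ctx;
+ *	remove:      vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+ */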
+
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
}
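+
+/* Constructor/destructor for the dma-sg allocation context */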
+void *vb2_dma_sg_init_ctx(struct device *dev);
+void vb2_dma_sg_cleanup_ctx(void *alloc_ctx);
+
extern const struct vb2_mem_ops vb2_dma_sg_memops;
#endif