struct list_head event_list;
struct semaphore event_list_sem;
wait_queue_head_t wait;
- unsigned long vm_start;
+
struct fw_iso_context *iso_context;
+ struct fw_iso_buffer buffer;
+ unsigned long vm_start;
};
static inline void __user *
client->iso_context = fw_iso_context_create(client->device->card,
FW_ISO_CONTEXT_TRANSMIT,
- request.buffer_size,
iso_callback, client);
if (IS_ERR(client->iso_context))
return PTR_ERR(client->iso_context);
{
struct fw_cdev_queue_iso request;
struct fw_cdev_iso_packet __user *p, *end, *next;
- void *payload, *payload_end;
- unsigned long index;
+ unsigned long payload, payload_end;
int count;
struct {
struct fw_iso_packet packet;
/* If the user passes a non-NULL data pointer, has mmap()'ed
* the iso buffer, and the pointer points inside the buffer,
* we setup the payload pointers accordingly. Otherwise we
- * set them both to NULL, which will still let packets with
+ * set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
* and the request.data pointer is ignored.*/
- index = (unsigned long)request.data - client->vm_start;
- if (request.data != 0 && client->vm_start != 0 &&
- index <= client->iso_context->buffer_size) {
- payload = client->iso_context->buffer + index;
- payload_end = client->iso_context->buffer +
- client->iso_context->buffer_size;
- } else {
- payload = NULL;
- payload_end = NULL;
+ /* payload is the offset of request.data into the mmap()'ed buffer
+  * and payload_end is the offset of the end of the buffer.  Computing
+  * payload_end from the buffer size (page_count << PAGE_SHIFT) rather
+  * than from payload keeps packets from being queued past the end of
+  * the buffer when request.data points into the middle of it; the
+  * unsigned subtraction also makes request.data < vm_start wrap to a
+  * huge offset that fails the payload >= payload_end test. */
+ payload = (unsigned long)request.data - client->vm_start;
+ payload_end = client->buffer.page_count << PAGE_SHIFT;
+ if (request.data == 0 || client->buffer.pages == NULL ||
+ payload >= payload_end) {
+ payload = 0;
+ payload_end = 0;
}
if (!access_ok(VERIFY_READ, request.packets, request.size))
return -EINVAL;
if (fw_iso_context_queue(client->iso_context,
- &u.packet, payload))
+ &u.packet, &client->buffer, payload))
break;
p = next;
request.size -= uptr_to_u64(p) - request.packets;
request.packets = uptr_to_u64(p);
- request.data =
- client->vm_start + (payload - client->iso_context->buffer);
+ request.data = client->vm_start + payload;
if (copy_to_user(arg, &request, sizeof request))
return -EFAULT;
+/* mmap() handler: allocates the client's single iso DMA buffer, sized
+ * by the length of the requested mapping, and inserts its pages into
+ * the vma.  The mapping's writability selects the DMA direction. */
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
+ enum dma_data_direction direction;
+ unsigned long size;
+ int page_count, retval;
+
+ /* FIXME: We could support multiple buffers, but we don't. */
+ if (client->buffer.pages != NULL)
+ return -EBUSY;
+
+ /* Require a shared, page-aligned mapping covering whole pages. */
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
- if (client->iso_context->buffer == NULL)
+ if (vma->vm_start & ~PAGE_MASK)
return -EINVAL;
client->vm_start = vma->vm_start;
+ size = vma->vm_end - vma->vm_start;
+ page_count = size >> PAGE_SHIFT;
+ if (size & ~PAGE_MASK)
+ return -EINVAL;
+
+ /* A writable mapping holds data the app sends to the device;
+  * a read-only mapping receives data from the device. */
+ if (vma->vm_flags & VM_WRITE)
+ direction = DMA_TO_DEVICE;
+ else
+ direction = DMA_FROM_DEVICE;
+
+ retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+ page_count, direction);
+ if (retval < 0)
+ return retval;
+ /* Tear the buffer back down if we cannot insert its pages. */
- return remap_vmalloc_range(vma, client->iso_context->buffer, 0);
+ retval = fw_iso_buffer_map(&client->buffer, vma);
+ if (retval < 0)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+ return retval;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
struct address_handler *h, *next;
struct request *r, *next_r;
+ if (client->buffer.pages)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
};
struct fw_cdev_create_iso_context {
- __u32 buffer_size;
+ /* NOTE(review): buffer sizing moved out of context creation (the
+  * buffer is now sized by the mmap() call); presumably handle refers
+  * to the created context — confirm against the ioctl code. */
+ __u32 handle;
};
struct fw_cdev_iso_packet {
#include "fw-topology.h"
#include "fw-device.h"
-static int
-setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
- enum dma_data_direction direction)
+/* Allocate page_count individual pages and map each one for DMA on the
+ * card's device in the given direction.  The DMA address of every page
+ * is stashed in page_private() for destroy/queue to retrieve.  On any
+ * failure all work is undone and buffer->pages is left NULL so callers
+ * can test whether the buffer is initialized.  Returns 0 or -ENOMEM. */
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction)
{
- struct page *page;
- int i, j;
- void *p;
-
- ctx->buffer_size = PAGE_ALIGN(size);
- if (size == 0)
- return 0;
-
- ctx->buffer = vmalloc_32_user(ctx->buffer_size);
- if (ctx->buffer == NULL)
- goto fail_buffer_alloc;
-
- ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
- ctx->pages =
- kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL);
- if (ctx->pages == NULL)
- goto fail_pages_alloc;
-
- p = ctx->buffer;
- for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
- page = vmalloc_to_page(p);
- ctx->pages[i] = dma_map_page(ctx->card->device,
- page, 0, PAGE_SIZE, direction);
- if (dma_mapping_error(ctx->pages[i]))
- goto fail_mapping;
+ int i, j, retval = -ENOMEM;
+ dma_addr_t address;
+
+ buffer->page_count = page_count;
+ buffer->direction = direction;
+
+ buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+ GFP_KERNEL);
+ if (buffer->pages == NULL)
+ goto out;
+
+ for (i = 0; i < buffer->page_count; i++) {
+ buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+ if (buffer->pages[i] == NULL)
+ goto out_pages;
+
+ address = dma_map_page(card->device, buffer->pages[i],
+ 0, PAGE_SIZE, direction);
+ if (dma_mapping_error(address)) {
+ __free_page(buffer->pages[i]);
+ goto out_pages;
+ }
+ /* Remember the DMA mapping so it can be unmapped later. */
+ set_page_private(buffer->pages[i], address);
}
return 0;
- fail_mapping:
- for (j = 0; j < i; j++)
- dma_unmap_page(ctx->card->device, ctx->pages[j],
+ out_pages:
+ for (j = 0; j < i; j++) {
+ address = page_private(buffer->pages[j]);
+ dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, direction);
- fail_pages_alloc:
- vfree(ctx->buffer);
- fail_buffer_alloc:
- return -ENOMEM;
+ __free_page(buffer->pages[j]);
+ }
+ kfree(buffer->pages);
+ out:
+ buffer->pages = NULL;
+ return retval;
+}
+
+/* Map each page of an initialized iso buffer into the given vma, one
+ * vm_insert_page() call per page.  On failure the error is returned
+ * and the partially populated vma is left for munmap/exit to undo. */
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+ unsigned long uaddr;
+ int i, retval;
+
+ uaddr = vma->vm_start;
+ for (i = 0; i < buffer->page_count; i++) {
+ retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
+ if (retval)
+ return retval;
+ uaddr += PAGE_SIZE;
+ }
+
+ return 0;
}
-static void destroy_iso_buffer(struct fw_iso_context *ctx)
+/* Undo fw_iso_buffer_init(): unmap each page from DMA (in the direction
+ * it was mapped with), free the pages and the page array, and reset
+ * buffer->pages to NULL so the buffer reads as uninitialized. */
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+ struct fw_card *card)
{
int i;
+ dma_addr_t address;
- for (i = 0; i < ctx->page_count; i++)
- dma_unmap_page(ctx->card->device, ctx->pages[i],
+ for (i = 0; i < buffer->page_count; i++) {
+ address = page_private(buffer->pages[i]);
+ dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, buffer->direction);
+ __free_page(buffer->pages[i]);
+ }
- kfree(ctx->pages);
- vfree(ctx->buffer);
+ kfree(buffer->pages);
+ buffer->pages = NULL;
}
struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type,
- size_t buffer_size,
fw_iso_callback_t callback,
void *callback_data)
{
struct fw_iso_context *ctx;
- int retval;
ctx = card->driver->allocate_iso_context(card, type);
if (IS_ERR(ctx))
ctx->callback = callback;
ctx->callback_data = callback_data;
- retval = setup_iso_buffer(ctx, buffer_size, DMA_TO_DEVICE);
- if (retval < 0) {
- card->driver->free_iso_context(ctx);
- return ERR_PTR(retval);
- }
-
return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
{
struct fw_card *card = ctx->card;
- destroy_iso_buffer(ctx);
-
card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
+/* Queue one packet on the context.  payload is now a byte offset into
+ * the iso buffer rather than a kernel virtual address; the driver
+ * resolves it to DMA addresses via buffer's pages. */
int
fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet, void *payload)
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct fw_card *card = ctx->card;
- return card->driver->queue_iso(ctx, packet, payload);
+ return card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
static int
ohci_queue_iso(struct fw_iso_context *base,
- struct fw_iso_packet *packet, void *payload)
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = (struct iso_context *)base;
struct fw_ohci *ohci = fw_ohci(ctx->base.card);
struct descriptor *d, *end, *last, *tail, *pd;
struct fw_iso_packet *p;
__le32 *header;
- dma_addr_t d_bus;
+ dma_addr_t d_bus, page_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int index, page, end_page, i, length, offset;
* packet, retransmit or terminate.. */
p = packet;
- payload_index = payload - ctx->base.buffer;
+ payload_index = payload;
d = ctx->head_descriptor;
tail = ctx->tail_descriptor;
end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);
length =
min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
- pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset);
+
+ page_bus = page_private(buffer->pages[page]);
+ pd[i].data_address = cpu_to_le32(page_bus + offset);
payload_index += length;
}
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/fs.h>
+#include <linux/dma-mapping.h>
#define TCODE_WRITE_QUADLET_REQUEST 0
#define TCODE_WRITE_BLOCK_REQUEST 1
typedef void (*fw_iso_callback_t) (struct fw_iso_context *context,
int status, u32 cycle, void *data);
+/* An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma. */
+
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
struct fw_iso_context {
struct fw_card *card;
int type;
int speed;
fw_iso_callback_t callback;
void *callback_data;
-
- void *buffer;
- size_t buffer_size;
- dma_addr_t *pages;
- int page_count;
};
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer,
+ struct fw_card *card,
+ int page_count,
+ enum dma_data_direction direction);
+int
+fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void
+fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
struct fw_iso_context *
fw_iso_context_create(struct fw_card *card, int type,
- size_t buffer_size,
fw_iso_callback_t callback,
void *callback_data);
+
void
fw_iso_context_destroy(struct fw_iso_context *ctx);
int
fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet, void *payload);
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
int
fw_iso_context_send(struct fw_iso_context *ctx,
int (*send_iso)(struct fw_iso_context *ctx, s32 cycle);
int (*queue_iso)(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet, void *payload);
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
};
int