#define descriptor_irq_error (1 << 4)
#define descriptor_irq_always (3 << 4)
#define descriptor_branch_always (3 << 2)
+#define descriptor_wait (3 << 0)
struct descriptor {
__le16 req_count;
__le16 control;
__le32 data_address;
__le32 branch_address;
__le16 res_count;
__le16 transfer_status;
} __attribute__((aligned(16)));
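+
+/*
+ * Dual-buffer receive descriptor: the controller stores the first
+ * first_size bytes of each packet in first_buffer and the rest in
+ * second_buffer, writing the remaining byte counts back into the
+ * *_res_count fields as data arrives.
+ */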
+struct db_descriptor {
+ __le16 first_size;
+ __le16 control;
+ __le16 second_req_count;
+ __le16 first_req_count;
+ __le32 branch_address;
+ __le16 second_res_count;
+ __le16 first_res_count;
+ __le32 reserved0;
+ __le32 first_buffer;
+ __le32 second_buffer;
+ __le32 reserved1;
+} __attribute__((aligned(16)));
+
#define control_set(regs) (regs)
#define control_clear(regs) ((regs) + 4)
#define command_ptr(regs) ((regs) + 12)
+#define context_match(regs) ((regs) + 16)
return container_of(card, struct fw_ohci, card);
}
-#define CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
+#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
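+
+/* IR ContextControl mode bits; dual-buffer receive is an OHCI 1.1
+ * feature (see the FIXME in ohci_queue_iso_receive below). */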
+#define IR_CONTEXT_BUFFER_FILL 0x80000000
+#define IR_CONTEXT_ISOCH_HEADER 0x40000000
+#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
+#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
+#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
#define CONTEXT_RUN 0x8000
#define CONTEXT_WAKE 0x1000
return d;
}
-static void context_run(struct context *ctx, u32 cycle_match)
+static void context_run(struct context *ctx, u32 extra)
{
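+ /* "extra" is OR'd into ContextControl.set along with RUN: IT callers
+ * pass cycle-match bits, IR callers pass mode flags such as
+ * IR_CONTEXT_DUAL_BUFFER_MODE. */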
struct fw_ohci *ohci = ctx->ohci;
reg_write(ohci, command_ptr(ctx->regs),
le32_to_cpu(ctx->tail_descriptor_last->branch_address));
reg_write(ohci, control_clear(ctx->regs), ~0);
- reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | cycle_match);
+ reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | extra);
flush_writes(ohci);
}
return retval;
}
-static void ir_context_tasklet(unsigned long data)
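+/*
+ * Per-descriptor completion handler for dual-buffer IR contexts:
+ * returning 0 stops the context's descriptor walk (the block is still
+ * being filled by the controller), returning 1 retires the block and
+ * continues.
+ */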
+static int handle_ir_packet(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
{
- struct iso_context *ctx = (struct iso_context *)data;
- (void)ctx;
+ struct iso_context *ctx =
+ container_of(context, struct iso_context, context);
+ struct db_descriptor *db = (struct db_descriptor *) d;
+
+ if (le16_to_cpu(db->first_res_count) > 0 &&
+ le16_to_cpu(db->second_res_count) > 0)
+ /* This descriptor isn't done yet, stop iteration. */
+ return 0;
+
+ if (le16_to_cpu(db->control) & descriptor_irq_always)
+ /* FIXME: we should pass payload address here. */
+ ctx->base.callback(&ctx->base,
+ 0, 0,
+ ctx->base.callback_data);
+ return 1;
}
#define ISO_BUFFER_SIZE (64 * 1024)
struct fw_ohci *ohci = fw_ohci(card);
struct iso_context *ctx, *list;
descriptor_callback_t callback;
- u32 *mask;
+ u32 *mask, regs;
unsigned long flags;
int index, retval;
list = ohci->it_context_list;
callback = handle_it_packet;
} else {
- return ERR_PTR(-EINVAL);
+ mask = &ohci->ir_context_mask;
+ list = ohci->ir_context_list;
+ callback = handle_ir_packet;
}
spin_lock_irqsave(&ohci->lock, flags);
if (index < 0)
return ERR_PTR(-EBUSY);
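+
+ /* IT and IR contexts have separate per-context register banks in
+ * the OHCI register map, so pick the base by context type. */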
+ if (type == FW_ISO_CONTEXT_TRANSMIT)
+ regs = OHCI1394_IsoXmitContextBase(index);
+ else
+ regs = OHCI1394_IsoRcvContextBase(index);
+
ctx = &list[index];
memset(ctx, 0, sizeof *ctx);
retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
- OHCI1394_IsoXmitContextBase(index), callback);
+ regs, callback);
if (retval < 0) {
spin_lock_irqsave(&ohci->lock, flags);
*mask |= 1 << index;
u32 cycle_match = 0;
int index;
- index = ctx - ohci->it_context_list;
- if (cycle > 0)
- cycle_match = CONTEXT_CYCLE_MATCH_ENABLE |
- (cycle & 0x7fff) << 16;
- reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
- context_run(&ctx->context, cycle_match);
+ if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ index = ctx - ohci->it_context_list;
+ if (cycle > 0)
+ cycle_match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
+ (cycle & 0x7fff) << 16;
+
+ reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
+ context_run(&ctx->context, cycle_match);
+ } else {
+ index = ctx - ohci->ir_context_list;
+
+ reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
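+ /* ContextMatch: the four tag-match bits (0xf0000000) accept packets
+ * with any tag value on the context's channel. */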
+ reg_write(ohci, context_match(ctx->context.regs),
+ 0xf0000000 | ctx->base.channel);
+ context_run(&ctx->context, IR_CONTEXT_DUAL_BUFFER_MODE);
+ }
return 0;
}
}
static int
-ohci_queue_iso(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
+ohci_queue_iso_transmit(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd;
return 0;
}
+static int
+ohci_queue_iso_receive(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct db_descriptor *db = NULL;
+ struct descriptor *d;
+ struct fw_iso_packet *p;
+ dma_addr_t d_bus, page_bus;
+ u32 z, header_z, length, rest;
+ int page, offset;
+
+ /* FIXME: Cycle lost behavior should be configurable: lose
+ * packet, retransmit or terminate. */
+
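+ /* Each db_descriptor spans two 16-byte descriptor slots, hence
+ * z = 2 below. */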
+ p = packet;
+ z = 2;
+
+ /* Get header size in number of descriptors. */
+ header_z = DIV_ROUND_UP(p->header_length, sizeof *d);
+ page = payload >> PAGE_SHIFT;
+ offset = payload & ~PAGE_MASK;
+ rest = p->payload_length;
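+ /* Worked example, assuming 4 KiB pages: payload 0x1800 yields
+ * page 1, offset 2048; a 6000-byte payload is then queued as two
+ * blocks of 2048 and 3952 bytes. */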
+
+ /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
+ /* FIXME: handle descriptor_wait */
+ /* FIXME: make packet-per-buffer/dual-buffer a context option */
+ while (rest > 0) {
+ d = context_get_descriptors(&ctx->context,
+ z + header_z, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ db = (struct db_descriptor *) d;
+ db->control = cpu_to_le16(descriptor_status |
+ descriptor_branch_always);
+ db->first_size = cpu_to_le16(ctx->base.header_size);
+ db->first_req_count = cpu_to_le16(p->header_length);
+ db->second_req_count = cpu_to_le16(p->payload_length);
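+ /* The residual counts start out equal to the request counts; the
+ * controller writes them back as data arrives, and handle_ir_packet
+ * polls them to detect completed blocks. */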
+ db->first_res_count = db->first_req_count;
+ db->second_res_count = db->second_req_count;
+
+ db->first_buffer = cpu_to_le32(d_bus + sizeof *db);
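+ /* first_buffer points just past the db_descriptor itself: the
+ * header_z extra slots requested above double as DMA space for the
+ * stripped packet headers. */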
+
+ if (offset + rest < PAGE_SIZE)
+ length = rest;
+ else
+ length = PAGE_SIZE - offset;
+
+ page_bus = page_private(buffer->pages[page]);
+ db->second_buffer = cpu_to_le32(page_bus + offset);
+
+ context_append(&ctx->context, d, z, header_z);
+ offset = (offset + length) & ~PAGE_MASK;
+ rest -= length;
+ page++;
+ }
+
+ if (db != NULL && p->interrupt)
+ db->control |= cpu_to_le16(descriptor_irq_always);
+
+ return 0;
+}
+
+static int
+ohci_queue_iso(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+ return ohci_queue_iso_transmit(base, packet, buffer, payload);
+ else
+ return ohci_queue_iso_receive(base, packet, buffer, payload);
+}
+
static const struct fw_card_driver ohci_driver = {
.name = ohci_driver_name,
.enable = ohci_enable,