firewire: Generalize the iso transmit descriptor buffer logic.
drivers/firewire/fw-ohci.c
/* -*- c-basic-offset: 8 -*-
 *
 * fw-ohci.c - Driver for OHCI 1394 boards
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

#define descriptor_output_more		0
#define descriptor_output_last		(1 << 12)
#define descriptor_input_more		(2 << 12)
#define descriptor_input_last		(3 << 12)
#define descriptor_status		(1 << 11)
#define descriptor_key_immediate	(2 << 8)
#define descriptor_ping			(1 << 7)
#define descriptor_yy			(1 << 6)
#define descriptor_no_irq		(0 << 4)
#define descriptor_irq_error		(1 << 4)
#define descriptor_irq_always		(3 << 4)
#define descriptor_branch_always	(3 << 2)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

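/*
 * The struct above is the generic OHCI DMA descriptor layout (a summary
 * of the format in the OHCI 1394 specification): the controller
 * transfers req_count bytes at data_address, follows branch_address to
 * the next descriptor block, and writes xferStatus and the residual
 * byte count back into transfer_status and res_count.  The low four
 * bits of branch_address are not address bits but the Z value, i.e. the
 * number of 16-byte descriptors in the block it points to; a Z of 0
 * terminates the DMA program.
 */
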
#define control_set(regs)	(regs)
#define control_clear(regs)	((regs) + 4)
#define command_ptr(regs)	((regs) + 12)
#define context_match(regs)	((regs) + 16)

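/*
 * The macros above address one DMA context's register bank: per the
 * OHCI register map, ContextControlSet sits at the context's base
 * offset, with ContextControlClear at +4, CommandPtr at +12 and, for
 * receive contexts, ContextMatch at +16.
 */
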
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
struct context {
	struct fw_ohci *ohci;
	u32 regs;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

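/*
 * struct context is the generalized descriptor buffer logic this commit
 * introduces: one DMA program built in a ring of descriptors.  New
 * blocks are appended at head_descriptor by context_append(), completed
 * blocks are retired from tail_descriptor by context_tasklet(), and the
 * per-context callback decides when to stop walking the ring.
 */
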
struct at_context {
	struct fw_ohci *ohci;
	dma_addr_t descriptor_bus;
	dma_addr_t buffer_bus;
	struct fw_packet *current_packet;

	struct list_head list;

	struct {
		struct descriptor more;
		__le32 header[4];
		struct descriptor last;
	} d;

	u32 regs;

	struct tasklet_struct tasklet;
};

#define it_header_sy(v)		((v) <<  0)
#define it_header_tcode(v)	((v) <<  4)
#define it_header_channel(v)	((v) <<  8)
#define it_header_tag(v)	((v) << 14)
#define it_header_speed(v)	((v) << 16)
#define it_header_data_length(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;

	/* Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held. */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define CONTEXT_CYCLE_MATCH_ENABLE	0x80000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof ab->descriptor);
	ab->descriptor.control = cpu_to_le16(descriptor_input_more |
					     descriptor_status |
					     descriptor_branch_always);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

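	/* Hook the new page into the DMA program.  The low four bits of a
	 * branch address carry the Z value, so "| 1" marks the target as a
	 * one-descriptor block. */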
	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack = ((status >> 16) & 0x1f) - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/* The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request. */

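	/* p.ack was computed as evt - 16 above, so this tests for the
	 * evt_bus_reset event code (0x09), i.e. the synthesized packet. */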
	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/* This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet. */
		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 ab->descriptor.data_address - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

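/*
 * A note on initialization: 'ab' below is a dummy buffer head on the
 * stack.  The two ar_context_add_page() calls chain real pages behind
 * it, so ab.next ends up as the first real buffer and
 * ab.descriptor.branch_address (with Z = 1 in the low bits) is exactly
 * what gets programmed into the context's CommandPtr register.
 */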
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	return 0;
}

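/*
 * The tasklet walks the ring from the tail: each retired block's
 * branch_address (carrying the next block's Z in its low four bits)
 * leads to the next block, and the context's callback decides whether
 * iteration continues.  In this driver's programs a block with z == 2
 * is a single descriptor followed by 16 bytes of immediate data, so its
 * last real descriptor is its first one.
 */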
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor = ctx->buffer;
	ctx->prev_descriptor = ctx->buffer;
	ctx->tail_descriptor = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/* We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full. */

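	/* A transfer_status of 0x8011 mimics a completed descriptor: the
	 * run bit (0x8000) plus event code 0x11, which is ack_complete (1)
	 * encoded as an event (ack + 16). */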
	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}

static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

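/*
 * Carve z descriptors out of the ring.  There are three ways to find
 * room: the head is behind the tail with space in between, the head is
 * ahead of the tail with space before the end of the buffer, or the
 * head can wrap around to the start of the buffer.  The head may never
 * catch up with the tail, since head == tail means the ring is full.
 */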
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(struct descriptor);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof *d);
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	return d;
}

static void context_run(struct context *ctx, u32 cycle_match)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, command_ptr(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, control_clear(ctx->regs), ~0);
	reg_write(ohci, control_set(ctx->regs), CONTEXT_RUN | cycle_match);
	flush_writes(ohci);
}

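/*
 * Link a freshly filled block of z descriptors (plus 'extra'
 * descriptor-sized slots of inline data) into the DMA program: the
 * previous block's branch_address is pointed at the new block with Z in
 * the low bits, and CONTEXT_WAKE tells a running context to re-fetch
 * the branch it may already have read as zero.
 */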
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);

	reg = reg_read(ctx->ohci, control_set(ctx->regs));
	if (reg & CONTEXT_ACTIVE)
		fw_notify("Tried to stop context, but it is still active "
			  "(0x%08x).\n", reg);
}

static void
do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
{
	struct fw_packet *p, *next;

	list_for_each_entry_safe(p, next, list, link)
		p->callback(p, &ohci->card, p->ack);
}

static void
complete_transmission(struct fw_packet *packet,
		      int ack, struct list_head *list)
{
	list_move_tail(&packet->link, list);
	packet->ack = ack;
}

/* This function prepares the first packet in the context queue for
 * transmission.  It must always be called with ohci->lock held to
 * ensure proper generation handling and locking around packet queue
 * manipulation. */
static void
at_context_setup_packet(struct at_context *ctx, struct list_head *list)
{
	struct fw_packet *packet;
	struct fw_ohci *ohci = ctx->ohci;
	int z, tcode;

	packet = fw_packet(ctx->list.next);

	memset(&ctx->d, 0, sizeof ctx->d);
	if (packet->payload_length > 0) {
		packet->payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(packet->payload_bus)) {
			complete_transmission(packet, RCODE_SEND_ERROR, list);
			return;
		}

		ctx->d.more.control =
			cpu_to_le16(descriptor_output_more |
				    descriptor_key_immediate);
		ctx->d.more.req_count = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count = cpu_to_le16(packet->timestamp);
		ctx->d.last.control =
			cpu_to_le16(descriptor_output_last |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.last.req_count = cpu_to_le16(packet->payload_length);
		ctx->d.last.data_address = cpu_to_le32(packet->payload_bus);
		z = 3;
	} else {
		ctx->d.more.control =
			cpu_to_le16(descriptor_output_last |
				    descriptor_key_immediate |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.more.req_count = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count = cpu_to_le16(packet->timestamp);
		z = 2;
	}

	/* The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet. */
	if (packet->header_length > 8) {
		ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					       (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					       (packet->header[0] & 0xffff0000));
		ctx->d.header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			ctx->d.header[3] = cpu_to_le32(packet->header[3]);
		else
			ctx->d.header[3] = packet->header[3];
	} else {
		ctx->d.header[0] =
			cpu_to_le32((OHCI1394_phy_tcode << 4) |
				    (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32(packet->header[0]);
		ctx->d.header[2] = cpu_to_le32(packet->header[1]);
		ctx->d.more.req_count = cpu_to_le16(12);
	}

	/* FIXME: Document how the locking works. */
	if (ohci->generation == packet->generation) {
		reg_write(ctx->ohci, command_ptr(ctx->regs),
			  ctx->descriptor_bus | z);
		reg_write(ctx->ohci, control_set(ctx->regs),
			  CONTEXT_RUN | CONTEXT_WAKE);
		ctx->current_packet = packet;
	} else {
		/* We don't return error codes from this function; all
		 * transmission errors are reported through the
		 * callback. */
		complete_transmission(packet, RCODE_GENERATION, list);
	}
}

static void at_context_stop(struct at_context *ctx)
{
	u32 reg;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);

	reg = reg_read(ctx->ohci, control_set(ctx->regs));
	if (reg & CONTEXT_ACTIVE)
		fw_notify("Tried to stop context, but it is still active "
			  "(0x%08x).\n", reg);
}

static void at_context_tasklet(unsigned long data)
{
	struct at_context *ctx = (struct at_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet *packet;
	LIST_HEAD(list);
	unsigned long flags;
	int evt;

	spin_lock_irqsave(&ohci->lock, flags);

	packet = fw_packet(ctx->list.next);

	at_context_stop(ctx);

	/* If the head of the list isn't the packet that just got
	 * transmitted, the packet got cancelled before we finished
	 * transmitting it. */
	if (ctx->current_packet != packet)
		goto skip_to_next;

	if (packet->payload_length > 0) {
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);
		evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.last.res_count);
	} else {
		evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.more.res_count);
	}

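	/* Event codes below 16 are evt_* errors; codes 16 and up encode
	 * the IEEE 1394 ack code as evt = ack + 16, so evt - 16 can be
	 * passed through as the packet's ack value. */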
	if (evt < 16) {
		switch (evt) {
		case OHCI1394_evt_timeout:
			/* Async response transmit timed out. */
			complete_transmission(packet, RCODE_CANCELLED, &list);
			break;

		case OHCI1394_evt_flushed:
			/* The packet was flushed; this should give the
			 * same error as when we try to use a stale
			 * generation count. */
			complete_transmission(packet,
					      RCODE_GENERATION, &list);
			break;

		case OHCI1394_evt_missing_ack:
			/* Using a valid (current) generation count,
			 * but the node is not on the bus or not
			 * sending acks. */
			complete_transmission(packet, RCODE_NO_ACK, &list);
			break;

		default:
			complete_transmission(packet, RCODE_SEND_ERROR, &list);
			break;
		}
	} else
		complete_transmission(packet, evt - 16, &list);

 skip_to_next:
	/* If more packets are queued, set up the next one. */
	if (!list_empty(&ctx->list))
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);
}

static int
at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	INIT_LIST_HEAD(&ctx->list);

	ctx->descriptor_bus =
		dma_map_single(ohci->card.device, &ctx->d,
			       sizeof ctx->d, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->descriptor_bus))
		return -ENOMEM;

	ctx->regs = regs;
	ctx->ohci = ohci;

	tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx);

	return 0;
}

#define header_get_destination(q)	(((q) >> 16) & 0xffff)
#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)

static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = header_get_tcode(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = header_get_data_length(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = header_get_tcode(packet->header[0]);
	length = header_get_data_length(packet->header[3]);
	payload = packet->payload;
	ext_tcode = header_get_extended_tcode(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof lock_old);
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	packet->ack = ACK_PENDING;
	packet->callback(packet, &ctx->ohci->card, packet->ack);

	offset =
		((unsigned long long)
		 header_get_offset_high(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}
}

static void
at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

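	/* Queue the packet; if it ended up at the head of the queue,
	 * nothing was in flight and we can set it up for transmission
	 * right away.  Otherwise at_context_tasklet() will get to it once
	 * the packets ahead of it complete. */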
	list_add_tail(&packet->link, &ctx->list);
	if (ctx->list.next == &packet->link)
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	do_packet_callbacks(ctx->ohci, &list);
}

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/* The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs. */
	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/* Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs. */
	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	at_context_stop(&ohci->at_request_ctx);
	at_context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/* This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place. */
	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/* Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last. */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	return IRQ_HANDLED;
}

static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	/* When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	ohci->next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			SA_SHIRQ, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/* We are ready to go, initiate bus reset to finish the
	 * initialization. */
	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/* When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/* Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect. */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ohci->lock, flags);

	if (packet->ack == 0) {
		fw_notify("cancelling packet %p (header[0]=%08x)\n",
			  packet, packet->header[0]);

		complete_transmission(packet, RCODE_CANCELLED, &list);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);

	/* Return success if we actually cancelled something. */
	return list_empty(&list) ? -ENOENT : 0;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/* FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses. */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return retval;
}

static void ir_context_tasklet(unsigned long data)
{
	struct iso_context *ctx = (struct iso_context *)data;

	(void)ctx;
}

#define ISO_BUFFER_SIZE (64 * 1024)

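/*
 * handle_it_packet() is the descriptor_callback_t used for isochronous
 * transmit contexts: context_tasklet() invokes it for each descriptor
 * block it walks, and returning 0 stops the walk at the first block the
 * controller has not completed yet (transfer_status still zero).
 */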
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & descriptor_irq_always)
		ctx->base.callback(&ctx->base,
				   0, le16_to_cpu(last->res_count),
				   ctx->base.callback_data);

	return 1;
}

static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask;
	unsigned long flags;
	int index, retval;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	ctx = &list[index];
	memset(ctx, 0, sizeof *ctx);
	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
			      OHCI1394_IsoXmitContextBase(index), callback);
	if (retval < 0) {
		spin_lock_irqsave(&ohci->lock, flags);
		*mask |= 1 << index;
		spin_unlock_irqrestore(&ohci->lock, flags);
		return ERR_PTR(retval);
	}

	return &ctx->base;
}

static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 cycle_match = 0;
	int index;

	index = ctx - ohci->it_context_list;
	if (cycle > 0)
		cycle_match = CONTEXT_CYCLE_MATCH_ENABLE |
			(cycle & 0x7fff) << 16;

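	/* Setting CONTEXT_CYCLE_MATCH_ENABLE with a 15-bit cycle number in
	 * bits 16-30 of ContextControl makes the context wait to start
	 * transmitting until the bus cycle counter matches. */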
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
	context_run(&ctx->context, cycle_match);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRcvContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		ohci->ir_context_mask |= 1 << index;
	}
	flush_writes(ohci);

	context_release(&ctx->context);

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate. */

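	/* Roughly, the descriptor block built below is z descriptors long:
	 * one OUTPUT_MORE-immediate descriptor plus one 16-byte slot
	 * holding the two IT header quadlets (z = 2, or z = 1 for a skip
	 * packet), one more descriptor if there is extra user header data,
	 * and one per page the payload straddles.  The user header itself
	 * is copied into header_z additional slots after the block. */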
	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control = cpu_to_le16(descriptor_key_immediate);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(it_header_sy(p->sy) |
					it_header_tag(p->tag) |
					it_header_tcode(TCODE_STREAM_DATA) |
					it_header_channel(ctx->base.channel) |
					it_header_speed(ctx->base.speed));
		header[1] =
			cpu_to_le32(it_header_data_length(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = descriptor_irq_always;
	else
		irq = descriptor_no_irq;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(descriptor_output_last |
				     descriptor_status |
				     descriptor_branch_always |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.send_iso		= ohci_send_iso,
};

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

/* ---------- pci subsystem interface ---------- */

enum {
	CLEANUP_SELF_ID,
	CLEANUP_REGISTERS,
	CLEANUP_IOMEM,
	CLEANUP_DISABLE,
	CLEANUP_PUT_CARD,
};

static int cleanup(struct fw_ohci *ohci, int stage, int code)
{
	struct pci_dev *dev = to_pci_dev(ohci->card.device);

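	/* The cases below intentionally fall through: cleaning up a given
	 * stage also unwinds every stage listed after it. */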
	switch (stage) {
	case CLEANUP_SELF_ID:
		dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
				  ohci->self_id_cpu, ohci->self_id_bus);
	case CLEANUP_REGISTERS:
		kfree(ohci->it_context_list);
		kfree(ohci->ir_context_list);
		pci_iounmap(dev, ohci->registers);
	case CLEANUP_IOMEM:
		pci_release_region(dev, 0);
	case CLEANUP_DISABLE:
		pci_disable_device(dev);
	case CLEANUP_PUT_CARD:
		fw_card_put(&ohci->card);
	}

	return code;
}

static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed;
	u64 guid;
	int error_code;
	size_t size;

	ohci = kzalloc(sizeof *ohci, GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	if (pci_enable_device(dev)) {
		fw_error("Failed to enable OHCI hardware.\n");
		return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV);
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	if (pci_request_region(dev, 0, ohci_driver_name)) {
		fw_error("MMIO resource unavailable\n");
		return cleanup(ohci, CLEANUP_DISABLE, -EBUSY);
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		return cleanup(ohci, CLEANUP_IOMEM, -ENXIO);
	}

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
	}

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	at_context_init(&ohci->at_request_ctx, ohci,
			OHCI1394_AsReqTrContextControlSet);

	at_context_init(&ohci->at_response_ctx, ohci,
			OHCI1394_AsRspTrContextControlSet);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

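	/* Writing all ones to an Iso*IntMask register and reading it back
	 * reveals which isochronous contexts this controller implements;
	 * bits for unimplemented contexts do not stick. */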
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_masterIntEnable);

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (error_code < 0)
		return cleanup(ohci, CLEANUP_SELF_ID, error_code);

	fw_notify("Added fw-ohci device %s.\n", dev->dev.bus_id);

	return 0;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable);
	fw_core_remove_card(&ohci->card);

	/* FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more. */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	cleanup(ohci, CLEANUP_SELF_ID, 0);

	fw_notify("Removed fw-ohci device.\n");
}

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);