1 | /* -*- c-basic-offset: 8 -*- |
2 | * | |
3 | * fw-ohci.c - Driver for OHCI 1394 boards | |
4 | * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software Foundation, | |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
19 | */ | |
20 | ||
21 | #include <linux/kernel.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/pci.h> | |
26 | #include <linux/delay.h> | |
27 | #include <linux/poll.h> | |
28 | #include <asm/uaccess.h> | |
29 | #include <asm/semaphore.h> | |
30 | ||
31 | #include "fw-transaction.h" | |
32 | #include "fw-ohci.h" | |
33 | ||
34 | #define descriptor_output_more 0 | |
35 | #define descriptor_output_last (1 << 12) | |
36 | #define descriptor_input_more (2 << 12) | |
37 | #define descriptor_input_last (3 << 12) | |
38 | #define descriptor_status (1 << 11) | |
39 | #define descriptor_key_immediate (2 << 8) | |
40 | #define descriptor_ping (1 << 7) | |
41 | #define descriptor_yy (1 << 6) | |
42 | #define descriptor_no_irq (0 << 4) | |
43 | #define descriptor_irq_error (1 << 4) | |
44 | #define descriptor_irq_always (3 << 4) | |
45 | #define descriptor_branch_always (3 << 2) | |
46 | ||
47 | struct descriptor { | |
48 | __le16 req_count; | |
49 | __le16 control; | |
50 | __le32 data_address; | |
51 | __le32 branch_address; | |
52 | __le16 res_count; | |
53 | __le16 transfer_status; | |
54 | } __attribute__((aligned(16))); | |
55 | ||
56 | struct ar_context { | |
57 | struct fw_ohci *ohci; | |
58 | struct descriptor descriptor; | |
59 | __le32 buffer[512]; | |
60 | dma_addr_t descriptor_bus; | |
61 | dma_addr_t buffer_bus; | |
62 | ||
63 | u32 command_ptr; | |
64 | u32 control_set; | |
65 | u32 control_clear; | |
66 | ||
67 | struct tasklet_struct tasklet; | |
68 | }; | |
69 | ||
70 | struct at_context { | |
71 | struct fw_ohci *ohci; | |
72 | dma_addr_t descriptor_bus; | |
73 | dma_addr_t buffer_bus; | |
74 | ||
75 | struct list_head list; | |
76 | ||
77 | struct { | |
78 | struct descriptor more; | |
79 | __le32 header[4]; | |
80 | struct descriptor last; | |
81 | } d; | |
82 | ||
83 | u32 command_ptr; | |
84 | u32 control_set; | |
85 | u32 control_clear; | |
86 | ||
87 | struct tasklet_struct tasklet; | |
88 | }; | |
89 | ||
90 | #define it_header_sy(v) ((v) << 0) | |
91 | #define it_header_tcode(v) ((v) << 4) | |
92 | #define it_header_channel(v) ((v) << 8) | |
93 | #define it_header_tag(v) ((v) << 14) | |
94 | #define it_header_speed(v) ((v) << 16) | |
95 | #define it_header_data_length(v) ((v) << 16) | |
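| /* it_header_speed() and it_header_data_length() deliberately share the | |
| * same shift: the speed code sits in the first header quadlet of an | |
| * isochronous transmit descriptor block, data_length in the second | |
| * (see the header setup in ohci_queue_iso below). */ | |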
96 | ||
97 | struct iso_context { | |
98 | struct fw_iso_context base; | |
99 | struct tasklet_struct tasklet; | |
100 | u32 control_set; | |
101 | u32 control_clear; | |
102 | u32 command_ptr; | |
103 | u32 context_match; | |
104 | ||
105 | struct descriptor *buffer; | |
106 | dma_addr_t buffer_bus; | |
107 | struct descriptor *head_descriptor; | |
108 | struct descriptor *tail_descriptor; | |
109 | struct descriptor *tail_descriptor_last; | |
110 | struct descriptor *prev_descriptor; | |
111 | }; | |
112 | ||
113 | #define CONFIG_ROM_SIZE 1024 | |
114 | ||
115 | struct fw_ohci { | |
116 | struct fw_card card; | |
117 | ||
118 | __iomem char *registers; | |
119 | dma_addr_t self_id_bus; | |
120 | __le32 *self_id_cpu; | |
121 | struct tasklet_struct bus_reset_tasklet; | |
122 | int generation; | |
123 | int request_generation; | |
124 | ||
125 | /* Spinlock for accessing fw_ohci data. Never call out of | |
126 | * this driver with this lock held. */ | |
127 | spinlock_t lock; | |
128 | u32 self_id_buffer[512]; | |
129 | ||
130 | /* Config rom buffers */ | |
131 | __be32 *config_rom; | |
132 | dma_addr_t config_rom_bus; | |
133 | __be32 *next_config_rom; | |
134 | dma_addr_t next_config_rom_bus; | |
135 | u32 next_header; | |
136 | ||
137 | struct ar_context ar_request_ctx; | |
138 | struct ar_context ar_response_ctx; | |
139 | struct at_context at_request_ctx; | |
140 | struct at_context at_response_ctx; | |
141 | ||
142 | u32 it_context_mask; | |
143 | struct iso_context *it_context_list; | |
144 | u32 ir_context_mask; | |
145 | struct iso_context *ir_context_list; | |
146 | }; | |
147 | ||
148 | static inline struct fw_ohci *fw_ohci(struct fw_card *card) | |
149 | { | |
150 | return container_of(card, struct fw_ohci, card); | |
151 | } | |
152 | ||
153 | #define CONTEXT_CYCLE_MATCH_ENABLE 0x80000000 | |
154 | ||
155 | #define CONTEXT_RUN 0x8000 | |
156 | #define CONTEXT_WAKE 0x1000 | |
157 | #define CONTEXT_DEAD 0x0800 | |
158 | #define CONTEXT_ACTIVE 0x0400 | |
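| /* Bits in the per-context ContextControl registers; the Set/Clear | |
| * register pair is accessed through control_set/control_clear below. */ | |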
159 | ||
160 | #define OHCI1394_MAX_AT_REQ_RETRIES 0x2 | |
161 | #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 | |
162 | #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 | |
163 | ||
164 | #define FW_OHCI_MAJOR 240 | |
165 | #define OHCI1394_REGISTER_SIZE 0x800 | |
166 | #define OHCI_LOOP_COUNT 500 | |
167 | #define OHCI1394_PCI_HCI_Control 0x40 | |
168 | #define SELF_ID_BUF_SIZE 0x800 | |
169 | ||
170 | /* FIXME: Move this to linux/pci_ids.h */ | |
171 | #define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 | |
172 | ||
173 | static char ohci_driver_name[] = KBUILD_MODNAME; | |
174 | ||
175 | static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) | |
176 | { | |
177 | writel(data, ohci->registers + offset); | |
178 | } | |
179 | ||
180 | static inline u32 reg_read(const struct fw_ohci *ohci, int offset) | |
181 | { | |
182 | return readl(ohci->registers + offset); | |
183 | } | |
184 | ||
185 | static inline void flush_writes(const struct fw_ohci *ohci) | |
186 | { | |
187 | /* Do a dummy read to flush writes. */ | |
188 | reg_read(ohci, OHCI1394_Version); | |
189 | } | |
190 | ||
191 | static int | |
192 | ohci_update_phy_reg(struct fw_card *card, int addr, | |
193 | int clear_bits, int set_bits) | |
194 | { | |
195 | struct fw_ohci *ohci = fw_ohci(card); | |
196 | u32 val, old; | |
197 | ||
198 | reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); | |
199 | msleep(2); | |
200 | val = reg_read(ohci, OHCI1394_PhyControl); | |
201 | if ((val & OHCI1394_PhyControl_ReadDone) == 0) { | |
202 | fw_error("failed to read phy reg.\n"); | |
203 | return -EBUSY; | |
204 | } | |
205 | ||
206 | old = OHCI1394_PhyControl_ReadData(val); | |
207 | old = (old & ~clear_bits) | set_bits; | |
208 | reg_write(ohci, OHCI1394_PhyControl, | |
209 | OHCI1394_PhyControl_Write(addr, old)); | |
210 | ||
211 | return 0; | |
212 | } | |
213 | ||
214 | static void ar_context_run(struct ar_context *ctx) | |
215 | { | |
216 | reg_write(ctx->ohci, ctx->command_ptr, ctx->descriptor_bus | 1); | |
217 | reg_write(ctx->ohci, ctx->control_set, CONTEXT_RUN); | |
218 | flush_writes(ctx->ohci); | |
219 | } | |
220 | ||
221 | static void ar_context_tasklet(unsigned long data) | |
222 | { | |
223 | struct ar_context *ctx = (struct ar_context *)data; | |
224 | struct fw_ohci *ohci = ctx->ohci; | |
225 | u32 status; | |
226 | int length, speed, ack, timestamp, tcode; | |
227 | ||
228 | /* FIXME: What to do about evt_* errors? */ | |
229 | length = le16_to_cpu(ctx->descriptor.req_count) - | |
230 | le16_to_cpu(ctx->descriptor.res_count) - 4; | |
231 | status = le32_to_cpu(ctx->buffer[length / 4]); | |
232 | ack = ((status >> 16) & 0x1f) - 16; | |
233 | speed = (status >> 21) & 0x7; | |
234 | timestamp = status & 0xffff; | |
235 | ||
236 | ctx->buffer[0] = le32_to_cpu(ctx->buffer[0]); | |
237 | ctx->buffer[1] = le32_to_cpu(ctx->buffer[1]); | |
238 | ctx->buffer[2] = le32_to_cpu(ctx->buffer[2]); | |
239 | ||
240 | tcode = (ctx->buffer[0] >> 4) & 0x0f; | |
241 | if (TCODE_IS_BLOCK_PACKET(tcode)) | |
242 | ctx->buffer[3] = le32_to_cpu(ctx->buffer[3]); | |
243 | ||
244 | /* The OHCI bus reset handler synthesizes a phy packet with | |
245 | * the new generation number when a bus reset happens (see | |
246 | * section 8.4.2.3). This helps us determine when a request | |
247 | * was received and make sure we send the response in the same | |
248 | * generation. We only need this for requests; for responses | |
249 | * we use the unique tlabel for finding the matching | |
250 | * request. */ | |
251 | ||
252 | if (ack + 16 == 0x09) | |
253 | ohci->request_generation = (ctx->buffer[2] >> 16) & 0xff; | |
254 | else if (ctx == &ohci->ar_request_ctx) | |
255 | fw_core_handle_request(&ohci->card, speed, ack, timestamp, | |
256 | ohci->request_generation, | |
257 | length, ctx->buffer); | |
258 | else | |
259 | fw_core_handle_response(&ohci->card, speed, ack, timestamp, | |
260 | length, ctx->buffer); | |
261 | ||
262 | ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus); | |
263 | ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer); | |
264 | ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer); | |
265 | ||
266 | dma_sync_single_for_device(ohci->card.device, ctx->descriptor_bus, | |
267 | sizeof ctx->descriptor, DMA_TO_DEVICE); | |
268 | ||
269 | /* FIXME: We stop and restart the ar context here, what if we | |
270 | * stop while a receive is in progress? Maybe we could just | |
271 | * loop the context back to itself and use it in buffer fill | |
272 | * mode as intended... */ | |
273 | ||
274 | reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN); | |
275 | ar_context_run(ctx); | |
276 | } | |
277 | ||
278 | static int | |
279 | ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 control_set) | |
280 | { | |
281 | ctx->descriptor_bus = | |
282 | dma_map_single(ohci->card.device, &ctx->descriptor, | |
283 | sizeof ctx->descriptor, DMA_TO_DEVICE); | |
284 | if (ctx->descriptor_bus == 0) | |
285 | return -ENOMEM; | |
286 | ||
287 | if (ctx->descriptor_bus & 0xf) | |
288 | fw_notify("descriptor not 16-byte aligned: 0x%08x\n", | |
289 | ctx->descriptor_bus); | |
290 | ||
291 | ctx->buffer_bus = | |
292 | dma_map_single(ohci->card.device, ctx->buffer, | |
293 | sizeof ctx->buffer, DMA_FROM_DEVICE); | |
294 | ||
295 | if (ctx->buffer_bus == 0) { | |
296 | dma_unmap_single(ohci->card.device, ctx->descriptor_bus, | |
297 | sizeof ctx->descriptor, DMA_TO_DEVICE); | |
298 | return -ENOMEM; | |
299 | } | |
300 | ||
301 | memset(&ctx->descriptor, 0, sizeof ctx->descriptor); | |
302 | ctx->descriptor.control = cpu_to_le16(descriptor_input_more | | |
303 | descriptor_status | | |
304 | descriptor_branch_always); | |
305 | ctx->descriptor.req_count = cpu_to_le16(sizeof ctx->buffer); | |
306 | ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus); | |
307 | ctx->descriptor.res_count = cpu_to_le16(sizeof ctx->buffer); | |
308 | ||
309 | ctx->control_set = control_set; | |
310 | ctx->control_clear = control_set + 4; | |
311 | ctx->command_ptr = control_set + 12; | |
312 | ctx->ohci = ohci; | |
313 | ||
314 | tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); | |
315 | ||
316 | ar_context_run(ctx); | |
317 | ||
318 | return 0; | |
319 | } | |
320 | ||
321 | static void | |
322 | do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list) | |
323 | { | |
324 | struct fw_packet *p, *next; | |
325 | ||
326 | list_for_each_entry_safe(p, next, list, link) | |
327 | p->callback(p, &ohci->card, p->status); | |
328 | } | |
329 | ||
330 | static void | |
331 | complete_transmission(struct fw_packet *packet, | |
332 | int status, struct list_head *list) | |
333 | { | |
334 | list_move_tail(&packet->link, list); | |
335 | packet->status = status; | |
336 | } | |
337 | ||
338 | /* This function prepares the first packet in the context queue for | |
339 | * transmission. Must always be called with the ohci->lock held to | |
340 | * ensure proper generation handling and locking around packet queue | |
341 | * manipulation. */ | |
342 | static void | |
343 | at_context_setup_packet(struct at_context *ctx, struct list_head *list) | |
344 | { | |
345 | struct fw_packet *packet; | |
346 | struct fw_ohci *ohci = ctx->ohci; | |
347 | int z, tcode; | |
348 | ||
349 | packet = fw_packet(ctx->list.next); | |
350 | ||
351 | memset(&ctx->d, 0, sizeof ctx->d); | |
352 | if (packet->payload_length > 0) { | |
353 | packet->payload_bus = dma_map_single(ohci->card.device, | |
354 | packet->payload, | |
355 | packet->payload_length, | |
356 | DMA_TO_DEVICE); | |
357 | if (packet->payload_bus == 0) { | |
358 | complete_transmission(packet, -ENOMEM, list); | |
359 | return; | |
360 | } | |
361 | ||
362 | ctx->d.more.control = | |
363 | cpu_to_le16(descriptor_output_more | | |
364 | descriptor_key_immediate); | |
365 | ctx->d.more.req_count = cpu_to_le16(packet->header_length); | |
366 | ctx->d.more.res_count = cpu_to_le16(packet->timestamp); | |
367 | ctx->d.last.control = | |
368 | cpu_to_le16(descriptor_output_last | | |
369 | descriptor_irq_always | | |
370 | descriptor_branch_always); | |
371 | ctx->d.last.req_count = cpu_to_le16(packet->payload_length); | |
372 | ctx->d.last.data_address = cpu_to_le32(packet->payload_bus); | |
373 | z = 3; | |
374 | } else { | |
375 | ctx->d.more.control = | |
376 | cpu_to_le16(descriptor_output_last | | |
377 | descriptor_key_immediate | | |
378 | descriptor_irq_always | | |
379 | descriptor_branch_always); | |
380 | ctx->d.more.req_count = cpu_to_le16(packet->header_length); | |
381 | ctx->d.more.res_count = cpu_to_le16(packet->timestamp); | |
382 | z = 2; | |
383 | } | |
384 | ||
385 | /* The DMA format for asynchronous link packets is different | |
386 | * from the IEEE1394 layout, so shift the fields around | |
387 | * accordingly. If header_length is 8, it's a PHY packet, to | |
388 | * which we need to prepend an extra quadlet. */ | |
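| /* For a request packet this means, roughly: the speed code lands in | |
| * bits 16-18 of the first DMA quadlet next to tlabel/rt/tcode/priority, | |
| * while destination_ID moves into the upper half of the second quadlet. */ | |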
389 | if (packet->header_length > 8) { | |
390 | ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | |
391 | (packet->speed << 16)); | |
392 | ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) | | |
393 | (packet->header[0] & 0xffff0000)); | |
394 | ctx->d.header[2] = cpu_to_le32(packet->header[2]); | |
395 | ||
396 | tcode = (packet->header[0] >> 4) & 0x0f; | |
397 | if (TCODE_IS_BLOCK_PACKET(tcode)) | |
398 | ctx->d.header[3] = cpu_to_le32(packet->header[3]); | |
399 | else | |
400 | ctx->d.header[3] = packet->header[3]; | |
401 | } else { | |
402 | ctx->d.header[0] = | |
403 | cpu_to_le32((OHCI1394_phy_tcode << 4) | | |
404 | (packet->speed << 16)); | |
405 | ctx->d.header[1] = cpu_to_le32(packet->header[0]); | |
406 | ctx->d.header[2] = cpu_to_le32(packet->header[1]); | |
407 | ctx->d.more.req_count = cpu_to_le16(12); | |
408 | } | |
409 | ||
410 | /* FIXME: Document how the locking works. */ | |
411 | if (ohci->generation == packet->generation) { | |
412 | reg_write(ctx->ohci, ctx->command_ptr, | |
413 | ctx->descriptor_bus | z); | |
414 | reg_write(ctx->ohci, ctx->control_set, | |
415 | CONTEXT_RUN | CONTEXT_WAKE); | |
416 | } else { | |
417 | /* We don't return error codes from this function; all | |
418 | * transmission errors are reported through the | |
419 | * callback. */ | |
420 | complete_transmission(packet, -ESTALE, list); | |
421 | } | |
422 | } | |
423 | ||
424 | static void at_context_stop(struct at_context *ctx) | |
425 | { | |
426 | u32 reg; | |
427 | ||
428 | reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN); | |
429 | ||
430 | reg = reg_read(ctx->ohci, ctx->control_set); | |
431 | if (reg & CONTEXT_ACTIVE) | |
432 | fw_notify("Tried to stop context, but it is still active " | |
433 | "(0x%08x).\n", reg); | |
434 | } | |
435 | ||
436 | static void at_context_tasklet(unsigned long data) | |
437 | { | |
438 | struct at_context *ctx = (struct at_context *)data; | |
439 | struct fw_ohci *ohci = ctx->ohci; | |
440 | struct fw_packet *packet; | |
441 | LIST_HEAD(list); | |
442 | unsigned long flags; | |
443 | int evt; | |
444 | ||
445 | spin_lock_irqsave(&ohci->lock, flags); | |
446 | ||
447 | packet = fw_packet(ctx->list.next); | |
448 | ||
449 | at_context_stop(ctx); | |
450 | ||
451 | if (packet->payload_length > 0) { | |
452 | dma_unmap_single(ohci->card.device, packet->payload_bus, | |
453 | packet->payload_length, DMA_TO_DEVICE); | |
454 | evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f; | |
455 | packet->timestamp = le16_to_cpu(ctx->d.last.res_count); | |
456 | } | |
457 | else { | |
458 | evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f; | |
459 | packet->timestamp = le16_to_cpu(ctx->d.more.res_count); | |
460 | } | |
461 | ||
462 | if (evt < 16) { | |
463 | switch (evt) { | |
464 | case OHCI1394_evt_timeout: | |
465 | /* Async response transmit timed out. */ | |
466 | complete_transmission(packet, -ETIMEDOUT, &list); | |
467 | break; | |
468 | ||
469 | case OHCI1394_evt_flushed: | |
470 | /* The packet was flushed; this should give the same | |
471 | * error as when we try to use a stale | |
472 | * generation count. */ | |
473 | complete_transmission(packet, -ESTALE, &list); | |
474 | break; | |
475 | ||
476 | case OHCI1394_evt_missing_ack: | |
477 | /* This would be a higher level software | |
478 | * error, it is using a valid (current) | |
479 | * generation count, but the node is not on | |
480 | * the bus. */ | |
481 | complete_transmission(packet, -ENODEV, &list); | |
482 | break; | |
483 | ||
484 | default: | |
485 | complete_transmission(packet, -EIO, &list); | |
486 | break; | |
487 | } | |
488 | } else | |
489 | complete_transmission(packet, evt - 16, &list); | |
490 | ||
491 | /* If more packets are queued, set up the next one. */ | |
492 | if (!list_empty(&ctx->list)) | |
493 | at_context_setup_packet(ctx, &list); | |
494 | ||
495 | spin_unlock_irqrestore(&ohci->lock, flags); | |
496 | ||
497 | do_packet_callbacks(ohci, &list); | |
498 | } | |
499 | ||
500 | static int | |
501 | at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 control_set) | |
502 | { | |
503 | INIT_LIST_HEAD(&ctx->list); | |
504 | ||
505 | ctx->descriptor_bus = | |
506 | dma_map_single(ohci->card.device, &ctx->d, | |
507 | sizeof ctx->d, DMA_TO_DEVICE); | |
508 | if (ctx->descriptor_bus == 0) | |
509 | return -ENOMEM; | |
510 | ||
511 | ctx->control_set = control_set; | |
512 | ctx->control_clear = control_set + 4; | |
513 | ctx->command_ptr = control_set + 12; | |
514 | ctx->ohci = ohci; | |
515 | ||
516 | tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx); | |
517 | ||
518 | return 0; | |
519 | } | |
520 | ||
521 | static void | |
522 | at_context_transmit(struct at_context *ctx, struct fw_packet *packet) | |
523 | { | |
524 | LIST_HEAD(list); | |
525 | unsigned long flags; | |
526 | int was_empty; | |
527 | ||
528 | spin_lock_irqsave(&ctx->ohci->lock, flags); | |
529 | ||
530 | was_empty = list_empty(&ctx->list); | |
531 | list_add_tail(&packet->link, &ctx->list); | |
532 | if (was_empty) | |
533 | at_context_setup_packet(ctx, &list); | |
534 | ||
535 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); | |
536 | ||
537 | do_packet_callbacks(ctx->ohci, &list); | |
538 | } | |
539 | ||
540 | static void bus_reset_tasklet(unsigned long data) | |
541 | { | |
542 | struct fw_ohci *ohci = (struct fw_ohci *)data; | |
543 | int self_id_count, i, j, reg, node_id; | |
544 | int generation, new_generation; | |
545 | unsigned long flags; | |
546 | ||
547 | reg = reg_read(ohci, OHCI1394_NodeID); | |
548 | if (!(reg & OHCI1394_NodeID_idValid)) { | |
549 | fw_error("node ID not valid, new bus reset in progress\n"); | |
550 | return; | |
551 | } | |
552 | node_id = reg & 0xffff; | |
553 | ||
554 | /* The count in the SelfIDCount register is the number of | |
555 | * bytes in the self ID receive buffer. Since we also receive | |
556 | * the inverted quadlets and a header quadlet, we shift one | |
557 | * bit extra to get the actual number of self IDs. */ | |
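| /* For example, the header quadlet plus three self IDs and their | |
| * inverted copies occupy 28 bytes, and 28 >> 3 gives the three self IDs. */ | |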
558 | ||
559 | self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; | |
560 | generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; | |
561 | ||
562 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { | |
563 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) | |
564 | fw_error("inconsistent self IDs\n"); | |
565 | ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); | |
566 | } | |
567 | ||
568 | /* Check the consistency of the self IDs we just read. The | |
569 | * problem we face is that a new bus reset can start while we | |
570 | * read out the self IDs from the DMA buffer. If this happens, | |
571 | * the DMA buffer will be overwritten with new self IDs and we | |
572 | * will read out inconsistent data. The OHCI specification | |
573 | * (section 11.2) recommends a technique similar to | |
574 | * linux/seqlock.h, where we remember the generation of the | |
575 | * self IDs in the buffer before reading them out and compare | |
576 | * it to the current generation after reading them out. If | |
577 | * the two generations match we know we have a consistent set | |
578 | * of self IDs. */ | |
579 | ||
580 | new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; | |
581 | if (new_generation != generation) { | |
582 | fw_notify("recursive bus reset detected, " | |
583 | "discarding self ids\n"); | |
584 | return; | |
585 | } | |
586 | ||
587 | /* FIXME: Document how the locking works. */ | |
588 | spin_lock_irqsave(&ohci->lock, flags); | |
589 | ||
590 | ohci->generation = generation; | |
591 | at_context_stop(&ohci->at_request_ctx); | |
592 | at_context_stop(&ohci->at_response_ctx); | |
593 | reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); | |
594 | ||
595 | /* This next bit is unrelated to the AT context stuff but we | |
596 | * have to do it under the spinlock also. If a new config rom | |
597 | * was set up before this reset, the old one is now no longer | |
598 | * in use and we can free it. Update the config rom pointers | |
599 | * to point to the current config rom and clear the | |
600 | * next_config_rom pointer so a new update can take place. */ | |
601 | ||
602 | if (ohci->next_config_rom != NULL) { | |
603 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | |
604 | ohci->config_rom, ohci->config_rom_bus); | |
605 | ohci->config_rom = ohci->next_config_rom; | |
606 | ohci->config_rom_bus = ohci->next_config_rom_bus; | |
607 | ohci->next_config_rom = NULL; | |
608 | ||
609 | /* Restore config_rom image and manually update | |
610 | * config_rom registers. Writing the header quadlet | |
611 | * will indicate that the config rom is ready, so we | |
612 | * do that last. */ | |
613 | reg_write(ohci, OHCI1394_BusOptions, | |
614 | be32_to_cpu(ohci->config_rom[2])); | |
615 | ohci->config_rom[0] = cpu_to_be32(ohci->next_header); | |
616 | reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header); | |
617 | } | |
618 | ||
619 | spin_unlock_irqrestore(&ohci->lock, flags); | |
620 | ||
621 | fw_core_handle_bus_reset(&ohci->card, node_id, generation, | |
622 | self_id_count, ohci->self_id_buffer); | |
623 | } | |
624 | ||
625 | static irqreturn_t irq_handler(int irq, void *data) | |
626 | { | |
627 | struct fw_ohci *ohci = data; | |
628 | u32 event, iso_event; | |
629 | int i; | |
630 | ||
631 | event = reg_read(ohci, OHCI1394_IntEventClear); | |
632 | ||
633 | if (!event) | |
634 | return IRQ_NONE; | |
635 | ||
636 | reg_write(ohci, OHCI1394_IntEventClear, event); | |
637 | ||
638 | if (event & OHCI1394_selfIDComplete) | |
639 | tasklet_schedule(&ohci->bus_reset_tasklet); | |
640 | ||
641 | if (event & OHCI1394_RQPkt) | |
642 | tasklet_schedule(&ohci->ar_request_ctx.tasklet); | |
643 | ||
644 | if (event & OHCI1394_RSPkt) | |
645 | tasklet_schedule(&ohci->ar_response_ctx.tasklet); | |
646 | ||
647 | if (event & OHCI1394_reqTxComplete) | |
648 | tasklet_schedule(&ohci->at_request_ctx.tasklet); | |
649 | ||
650 | if (event & OHCI1394_respTxComplete) | |
651 | tasklet_schedule(&ohci->at_response_ctx.tasklet); | |
652 | ||
653 | iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet); | |
654 | reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); | |
655 | ||
656 | while (iso_event) { | |
657 | i = ffs(iso_event) - 1; | |
658 | tasklet_schedule(&ohci->ir_context_list[i].tasklet); | |
659 | iso_event &= ~(1 << i); | |
660 | } | |
661 | ||
662 | iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet); | |
663 | reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); | |
664 | ||
665 | while (iso_event) { | |
666 | i = ffs(iso_event) - 1; | |
667 | tasklet_schedule(&ohci->it_context_list[i].tasklet); | |
668 | iso_event &= ~(1 << i); | |
669 | } | |
670 | ||
671 | return IRQ_HANDLED; | |
672 | } | |
673 | ||
674 | static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) | |
675 | { | |
676 | struct fw_ohci *ohci = fw_ohci(card); | |
677 | struct pci_dev *dev = to_pci_dev(card->device); | |
678 | ||
679 | /* When the link is not yet enabled, the atomic config rom | |
680 | * update mechanism described below in ohci_set_config_rom() | |
681 | * is not active. We have to update ConfigRomHeader and | |
682 | * BusOptions manually, and the write to ConfigROMmap takes | |
683 | * effect immediately. We tie this to the enabling of the | |
684 | * link, so we have a valid config rom before enabling - the | |
685 | * OHCI requires that ConfigROMhdr and BusOptions have valid | |
686 | * values before enabling. | |
687 | * | |
688 | * However, when the ConfigROMmap is written, some controllers | |
689 | * always read back quadlets 0 and 2 from the config rom to | |
690 | * the ConfigRomHeader and BusOptions registers on bus reset. | |
691 | * They shouldn't do that in this initial case where the link | |
692 | * isn't enabled. This means we have to use the same | |
693 | * workaround here, setting the bus header to 0 and then write | |
694 | * the right values in the bus reset tasklet. | |
695 | */ | |
696 | ||
697 | ohci->next_config_rom = | |
698 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, | |
699 | &ohci->next_config_rom_bus, GFP_KERNEL); | |
700 | if (ohci->next_config_rom == NULL) | |
701 | return -ENOMEM; | |
702 | ||
703 | memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE); | |
704 | fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4); | |
705 | ||
706 | ohci->next_header = config_rom[0]; | |
707 | ohci->next_config_rom[0] = 0; | |
708 | reg_write(ohci, OHCI1394_ConfigROMhdr, 0); | |
709 | reg_write(ohci, OHCI1394_BusOptions, config_rom[2]); | |
710 | reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); | |
711 | ||
712 | reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); | |
713 | ||
714 | if (request_irq(dev->irq, irq_handler, | |
715 | IRQF_SHARED, ohci_driver_name, ohci)) { | |
716 | fw_error("Failed to allocate shared interrupt %d.\n", | |
717 | dev->irq); | |
718 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | |
719 | ohci->next_config_rom, ohci->next_config_rom_bus); | |
720 | return -EIO; | |
721 | } | |
722 | ||
723 | reg_write(ohci, OHCI1394_HCControlSet, | |
724 | OHCI1394_HCControl_linkEnable | | |
725 | OHCI1394_HCControl_BIBimageValid); | |
726 | flush_writes(ohci); | |
727 | ||
728 | /* We are ready to go, initiate bus reset to finish the | |
729 | * initialization. */ | |
730 | ||
731 | fw_core_initiate_bus_reset(&ohci->card, 1); | |
732 | ||
733 | return 0; | |
734 | } | |
735 | ||
736 | static int | |
737 | ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | |
738 | { | |
739 | struct fw_ohci *ohci; | |
740 | unsigned long flags; | |
741 | int retval = 0; | |
742 | __be32 *next_config_rom; | |
743 | dma_addr_t next_config_rom_bus; | |
744 | ||
745 | ohci = fw_ohci(card); | |
746 | ||
747 | /* When the OHCI controller is enabled, the config rom update | |
748 | * mechanism is a bit tricky, but easy enough to use. See | |
749 | * section 5.5.6 in the OHCI specification. | |
750 | * | |
751 | * The OHCI controller caches the new config rom address in a | |
752 | * shadow register (ConfigROMmapNext) and needs a bus reset | |
753 | * for the changes to take place. When the bus reset is | |
754 | * detected, the controller loads the new values for the | |
755 | * ConfigRomHeader and BusOptions registers from the specified | |
756 | * config rom and loads ConfigROMmap from the ConfigROMmapNext | |
757 | * shadow register. All automatically and atomically. | |
758 | * | |
759 | * Now, there's a twist to this story. The automatic load of | |
760 | * ConfigRomHeader and BusOptions doesn't honor the | |
761 | * noByteSwapData bit, so with a be32 config rom, the | |
762 | controller will load be32 values into these registers | |
763 | during the atomic update, even on little endian | |
764 | * architectures. The workaround we use is to put a 0 in the | |
765 | * header quadlet; 0 is endian agnostic and means that the | |
766 | * config rom isn't ready yet. In the bus reset tasklet we | |
767 | * then set up the real values for the two registers. | |
768 | * | |
769 | * We use ohci->lock to avoid racing with the code that sets | |
770 | * ohci->next_config_rom to NULL (see bus_reset_tasklet). | |
771 | */ | |
772 | ||
773 | next_config_rom = | |
774 | dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, | |
775 | &next_config_rom_bus, GFP_KERNEL); | |
776 | if (next_config_rom == NULL) | |
777 | return -ENOMEM; | |
778 | ||
779 | spin_lock_irqsave(&ohci->lock, flags); | |
780 | ||
781 | if (ohci->next_config_rom == NULL) { | |
782 | ohci->next_config_rom = next_config_rom; | |
783 | ohci->next_config_rom_bus = next_config_rom_bus; | |
784 | ||
785 | memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE); | |
786 | fw_memcpy_to_be32(ohci->next_config_rom, config_rom, | |
787 | length * 4); | |
788 | ||
789 | ohci->next_header = config_rom[0]; | |
790 | ohci->next_config_rom[0] = 0; | |
791 | ||
792 | reg_write(ohci, OHCI1394_ConfigROMmap, | |
793 | ohci->next_config_rom_bus); | |
794 | } else { | |
795 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | |
796 | next_config_rom, next_config_rom_bus); | |
797 | retval = -EBUSY; | |
798 | } | |
799 | ||
800 | spin_unlock_irqrestore(&ohci->lock, flags); | |
801 | ||
802 | /* Now initiate a bus reset to have the changes take | |
803 | * effect. We clean up the old config rom memory and DMA | |
804 | * mappings in the bus reset tasklet, since the OHCI | |
805 | * controller could need to access it before the bus reset | |
806 | * takes effect. */ | |
807 | if (retval == 0) | |
808 | fw_core_initiate_bus_reset(&ohci->card, 1); | |
809 | ||
810 | return retval; | |
811 | } | |
812 | ||
813 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) | |
814 | { | |
815 | struct fw_ohci *ohci = fw_ohci(card); | |
816 | ||
817 | at_context_transmit(&ohci->at_request_ctx, packet); | |
818 | } | |
819 | ||
820 | static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) | |
821 | { | |
822 | struct fw_ohci *ohci = fw_ohci(card); | |
823 | ||
824 | at_context_transmit(&ohci->at_response_ctx, packet); | |
825 | } | |
826 | ||
827 | static int | |
828 | ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) | |
829 | { | |
830 | struct fw_ohci *ohci = fw_ohci(card); | |
831 | unsigned long flags; | |
832 | int retval = 0; | |
833 | ||
834 | /* FIXME: make sure this bitmask is cleared when we clear the | |
835 | * busReset interrupt bit. */ | |
836 | ||
837 | spin_lock_irqsave(&ohci->lock, flags); | |
838 | ||
839 | if (ohci->generation != generation) { | |
840 | retval = -ESTALE; | |
841 | goto out; | |
842 | } | |
843 | ||
844 | if (node_id < 32) { | |
845 | reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << node_id); | |
846 | } else { | |
847 | reg_write(ohci, OHCI1394_PhyReqFilterHiSet, | |
848 | 1 << (node_id - 32)); | |
849 | } | |
850 | flush_writes(ohci); | |
851 | ||
852 | out: | |
853 | spin_unlock_irqrestore(&ohci->lock, flags); | |
854 ||
855 | return retval; | |
856 | } | |
857 | ||
858 | static void ir_context_tasklet(unsigned long data) | |
859 | { | |
860 | struct iso_context *ctx = (struct iso_context *)data; | |
861 | ||
862 | (void)ctx; | |
863 | } | |
864 | ||
865 | #define ISO_BUFFER_SIZE (64 * 1024) | |
866 | ||
867 | static void flush_iso_context(struct iso_context *ctx) | |
868 | { | |
869 | struct fw_ohci *ohci = fw_ohci(ctx->base.card); | |
870 | struct descriptor *d, *last; | |
871 | u32 address; | |
872 | int z; | |
873 | ||
874 | dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus, | |
875 | ISO_BUFFER_SIZE, DMA_TO_DEVICE); | |
876 | ||
877 | d = ctx->tail_descriptor; | |
878 | last = ctx->tail_descriptor_last; | |
879 | ||
880 | while (last->branch_address != 0 && last->transfer_status != 0) { | |
881 | address = le32_to_cpu(last->branch_address); | |
882 | z = address & 0xf; | |
883 | d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d; | |
884 | ||
885 | if (z == 2) | |
886 | last = d; | |
887 | else | |
888 | last = d + z - 1; | |
889 | ||
890 | if (le16_to_cpu(last->control) & descriptor_irq_always) | |
891 | ctx->base.callback(&ctx->base, | |
892 | 0, le16_to_cpu(last->res_count), | |
893 | ctx->base.callback_data); | |
894 | } | |
895 | ||
896 | ctx->tail_descriptor = d; | |
897 | ctx->tail_descriptor_last = last; | |
898 | } | |
899 | ||
900 | static void it_context_tasklet(unsigned long data) | |
901 | { | |
902 | struct iso_context *ctx = (struct iso_context *)data; | |
903 | ||
904 | flush_iso_context(ctx); | |
905 | } | |
906 | ||
907 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, | |
908 | int type) | |
909 | { | |
910 | struct fw_ohci *ohci = fw_ohci(card); | |
911 | struct iso_context *ctx, *list; | |
912 | void (*tasklet) (unsigned long data); | |
913 | u32 *mask; | |
914 | unsigned long flags; | |
915 | int index; | |
916 | ||
917 | if (type == FW_ISO_CONTEXT_TRANSMIT) { | |
918 | mask = &ohci->it_context_mask; | |
919 | list = ohci->it_context_list; | |
920 | tasklet = it_context_tasklet; | |
921 | } else { | |
922 | mask = &ohci->ir_context_mask; | |
923 | list = ohci->ir_context_list; | |
924 | tasklet = ir_context_tasklet; | |
925 | } | |
926 | ||
927 | spin_lock_irqsave(&ohci->lock, flags); | |
928 | index = ffs(*mask) - 1; | |
929 | if (index >= 0) | |
930 | *mask &= ~(1 << index); | |
931 | spin_unlock_irqrestore(&ohci->lock, flags); | |
932 | ||
933 | if (index < 0) | |
934 | return ERR_PTR(-EBUSY); | |
935 | ||
936 | ctx = &list[index]; | |
937 | memset(ctx, 0, sizeof *ctx); | |
938 | tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx); | |
939 | ||
940 | ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL); | |
941 | if (ctx->buffer == NULL) { | |
942 | spin_lock_irqsave(&ohci->lock, flags); | |
943 | *mask |= 1 << index; | |
944 | spin_unlock_irqrestore(&ohci->lock, flags); | |
945 | return ERR_PTR(-ENOMEM); | |
946 | } | |
947 | ||
948 | ctx->buffer_bus = | |
949 | dma_map_single(card->device, ctx->buffer, | |
950 | ISO_BUFFER_SIZE, DMA_TO_DEVICE); | |
951 | ||
952 | ctx->head_descriptor = ctx->buffer; | |
953 | ctx->prev_descriptor = ctx->buffer; | |
954 | ctx->tail_descriptor = ctx->buffer; | |
955 | ctx->tail_descriptor_last = ctx->buffer; | |
956 | ||
957 | /* We put a dummy descriptor in the buffer that has a NULL | |
958 | * branch address and looks like it's been sent. That way we | |
959 | * have a descriptor to append DMA programs to. Also, the | |
960 | * ring buffer invariant is that it always has at least one | |
961 | * element so that head == tail means buffer full. */ | |
962 | ||
963 | memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor); | |
964 | ctx->head_descriptor->control = | |
965 | cpu_to_le16(descriptor_output_last); | |
966 | ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011); | |
967 | ctx->head_descriptor++; | |
968 | ||
969 | return &ctx->base; | |
970 | } | |
971 | ||
972 | static int ohci_send_iso(struct fw_iso_context *base, s32 cycle) | |
973 | { | |
974 | struct iso_context *ctx = (struct iso_context *)base; | |
975 | struct fw_ohci *ohci = fw_ohci(ctx->base.card); | |
976 | u32 cycle_match = 0; | |
977 | int index; | |
978 | ||
979 | index = ctx - ohci->it_context_list; | |
980 | if (cycle > 0) | |
981 | cycle_match = CONTEXT_CYCLE_MATCH_ENABLE | | |
982 | (cycle & 0x7fff) << 16; | |
983 | ||
984 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); | |
985 | reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index), | |
986 | le32_to_cpu(ctx->tail_descriptor_last->branch_address)); | |
987 | reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0); | |
988 | reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), | |
989 | CONTEXT_RUN | cycle_match); | |
990 | flush_writes(ohci); | |
991 | ||
992 | return 0; | |
993 | } | |
994 | ||
995 | static void ohci_free_iso_context(struct fw_iso_context *base) | |
996 | { | |
997 | struct fw_ohci *ohci = fw_ohci(base->card); | |
998 | struct iso_context *ctx = (struct iso_context *)base; | |
999 | unsigned long flags; | |
1000 | int index; | |
1001 | ||
1002 | flush_iso_context(ctx); | |
1003 | ||
1004 | spin_lock_irqsave(&ohci->lock, flags); | |
1005 | ||
1006 | if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) { | |
1007 | index = ctx - ohci->it_context_list; | |
1008 | reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0); | |
1009 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); | |
1010 | ohci->it_context_mask |= 1 << index; | |
1011 | } else { | |
1012 | index = ctx - ohci->ir_context_list; | |
1013 | reg_write(ohci, OHCI1394_IsoRcvContextControlClear(index), ~0); | |
1014 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); | |
1015 | ohci->ir_context_mask |= 1 << index; | |
1016 | } | |
1017 | flush_writes(ohci); | |
1018 | ||
1019 | dma_unmap_single(ohci->card.device, ctx->buffer_bus, | |
1020 | ISO_BUFFER_SIZE, DMA_TO_DEVICE); | |
1021 | ||
1022 | spin_unlock_irqrestore(&ohci->lock, flags); | |
1023 | } | |
1024 | ||
1025 | static int | |
1026 | ohci_queue_iso(struct fw_iso_context *base, | |
1027 | struct fw_iso_packet *packet, void *payload) | |
1028 | { | |
1029 | struct iso_context *ctx = (struct iso_context *)base; | |
1030 | struct fw_ohci *ohci = fw_ohci(ctx->base.card); | |
1031 | struct descriptor *d, *end, *last, *tail, *pd; | |
1032 | struct fw_iso_packet *p; | |
1033 | __le32 *header; | |
1034 | dma_addr_t d_bus; | |
1035 | u32 z, header_z, payload_z, irq; | |
1036 | u32 payload_index, payload_end_index, next_page_index; | |
1037 | int index, page, end_page, i, length, offset; | |
1038 | ||
1039 | /* FIXME: Cycle lost behavior should be configurable: lose | |
1040 | * packet, retransmit or terminate.. */ | |
1041 | ||
1042 | p = packet; | |
1043 | payload_index = payload - ctx->base.buffer; | |
1044 | d = ctx->head_descriptor; | |
1045 | tail = ctx->tail_descriptor; | |
1046 | end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor); | |
1047 | ||
1048 | if (p->skip) | |
1049 | z = 1; | |
1050 | else | |
1051 | z = 2; | |
1052 | if (p->header_length > 0) | |
1053 | z++; | |
1054 | ||
1055 | /* Determine the first page the payload isn't contained in. */ | |
1056 | end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; | |
1057 | if (p->payload_length > 0) | |
1058 | payload_z = end_page - (payload_index >> PAGE_SHIFT); | |
1059 | else | |
1060 | payload_z = 0; | |
1061 | ||
1062 | z += payload_z; | |
1063 | ||
1064 | /* Get header size in number of descriptors. */ | |
1065 | header_z = DIV_ROUND_UP(p->header_length, sizeof *d); | |
1066 | ||
1067 | if (d + z + header_z <= tail) { | |
1068 | goto has_space; | |
1069 | } else if (d > tail && d + z + header_z <= end) { | |
1070 | goto has_space; | |
1071 | } else if (d > tail && ctx->buffer + z + header_z <= tail) { | |
1072 | d = ctx->buffer; | |
1073 | goto has_space; | |
1074 | } | |
1075 | ||
1076 | /* No space in buffer */ | |
1077 | return -1; | |
1078 | ||
1079 | has_space: | |
1080 | memset(d, 0, (z + header_z) * sizeof *d); | |
1081 | d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d; | |
1082 | ||
1083 | if (!p->skip) { | |
1084 | d[0].control = cpu_to_le16(descriptor_key_immediate); | |
1085 | d[0].req_count = cpu_to_le16(8); | |
1086 | ||
1087 | header = (__le32 *) &d[1]; | |
1088 | header[0] = cpu_to_le32(it_header_sy(p->sy) | | |
1089 | it_header_tag(p->tag) | | |
1090 | it_header_tcode(TCODE_STREAM_DATA) | | |
1091 | it_header_channel(ctx->base.channel) | | |
1092 | it_header_speed(ctx->base.speed)); | |
1093 | header[1] = | |
1094 | cpu_to_le32(it_header_data_length(p->header_length + | |
1095 | p->payload_length)); | |
1096 | } | |
1097 | ||
1098 | if (p->header_length > 0) { | |
1099 | d[2].req_count = cpu_to_le16(p->header_length); | |
1100 | d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d); | |
1101 | memcpy(&d[z], p->header, p->header_length); | |
1102 | } | |
1103 | ||
1104 | pd = d + z - payload_z; | |
1105 | payload_end_index = payload_index + p->payload_length; | |
1106 | for (i = 0; i < payload_z; i++) { | |
1107 | page = payload_index >> PAGE_SHIFT; | |
1108 | offset = payload_index & ~PAGE_MASK; | |
1109 | next_page_index = (page + 1) << PAGE_SHIFT; | |
1110 | length = | |
1111 | min(next_page_index, payload_end_index) - payload_index; | |
1112 | pd[i].req_count = cpu_to_le16(length); | |
1113 | pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset); | |
1114 | ||
1115 | payload_index += length; | |
1116 | } | |
1117 | ||
1118 | if (z == 2) | |
1119 | last = d; | |
1120 | else | |
1121 | last = d + z - 1; | |
1122 | ||
1123 | if (p->interrupt) | |
1124 | irq = descriptor_irq_always; | |
1125 | else | |
1126 | irq = descriptor_no_irq; | |
1127 | ||
1128 | last->control = cpu_to_le16(descriptor_output_last | | |
1129 | descriptor_status | | |
1130 | descriptor_branch_always | | |
1131 | irq); | |
1132 | ||
1133 | dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus, | |
1134 | ISO_BUFFER_SIZE, DMA_TO_DEVICE); | |
1135 | ||
1136 | ctx->head_descriptor = d + z + header_z; | |
1137 | ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z); | |
1138 | ctx->prev_descriptor = last; | |
1139 | ||
1140 | index = ctx - ohci->it_context_list; | |
1141 | reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE); | |
1142 | flush_writes(ohci); | |
1143 | ||
1144 | return 0; | |
1145 | } | |
1146 | ||
1147 | static struct fw_card_driver ohci_driver = { | |
1148 | .name = ohci_driver_name, | |
1149 | .enable = ohci_enable, | |
1150 | .update_phy_reg = ohci_update_phy_reg, | |
1151 | .set_config_rom = ohci_set_config_rom, | |
1152 | .send_request = ohci_send_request, | |
1153 | .send_response = ohci_send_response, | |
1154 | .enable_phys_dma = ohci_enable_phys_dma, | |
1155 | ||
1156 | .allocate_iso_context = ohci_allocate_iso_context, | |
1157 | .free_iso_context = ohci_free_iso_context, | |
1158 | .queue_iso = ohci_queue_iso, | |
1159 | .send_iso = ohci_send_iso | |
1160 | }; | |
1161 | ||
1162 | static int software_reset(struct fw_ohci *ohci) | |
1163 | { | |
1164 | int i; | |
1165 | ||
1166 | reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); | |
1167 | ||
1168 | for (i = 0; i < OHCI_LOOP_COUNT; i++) { | |
1169 | if ((reg_read(ohci, OHCI1394_HCControlSet) & | |
1170 | OHCI1394_HCControl_softReset) == 0) | |
1171 | return 0; | |
1172 | msleep(1); | |
1173 | } | |
1174 | ||
1175 | return -EBUSY; | |
1176 | } | |
1177 | ||
1178 | /* ---------- pci subsystem interface ---------- */ | |
1179 | ||
1180 | enum { | |
1181 | CLEANUP_SELF_ID, | |
1182 | CLEANUP_REGISTERS, | |
1183 | CLEANUP_IOMEM, | |
1184 | CLEANUP_DISABLE, | |
1185 | CLEANUP_PUT_CARD, | |
1186 | }; | |
1187 | ||
1188 | static int cleanup(struct fw_ohci *ohci, int stage, int code) | |
1189 | { | |
1190 | struct pci_dev *dev = to_pci_dev(ohci->card.device); | |
1191 | ||
1192 | switch (stage) { | |
1193 | case CLEANUP_SELF_ID: | |
1194 | dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE, | |
1195 | ohci->self_id_cpu, ohci->self_id_bus); | |
1196 | case CLEANUP_REGISTERS: | |
1197 | kfree(ohci->it_context_list); | |
1198 | kfree(ohci->ir_context_list); | |
1199 | pci_iounmap(dev, ohci->registers); | |
1200 | case CLEANUP_IOMEM: | |
1201 | pci_release_region(dev, 0); | |
1202 | case CLEANUP_DISABLE: | |
1203 | pci_disable_device(dev); | |
1204 | case CLEANUP_PUT_CARD: | |
1205 | fw_card_put(&ohci->card); | |
1206 | } | |
1207 | ||
1208 | return code; | |
1209 | } | |
1210 | ||
1211 | static int __devinit | |
1212 | pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |
1213 | { | |
1214 | struct fw_ohci *ohci; | |
1215 | u32 bus_options, max_receive, link_speed; | |
1216 | u64 guid; | |
1217 | int error_code; | |
1218 | size_t size; | |
1219 | ||
1220 | ohci = kzalloc(sizeof *ohci, GFP_KERNEL); | |
1221 | if (ohci == NULL) { | |
1222 | fw_error("Could not malloc fw_ohci data.\n"); | |
1223 | return -ENOMEM; | |
1224 | } | |
1225 | ||
1226 | fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); | |
1227 | ||
1228 | if (pci_enable_device(dev)) { | |
1229 | fw_error("Failed to enable OHCI hardware.\n"); | |
1230 | return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV); | |
1231 | } | |
1232 | ||
1233 | pci_set_master(dev); | |
1234 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); | |
1235 | pci_set_drvdata(dev, ohci); | |
1236 | ||
1237 | spin_lock_init(&ohci->lock); | |
1238 | ||
1239 | tasklet_init(&ohci->bus_reset_tasklet, | |
1240 | bus_reset_tasklet, (unsigned long)ohci); | |
1241 | ||
1242 | if (pci_request_region(dev, 0, ohci_driver_name)) { | |
1243 | fw_error("MMIO resource unavailable\n"); | |
1244 | return cleanup(ohci, CLEANUP_DISABLE, -EBUSY); | |
1245 | } | |
1246 | ||
1247 | ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); | |
1248 | if (ohci->registers == NULL) { | |
1249 | fw_error("Failed to remap registers\n"); | |
1250 | return cleanup(ohci, CLEANUP_IOMEM, -ENXIO); | |
1251 | } | |
1252 | ||
1253 | if (software_reset(ohci)) { | |
1254 | fw_error("Failed to reset ohci card.\n"); | |
1255 | return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY); | |
1256 | } | |
1257 | ||
1258 | /* Now enable LPS, which we need in order to start accessing | |
1259 | * most of the registers. In fact, on some cards (ALI M5251), | |
1260 | * accessing registers in the SClk domain without LPS enabled | |
1261 | * will lock up the machine. Wait 50msec to make sure we have | |
1262 | * full link enabled. */ | |
1263 | reg_write(ohci, OHCI1394_HCControlSet, | |
1264 | OHCI1394_HCControl_LPS | | |
1265 | OHCI1394_HCControl_postedWriteEnable); | |
1266 | flush_writes(ohci); | |
1267 | msleep(50); | |
1268 | ||
1269 | reg_write(ohci, OHCI1394_HCControlClear, | |
1270 | OHCI1394_HCControl_noByteSwapData); | |
1271 | ||
1272 | reg_write(ohci, OHCI1394_LinkControlSet, | |
1273 | OHCI1394_LinkControl_rcvSelfID | | |
1274 | OHCI1394_LinkControl_cycleTimerEnable | | |
1275 | OHCI1394_LinkControl_cycleMaster); | |
1276 | ||
1277 | ar_context_init(&ohci->ar_request_ctx, ohci, | |
1278 | OHCI1394_AsReqRcvContextControlSet); | |
1279 | ||
1280 | ar_context_init(&ohci->ar_response_ctx, ohci, | |
1281 | OHCI1394_AsRspRcvContextControlSet); | |
1282 | ||
1283 | at_context_init(&ohci->at_request_ctx, ohci, | |
1284 | OHCI1394_AsReqTrContextControlSet); | |
1285 | ||
1286 | at_context_init(&ohci->at_response_ctx, ohci, | |
1287 | OHCI1394_AsRspTrContextControlSet); | |
1288 | ||
1289 | reg_write(ohci, OHCI1394_ATRetries, | |
1290 | OHCI1394_MAX_AT_REQ_RETRIES | | |
1291 | (OHCI1394_MAX_AT_RESP_RETRIES << 4) | | |
1292 | (OHCI1394_MAX_PHYS_RESP_RETRIES << 8)); | |
1293 | ||
1294 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); | |
1295 | ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); | |
1296 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); | |
1297 | size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask); | |
1298 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); | |
1299 | ||
1300 | reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); | |
1301 | ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); | |
1302 | reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); | |
1303 | size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); | |
1304 | ohci->ir_context_list = kzalloc(size, GFP_KERNEL); | |
1305 | ||
1306 | if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { | |
1307 | fw_error("Out of memory for it/ir contexts.\n"); | |
1308 | return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM); | |
1309 | } | |
1310 | ||
1311 | /* self-id dma buffer allocation */ | |
1312 | ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device, | |
1313 | SELF_ID_BUF_SIZE, | |
1314 | &ohci->self_id_bus, | |
1315 | GFP_KERNEL); | |
1316 | if (ohci->self_id_cpu == NULL) { | |
1317 | fw_error("Out of memory for self ID buffer.\n"); | |
1318 | return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM); | |
1319 | } | |
1320 | ||
1321 | reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); | |
1322 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000); | |
1323 | reg_write(ohci, OHCI1394_IntEventClear, ~0); | |
1324 | reg_write(ohci, OHCI1394_IntMaskClear, ~0); | |
1325 | reg_write(ohci, OHCI1394_IntMaskSet, | |
1326 | OHCI1394_selfIDComplete | | |
1327 | OHCI1394_RQPkt | OHCI1394_RSPkt | | |
1328 | OHCI1394_reqTxComplete | OHCI1394_respTxComplete | | |
1329 | OHCI1394_isochRx | OHCI1394_isochTx | | |
1330 | OHCI1394_masterIntEnable); | |
1331 | ||
1332 | bus_options = reg_read(ohci, OHCI1394_BusOptions); | |
1333 | max_receive = (bus_options >> 12) & 0xf; | |
1334 | link_speed = bus_options & 0x7; | |
1335 | guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | | |
1336 | reg_read(ohci, OHCI1394_GUIDLo); | |
1337 | ||
1338 | error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid); | |
1339 | if (error_code < 0) | |
1340 | return cleanup(ohci, CLEANUP_SELF_ID, error_code); | |
1341 | ||
1342 | fw_notify("Added fw-ohci device %s.\n", dev->dev.bus_id); | |
1343 | ||
1344 | return 0; | |
1345 | } | |
1346 | ||
1347 | static void pci_remove(struct pci_dev *dev) | |
1348 | { | |
1349 | struct fw_ohci *ohci; | |
1350 | ||
1351 | ohci = pci_get_drvdata(dev); | |
1352 | reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable); | |
1353 | fw_core_remove_card(&ohci->card); | |
1354 | ||
1355 | /* FIXME: Fail all pending packets here, now that the upper | |
1356 | * layers can't queue any more. */ | |
1357 | ||
1358 | software_reset(ohci); | |
1359 | free_irq(dev->irq, ohci); | |
1360 | cleanup(ohci, CLEANUP_SELF_ID, 0); | |
1361 | ||
1362 | fw_notify("Removed fw-ohci device.\n"); | |
1363 | } | |
1364 | ||
1365 | static struct pci_device_id pci_table[] = { | |
1366 | { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, | |
1367 | { } | |
1368 | }; | |
1369 | ||
1370 | MODULE_DEVICE_TABLE(pci, pci_table); | |
1371 | ||
1372 | static struct pci_driver fw_ohci_pci_driver = { | |
1373 | .name = ohci_driver_name, | |
1374 | .id_table = pci_table, | |
1375 | .probe = pci_probe, | |
1376 | .remove = pci_remove, | |
1377 | }; | |
1378 | ||
1379 | MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); | |
1380 | MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); | |
1381 | MODULE_LICENSE("GPL"); | |
1382 | ||
1383 | static int __init fw_ohci_init(void) | |
1384 | { | |
1385 | return pci_register_driver(&fw_ohci_pci_driver); | |
1386 | } | |
1387 | ||
1388 | static void __exit fw_ohci_cleanup(void) | |
1389 | { | |
1390 | pci_unregister_driver(&fw_ohci_pci_driver); | |
1391 | } | |
1392 | ||
1393 | module_init(fw_ohci_init); | |
1394 | module_exit(fw_ohci_cleanup); |