/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307  USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"

/* TODO: bus resets send a new packet with new generation and node id */

/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};
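
/* Each outbound event type below embeds struct event as its first
 * member, so a queued bus_reset, response or iso_interrupt can be
 * freed with a single kfree() of the event pointer once it has been
 * copied out by dequeue_event(). */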

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct list_head link;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;
	spinlock_t lock;
	u32 resource_handle;
	struct list_head handler_list;
	struct list_head request_list;
	struct list_head transaction_list;
	struct list_head descriptor_list;
	u32 request_serial;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
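
/* User pointers travel through the ioctl ABI as __u64 so that 32-bit
 * and 64-bit userlands share one structure layout; the two helpers
 * above keep the casts in one place, and fw_device_op_compat_ioctl()
 * below widens 32-bit pointers with compat_ptr() before dispatch. */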

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = fw_device_from_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	client = kzalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	INIT_LIST_HEAD(&client->handler_list);
	INIT_LIST_HEAD(&client->request_list);
	INIT_LIST_HEAD(&client->transaction_list);
	INIT_LIST_HEAD(&client->descriptor_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);

	list_add_tail(&event->link, &client->event_list);
	wake_up_interruptible(&client->wait);

	spin_unlock_irqrestore(&client->lock, flags);
}
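
/* An event carries up to two segments (v[0] and v[1]) so that a fixed
 * header and a variable-length payload can be queued without first
 * being copied into one contiguous buffer; dequeue_event() below
 * concatenates the segments into the caller's read() buffer. */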

static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
			goto out;
		}
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
		     struct client *client)
{
	struct fw_card *card = client->device->card;

	event->closure       = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;
	event->generation    = card->generation;
}

static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof *bus_reset, GFP_ATOMIC);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof bus_reset->reset, NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void __user *arg)
{
	struct fw_cdev_get_info get_info;
	struct fw_cdev_event_bus_reset bus_reset;

	if (copy_from_user(&get_info, arg, sizeof get_info))
		return -EFAULT;

	client->version = get_info.version;
	get_info.version = FW_CDEV_VERSION;

	if (get_info.rom != 0) {
		void __user *uptr = u64_to_uptr(get_info.rom);
		size_t want = get_info.rom_length;
		size_t have = client->device->config_rom_length * 4;

		if (copy_to_user(uptr, client->device->config_rom,
				 min(want, have)))
			return -EFAULT;
	}
	get_info.rom_length = client->device->config_rom_length * 4;

	client->bus_reset_closure = get_info.bus_reset_closure;
	if (get_info.bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info.bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof bus_reset))
			return -EFAULT;
	}

	get_info.card = client->device->card->index;

	if (copy_to_user(arg, &get_info, sizeof get_info))
		return -EFAULT;

	return 0;
}

static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->link);
	spin_unlock_irqrestore(&client->lock, flags);

	response->response.type  = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof response->response,
		    response->response.data, response->response.length);
}

static ssize_t
ioctl_send_request(struct client *client, void __user *arg)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request request;
	struct response *response;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* What is the biggest size we'll accept, really? */
	if (request.length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request.length;
	response->response.closure = request.closure;

	if (request.data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request.data), request.length)) {
		kfree(response);
		return -EFAULT;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&response->link, &client->transaction_list);
	spin_unlock_irqrestore(&client->lock, flags);

	fw_send_request(device->card, &response->transaction,
			request.tcode & 0x1f,
			device->node->node_id,
			request.generation,
			device->node->max_speed,
			request.offset,
			response->response.data, request.length,
			complete_transaction, response);

	if (request.data)
		return sizeof request + request.length;
	else
		return sizeof request;
}
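
/*
 * Illustrative userspace sketch (not part of the driver): issue a
 * quadlet read of the device's Config ROM header through the ioctl
 * above.  Field names follow struct fw_cdev_send_request in
 * fw-device-cdev.h; the generation value would come from a previous
 * FW_CDEV_EVENT_BUS_RESET event.
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = 0x4,	// TCODE_READ_QUADLET_REQUEST
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,  // CSR config ROM base
 *		.closure    = 0,
 *		.data       = 0,	// no outbound payload
 *		.generation = generation,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *	// the matching FW_CDEV_EVENT_RESPONSE arrives via read() on fd
 */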

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct list_head link;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	u32 serial;
	struct list_head link;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	unsigned long flags;
	struct client *client = handler->client;

	request = kmalloc(sizeof *request, GFP_ATOMIC);
	e = kmalloc(sizeof *e, GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	spin_lock_irqsave(&client->lock, flags);
	request->serial = client->request_serial++;
	list_add_tail(&request->link, &client->request_list);
	spin_unlock_irqrestore(&client->lock, flags);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.serial  = request->serial;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof e->request, payload, length);
}

static int ioctl_allocate(struct client *client, void __user *arg)
{
	struct fw_cdev_allocate request;
	struct address_handler *handler;
	unsigned long flags;
	struct fw_address_region region;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	handler = kmalloc(sizeof *handler, GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request.offset;
	region.end = request.offset + request.length;
	handler->handler.length = request.length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request.closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&handler->link, &client->handler_list);
	spin_unlock_irqrestore(&client->lock, flags);

	return 0;
}

static int ioctl_deallocate(struct client *client, void __user *arg)
{
	struct fw_cdev_deallocate request;
	struct address_handler *handler;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(handler, &client->handler_list, link) {
		if (handler->handler.offset == request.offset) {
			list_del(&handler->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&handler->link == &client->handler_list)
		return -EINVAL;

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);

	return 0;
}

static int ioctl_send_response(struct client *client, void __user *arg)
{
	struct fw_cdev_send_response request;
	struct request *r;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->request_list, link) {
		if (r->serial == request.serial) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->request_list)
		return -EINVAL;

	if (request.length < r->length)
		r->length = request.length;
	if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request.rcode);
	kfree(r);

	return 0;
}
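
/*
 * Illustrative flow (not part of the driver): a client that exported
 * an address range with FW_CDEV_IOC_ALLOCATE reads an
 * FW_CDEV_EVENT_REQUEST from the fd, services it, and completes the
 * transaction by echoing the event's serial back through
 * FW_CDEV_IOC_SEND_RESPONSE with an rcode such as RCODE_COMPLETE.
 * Until then the remote node's transaction stays pending.
 */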

static int ioctl_initiate_bus_reset(struct client *client, void __user *arg)
{
	struct fw_cdev_initiate_bus_reset request;
	int short_reset;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	short_reset = (request.type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
	struct fw_descriptor d;
	struct list_head link;
	u32 handle;
	u32 data[0];
};

static int ioctl_add_descriptor(struct client *client, void __user *arg)
{
	struct fw_cdev_add_descriptor request;
	struct descriptor *descriptor;
	unsigned long flags;
	int retval;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (request.length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof *descriptor + request.length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request.data), request.length * 4)) {
		kfree(descriptor);
		return -EFAULT;
	}

	descriptor->d.length = request.length;
	descriptor->d.immediate = request.immediate;
	descriptor->d.key = request.key;
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&descriptor->link, &client->descriptor_list);
	descriptor->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);

	request.handle = descriptor->handle;
	if (copy_to_user(arg, &request, sizeof request))
		return -EFAULT;

	return 0;
}

static int ioctl_remove_descriptor(struct client *client, void __user *arg)
{
	struct fw_cdev_remove_descriptor request;
	struct descriptor *d;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(d, &client->descriptor_list, link) {
		if (d->handle == request.handle) {
			list_del(&d->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&d->link == &client->descriptor_list)
		return -EINVAL;

	fw_core_remove_descriptor(&d->d);
	kfree(d);

	return 0;
}

static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *interrupt;

	interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC);
	if (interrupt == NULL)
		return;

	interrupt->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	interrupt->interrupt.closure       = 0;
	interrupt->interrupt.cycle         = cycle;
	interrupt->interrupt.header_length = header_length;
	memcpy(interrupt->interrupt.header, header, header_length);
	queue_event(client, &interrupt->event,
		    &interrupt->interrupt,
		    sizeof interrupt->interrupt + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
	struct fw_cdev_create_iso_context request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (request.channel > 63)
		return -EINVAL;

	switch (request.type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request.header_size < 4 || (request.header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request.speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	client->iso_context = fw_iso_context_create(client->device->card,
						    request.type,
						    request.channel,
						    request.speed,
						    request.header_size,
						    iso_callback, client);
	if (IS_ERR(client->iso_context))
		return PTR_ERR(client->iso_context);

	return 0;
}

static int ioctl_queue_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_queue_iso request;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, payload_end, header_length;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL)
		return -EINVAL;
	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request.data pointer is ignored. */
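
	/* For example (illustrative): with a 16-page mmap() at vm_start
	 * and request.data = vm_start + 4096, payload becomes 4096 and
	 * payload_end becomes 4096 + (16 << PAGE_SHIFT), so queued
	 * packet payloads start one page into the DMA buffer. */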

	payload = (unsigned long)request.data - client->vm_start;
	payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
	if (request.data == 0 || client->buffer.pages == NULL ||
	    payload >= payload_end) {
		payload = 0;
		payload_end = 0;
	}

	if (!access_ok(VERIFY_READ, request.packets, request.size))
		return -EFAULT;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
	end = (void __user *)p + request.size;
	count = 0;

	while (p < end) {
		if (__copy_from_user(&u.packet, p, sizeof *p))
			return -EFAULT;

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/* We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size. */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
				header_length = 0;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			} else {
				header_length = 0;
			}
		}
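
		/* In the receive case no header bytes follow the packet
		 * descriptor in the user array; controller-written headers
		 * come back through FW_CDEV_EVENT_ISO_INTERRUPT events
		 * instead, hence header_length = 0 above. */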
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > payload_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request.size    -= uptr_to_u64(p) - request.packets;
	request.packets  = uptr_to_u64(p);
	request.data     = client->vm_start + payload;

	if (copy_to_user(arg, &request, sizeof request))
		return -EFAULT;

	return count;
}

static int ioctl_start_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_start_iso request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request.tags == 0 || request.tags > 15)
			return -EINVAL;

		if (request.sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context,
				    request.cycle, request.sync, request.tags);
}

static int ioctl_stop_iso(struct client *client, void __user *arg)
{
	return fw_iso_context_stop(client->iso_context);
}

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case FW_CDEV_IOC_GET_INFO:
		return ioctl_get_info(client, arg);
	case FW_CDEV_IOC_SEND_REQUEST:
		return ioctl_send_request(client, arg);
	case FW_CDEV_IOC_ALLOCATE:
		return ioctl_allocate(client, arg);
	case FW_CDEV_IOC_DEALLOCATE:
		return ioctl_deallocate(client, arg);
	case FW_CDEV_IOC_SEND_RESPONSE:
		return ioctl_send_response(client, arg);
	case FW_CDEV_IOC_INITIATE_BUS_RESET:
		return ioctl_initiate_bus_reset(client, arg);
	case FW_CDEV_IOC_ADD_DESCRIPTOR:
		return ioctl_add_descriptor(client, arg);
	case FW_CDEV_IOC_REMOVE_DESCRIPTOR:
		return ioctl_remove_descriptor(client, arg);
	case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
		return ioctl_create_iso_context(client, arg);
	case FW_CDEV_IOC_QUEUE_ISO:
		return ioctl_queue_iso(client, arg);
	case FW_CDEV_IOC_START_ISO:
		return ioctl_start_iso(client, arg);
	case FW_CDEV_IOC_STOP_ISO:
		return ioctl_stop_iso(client, arg);
	default:
		return -EINVAL;
	}
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct address_handler *h, *next_h;
	struct request *r, *next_r;
	struct event *e, *next_e;
	struct response *t, *next_t;
	struct descriptor *d, *next_d;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(h, next_h, &client->handler_list, link) {
		fw_core_remove_address_handler(&h->handler);
		kfree(h);
	}

	list_for_each_entry_safe(r, next_r, &client->request_list, link) {
		fw_send_response(client->device->card, r->request,
				 RCODE_CONFLICT_ERROR);
		kfree(r);
	}

	list_for_each_entry_safe(t, next_t, &client->transaction_list, link) {
		fw_cancel_transaction(client->device->card, &t->transaction);
	}

	list_for_each_entry_safe(d, next_d, &client->descriptor_list, link) {
		fw_core_remove_descriptor(&d->d);
		kfree(d);
	}

	/* FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory. */

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};
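
/*
 * Illustrative userspace sketch (not part of the driver): the typical
 * open/handshake/event-loop sequence against this char device.  Names
 * follow the ABI declared in fw-device-cdev.h; the device path is an
 * assumption for the example.
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	union { struct fw_cdev_event_common common; char pad[1024]; } ev;
 *	while (poll(&pfd, 1, -1) > 0 && !(pfd.revents & POLLHUP)) {
 *		read(fd, &ev, sizeof ev);
 *		// dispatch on ev.common.type: FW_CDEV_EVENT_BUS_RESET,
 *		// FW_CDEV_EVENT_RESPONSE, FW_CDEV_EVENT_REQUEST or
 *		// FW_CDEV_EVENT_ISO_INTERRUPT
 *	}
 */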