/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *   Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "vme_user.h"

static char driver_name[] = "vme_user";

static int bus[USER_BUS_MAX];
static int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 * It is expected that all VME bus drivers will use the
 * same interface. For interface documentation see
 * http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/, as an alternative driver providing a saner
 * interface can be written later.
 *
 * The vmelinux.org driver never supported slave images, the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
typedef struct {
	void __iomem *kern_buf;		/* Buffer address in kernel space */
	dma_addr_t pci_buf;		/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct semaphore sem;		/* Semaphore for locking image */
	struct device *device;		/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;			/* Number of current users */
} image_desc_t;
static image_desc_t image[VME_DEVS];

typedef struct {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaErrors;
	unsigned long timeouts;
	unsigned long external;
} driver_stats_t;
static driver_stats_t statistics;

struct cdev *vme_user_cdev;		/* Character device */
struct class *vme_user_sysfs_class;	/* Sysfs class */
struct device *vme_user_bridge;		/* Pointer to the bridge device */

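/*
 * Mapping of minor number to window type: minors 0-3 are master windows,
 * minors 4-7 are slave windows and minor 8 is the control device.
 */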
static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char *, size_t, loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
	unsigned long);

static int __init vme_user_probe(struct device *, int, int);
static int __exit vme_user_remove(struct device *, int, int);

static struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.ioctl = vme_user_ioctl,
};


/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaErrors = 0;
	statistics.timeouts = 0;
}

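/*
 * Open only succeeds for minors that were given a VME resource at probe
 * time; each open/release pair maintains a per-image user count under the
 * image semaphore.
 */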
static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);
	/* Only allow device to be opened if a resource is allocated */
	if (image[minor].resource == NULL) {
		printk(KERN_ERR "No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	up(&image[minor].sem);

	return 0;

err_res:
	up(&image[minor].sem);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);

	/* Decrement user count */
	image[minor].users--;

	up(&image[minor].sem);

	return 0;
}

/*
 * We are going to allocate a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0) {
			return (int)copied;
		}

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			printk(KERN_WARNING "User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}

/*
 * We are going to allocate a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

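/*
 * Slave window transfers: the slave images are backed by a coherent kernel
 * buffer, so reads and writes simply copy between that buffer and user space
 * at the current file offset.
 */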
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void __iomem *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char *buf,
	size_t count, loff_t *ppos)
{
	void __iomem *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy from userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully written */
	return retval;
}

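/*
 * read() entry point: clamp the request to the bounds of the image and then
 * dispatch to the master (resource_to_user) or slave (buffer_to_user) helper
 * depending on the minor number.
 */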
static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
	loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

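/*
 * write() entry point: mirror image of vme_user_read(), clamping the request
 * to the image size and dispatching to resource_from_user() for master
 * windows or buffer_from_user() for slave windows.
 */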
static ssize_t vme_user_write(struct file *file, const char *buf, size_t count,
	loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

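/*
 * llseek() entry point: requested positions are validated against the size
 * of the image backing this minor, so the file offset always stays within
 * the window.
 */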
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	loff_t absolute = -1;
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	size_t image_size;

	down(&image[minor].sem);
	image_size = vme_get_size(image[minor].resource);

	switch (whence) {
	case SEEK_SET:
		absolute = off;
		break;
	case SEEK_CUR:
		absolute = file->f_pos + off;
		break;
	case SEEK_END:
		absolute = image_size + off;
		break;
	default:
		up(&image[minor].sem);
		return -EINVAL;
		break;
	}

	if ((absolute < 0) || (absolute >= image_size)) {
		up(&image[minor].sem);
		return -EINVAL;
	}

	file->f_pos = absolute;

	up(&image[minor].sem);

	return absolute;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong, as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that have
 * already been defined.
 */
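/*
 * For illustration only: a minimal, untested user-space sketch of how a
 * master window is expected to be set up and used via these ioctls. The
 * address, size, aspace, cycle and dwidth values are assumptions for the
 * sake of the example and depend entirely on the target hardware; buf and
 * len are placeholders.
 *
 *	int fd = open("/dev/bus/vme/m0", O_RDWR);
 *	struct vme_master ma = {
 *		.enable   = 1,
 *		.vme_addr = 0x0,
 *		.size     = 0x10000,
 *		.aspace   = VME_A32,
 *		.cycle    = VME_SCT,
 *		.dwidth   = VME_D32,
 *	};
 *
 *	if (fd >= 0 && ioctl(fd, VME_SET_MASTER, &ma) == 0)
 *		read(fd, buf, len);	// reads now come from VME space
 */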
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&(master.enable), &(master.vme_addr),
				&(master.size), &(master.aspace),
				&(master.cycle), &(master.dwidth));

			copied = copy_to_user((char *)arg, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, (char *)arg,
				sizeof(master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&(slave.enable), &(slave.vme_addr),
				&(slave.size), &pci_addr, &(slave.aspace),
				&(slave.cycle));

			copied = copy_to_user((char *)arg, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, (char *)arg,
				sizeof(slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 *     to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}


/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n",
			image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};


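/*
 * Module initialisation: build the bind table from the "bus" module
 * parameters and register the driver with the VME core; the actual set-up of
 * windows and devices happens in vme_user_probe() when the bridge is bound.
 */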
static int __init vme_user_init(void)
{
	int retval = 0;
	int i;
	struct vme_device_id *ids;

	printk(KERN_INFO "VME User Space Access Driver\n");

	if (bus_num == 0) {
		printk(KERN_ERR "%s: No cards, skipping registration\n",
			driver_name);
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > USER_BUS_MAX) {
		printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
			driver_name, USER_BUS_MAX);
		bus_num = USER_BUS_MAX;
	}


	/* Dynamically create the bind table based on module parameters */
	ids = kmalloc(sizeof(struct vme_device_id) * (bus_num + 1), GFP_KERNEL);
	if (ids == NULL) {
		printk(KERN_ERR "%s: Unable to allocate ID table\n",
			driver_name);
		goto err_id;
	}

	memset(ids, 0, (sizeof(struct vme_device_id) * (bus_num + 1)));

	for (i = 0; i < bus_num; i++) {
		ids[i].bus = bus[i];
		/*
		 * We register the driver against the slot occupied by *this*
		 * card, since it's really a low level way of controlling
		 * the VME bridge
		 */
		ids[i].slot = VME_SLOT_CURRENT;
	}

	vme_user_driver.bind_table = ids;

	retval = vme_register_driver(&vme_user_driver);
	if (retval != 0)
		goto err_reg;

	return retval;

	vme_unregister_driver(&vme_user_driver);
err_reg:
	kfree(ids);
err_id:
err_nocard:
	return retval;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
{
	int i, err;
	char name[12];

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n",
			driver_name);
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = dev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		init_MUTEX(&(image[i].sem));
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		printk(KERN_WARNING "%s: Error getting Major Number %d for "
			"driver.\n", driver_name, VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		printk(KERN_WARNING "%s: cdev_add failed\n", driver_name);
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate slave "
				"resource\n");
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &(image[i].pci_buf));
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate master "
				"resource\n");
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"master window buffers\n");
			err = -ENOMEM;
			goto err_master_buf;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		printk(KERN_ERR "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		switch (type[i]) {
		case MASTER_MINOR:
			sprintf(name, "bus/vme/m%%d");
			break;
		case CONTROL_MINOR:
			sprintf(name, "bus/vme/ctl");
			break;
		case SLAVE_MINOR:
			sprintf(name, "bus/vme/s%%d");
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
			break;
		}

		image[i].device =
			device_create(vme_user_sysfs_class, NULL,
				MKDEV(VME_MAJOR, i), NULL, name,
				(type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i);
		if (IS_ERR(image[i].device)) {
			printk(KERN_ERR "%s: Error creating sysfs device\n",
				driver_name);
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Ensure counter set correctly to destroy all sysfs devices */
	i = VME_DEVS;
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to free all master windows */
	i = MASTER_MAX + 1;
err_master_buf:
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);
err_master:
	while (i > MASTER_MINOR) {
		i--;
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to free all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_slave_free(image[i].resource);
		buf_unalloc(i);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

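/*
 * Tear-down counterpart of vme_user_probe(): remove the sysfs devices, free
 * the master buffers, disable and free the slave windows and unregister the
 * character device region.
 */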
static int __exit vme_user_remove(struct device *dev, int cur_bus, int cur_slot)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_slave_free(image[i].resource);
		buf_unalloc(i);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

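/*
 * Module exit: unregister from the VME core and release the bind table
 * allocated in vme_user_init().
 */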
static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);

	kfree(vme_user_driver.bind_table);
}


MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);