/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
        .name = "char",
        .capabilities = (
#ifdef CONFIG_MMU
                /* permit private copies of the data to be taken */
                BDI_CAP_MAP_COPY |
#endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
                BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
                /* no writeback happens */
                BDI_CAP_NO_ACCT_AND_WRITEBACK),
};

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct cdev *cdev;              /* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index into the chrdevs[] hash table above */
static inline int major_to_index(unsigned major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        if (offset < CHRDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&chrdevs_lock);
                for (cd = chrdevs[offset]; cd; cd = cd->next)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
                mutex_unlock(&chrdevs_lock);
        }
}

#endif /* CONFIG_PROC_FS */
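
/*
 * Illustrative note (not from the original file): chrdev_show() backs the
 * character-device half of /proc/devices, so with the "%3d %s\n" format
 * above each registered major appears roughly as:
 *
 *        1 mem
 *        5 /dev/tty
 *
 * The exact entries depend on which drivers have registered majors.
 */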

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors under that major.
 *
 * Returns a pointer to the new char_device_struct on success, or an
 * ERR_PTR()-encoded negative errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                         int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        /* temporary */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strlcpy(cd->name, name, sizeof(cd->name));

        i = major_to_index(major);

        /* Walk the hash chain to find the sorted insertion point. */
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major &&
                     (((*cp)->baseminor >= baseminor) ||
                      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
                        break;

        /* Check for overlapping minor ranges. */
        if (*cp && (*cp)->major == major) {
                int old_min = (*cp)->baseminor;
                int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
                int new_min = baseminor;
                int new_max = baseminor + minorct - 1;

                /* New driver overlaps from the left. */
                if (new_max >= old_min && new_max <= old_max) {
                        ret = -EBUSY;
                        goto out;
                }

                /* New driver overlaps from the right. */
                if (new_min <= old_max && new_min >= old_min) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        cd->next = *cp;
        *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        /* The range may span several majors; register one major at a time. */
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        /* Unwind whatever was registered before the failure. */
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}
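
/*
 * Illustrative sketch (not part of the original file): a driver that owns a
 * fixed, pre-assigned major can reserve its numbers with
 * register_chrdev_region().  The major, count and "exampledev" name below
 * are hypothetical.
 */
static const dev_t example_first = MKDEV(240, 0); /* 240 is in the local/experimental range */

static int example_reserve_region(void)
{
        int err;

        /* Claim four consecutive minors starting at (240, 0). */
        err = register_chrdev_region(example_first, 4, "exampledev");
        if (err)
                return err;

        /* ... initialise and cdev_add() the device(s) here ... */
        return 0;
}

static void example_release_region(void)
{
        /* Release exactly the range that was registered. */
        unregister_chrdev_region(example_first, 4);
}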

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}
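
/*
 * Illustrative sketch (not part of the original file): the usual
 * dynamic-major pattern built on alloc_chrdev_region().  The count and the
 * "exampledev" name are hypothetical.
 */
static dev_t example_devt;

static int example_alloc_region(void)
{
        int err;

        /* One minor, starting at minor 0, under a dynamically chosen major. */
        err = alloc_chrdev_region(&example_devt, 0, 1, "exampledev");
        if (err)
                return err;

        pr_info("exampledev: using major %d\n", MAJOR(example_devt));
        return 0;
}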

/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a negative errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev.  It only helps to keep track of the different owners of devices.  If
 * your module has only one type of device it's ok to use e.g. the name of the
 * module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
                      unsigned int count, const char *name,
                      const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);

        err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, baseminor, count));
        return err;
}
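
/*
 * Illustrative sketch (not part of the original file): __register_chrdev()
 * bundles region reservation, cdev allocation and cdev_add() into a single
 * call; most drivers reach it through the register_chrdev() and
 * unregister_chrdev() wrappers in <linux/fs.h>.  The fops, names and counts
 * below are hypothetical.
 */
static const struct file_operations example_fops = {
        .owner  = THIS_MODULE,
        .llseek = noop_llseek,
};

static int example_register(void)
{
        int major;

        /* Passing major 0 asks for dynamic allocation; the major is returned. */
        major = __register_chrdev(0, 0, 8, "exampledev", &example_fops);
        if (major < 0)
                return major;

        pr_info("exampledev: registered major %d\n", major);
        return major;
}

static void example_unregister(int major)
{
        /* The baseminor/count must mirror what was registered above. */
        __unregister_chrdev(major, 0, 8, "exampledev");
}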

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
                         unsigned int count, const char *name)
{
        struct char_device_struct *cd;

        cd = __unregister_chrdev_region(major, baseminor, count);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                /*
                 * Check i_cdev again in case somebody beat us to it while
                 * we dropped the lock.
                 */
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;

        ret = -ENXIO;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op)
                goto out_cdev_put;

        if (filp->f_op->open) {
                ret = filp->f_op->open(inode, filp);
                if (ret)
                        goto out_cdev_put;
        }

        return 0;

 out_cdev_put:
        cdev_put(p);
        return ret;
}

void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
        .llseek = noop_llseek,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        int error;

        p->dev = dev;
        p->count = count;

        error = kobj_map(cdev_map, dev, count, NULL,
                         exact_match, exact_lock, p);
        if (error)
                return error;

        /*
         * Pin the parent kobject; the matching kobject_put() happens in the
         * cdev kobject release functions below.
         */
        kobject_get(p->kobj.parent);

        return 0;
}
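
/*
 * Illustrative sketch (not part of the original file): pairing cdev_alloc()
 * with cdev_add().  Because the device is live as soon as cdev_add()
 * returns, everything open() needs must be ready beforehand.  The names are
 * hypothetical.
 */
static struct cdev *example_cdev;

static int example_add_cdev(dev_t devt, const struct file_operations *fops)
{
        int err;

        example_cdev = cdev_alloc();
        if (!example_cdev)
                return -ENOMEM;

        example_cdev->ops = fops;
        example_cdev->owner = fops->owner;

        /* One minor; open() can be called the moment this succeeds. */
        err = cdev_add(example_cdev, devt, 1);
        if (err)
                kobject_put(&example_cdev->kobj); /* frees the cdev via its release */
        return err;
}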

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}


static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        struct kobject *parent = kobj->parent;

        cdev_purge(p);
        kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        struct kobject *parent = kobj->parent;

        cdev_purge(p);
        kfree(p);
        kobject_put(parent);
}

static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj, &ktype_cdev_dynamic);
        }
        return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        kobject_init(&cdev->kobj, &ktype_cdev_default);
        cdev->ops = fops;
}
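
/*
 * Illustrative sketch (not part of the original file): the embedded-cdev
 * pattern that cdev_init() is meant for, with the matching teardown order.
 * The structure, fops and names are hypothetical.
 */
struct example_device {
        struct cdev cdev;
        /* ... driver-private state ... */
};

static int example_setup(struct example_device *dev, dev_t devt,
                         const struct file_operations *fops)
{
        /* Wipes and initialises the embedded cdev, then records @fops. */
        cdev_init(&dev->cdev, fops);
        dev->cdev.owner = fops->owner;

        /* The device is usable by userspace as soon as this returns 0. */
        return cdev_add(&dev->cdev, devt, 1);
}

static void example_teardown(struct example_device *dev, dev_t devt)
{
        /* Remove the cdev first, then give the numbers back. */
        cdev_del(&dev->cdev);
        unregister_chrdev_region(devt, 1);
}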

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}

void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
        bdi_init(&directly_mappable_cdev_bdi);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);