/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * Well, there is the third kind of character devices - the UBI control
 * character device, which allows to manipulate by UBI devices - create and
 * delete them. In other words, it is used for attaching and detaching MTD
 * devices.
 */
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
50 * get_exclusive - get exclusive access to an UBI volume.
51 * @desc: volume descriptor
53 * This function changes UBI volume open mode to "exclusive". Returns previous
54 * mode value (positive integer) in case of success and a negative error code
57 static int get_exclusive(struct ubi_volume_desc
*desc
)
60 struct ubi_volume
*vol
= desc
->vol
;
62 spin_lock(&vol
->ubi
->volumes_lock
);
63 users
= vol
->readers
+ vol
->writers
+ vol
->exclusive
;
64 ubi_assert(users
> 0);
66 ubi_err("%d users for volume %d", users
, vol
->vol_id
);
69 vol
->readers
= vol
->writers
= 0;
72 desc
->mode
= UBI_EXCLUSIVE
;
74 spin_unlock(&vol
->ubi
->volumes_lock
);
80 * revoke_exclusive - revoke exclusive mode.
81 * @desc: volume descriptor
82 * @mode: new mode to switch to
84 static void revoke_exclusive(struct ubi_volume_desc
*desc
, int mode
)
86 struct ubi_volume
*vol
= desc
->vol
;
88 spin_lock(&vol
->ubi
->volumes_lock
);
89 ubi_assert(vol
->readers
== 0 && vol
->writers
== 0);
90 ubi_assert(vol
->exclusive
== 1 && desc
->mode
== UBI_EXCLUSIVE
);
92 if (mode
== UBI_READONLY
)
94 else if (mode
== UBI_READWRITE
)
98 spin_unlock(&vol
->ubi
->volumes_lock
);
103 static int vol_cdev_open(struct inode
*inode
, struct file
*file
)
105 struct ubi_volume_desc
*desc
;
106 int vol_id
= iminor(inode
) - 1, mode
, ubi_num
;
108 ubi_num
= ubi_major2num(imajor(inode
));
112 if (file
->f_mode
& FMODE_WRITE
)
113 mode
= UBI_READWRITE
;
117 dbg_gen("open device %d, volume %d, mode %d",
118 ubi_num
, vol_id
, mode
);
120 desc
= ubi_open_volume(ubi_num
, vol_id
, mode
);
122 return PTR_ERR(desc
);
124 file
->private_data
= desc
;
128 static int vol_cdev_release(struct inode
*inode
, struct file
*file
)
130 struct ubi_volume_desc
*desc
= file
->private_data
;
131 struct ubi_volume
*vol
= desc
->vol
;
133 dbg_gen("release device %d, volume %d, mode %d",
134 vol
->ubi
->ubi_num
, vol
->vol_id
, desc
->mode
);
137 ubi_warn("update of volume %d not finished, volume is damaged",
139 ubi_assert(!vol
->changing_leb
);
142 } else if (vol
->changing_leb
) {
143 dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
144 vol
->upd_received
, vol
->upd_bytes
, vol
->ubi
->ubi_num
,
146 vol
->changing_leb
= 0;
150 ubi_close_volume(desc
);
154 static loff_t
vol_cdev_llseek(struct file
*file
, loff_t offset
, int origin
)
156 struct ubi_volume_desc
*desc
= file
->private_data
;
157 struct ubi_volume
*vol
= desc
->vol
;
161 /* Update is in progress, seeking is prohibited */
167 case 0: /* SEEK_SET */
170 case 1: /* SEEK_CUR */
171 new_offset
= file
->f_pos
+ offset
;
173 case 2: /* SEEK_END */
174 new_offset
= vol
->used_bytes
+ offset
;
180 if (new_offset
< 0 || new_offset
> vol
->used_bytes
) {
181 ubi_err("bad seek %lld", new_offset
);
185 dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
186 vol
->vol_id
, offset
, origin
, new_offset
);
188 file
->f_pos
= new_offset
;
192 static int vol_cdev_fsync(struct file
*file
, loff_t start
, loff_t end
,
195 struct ubi_volume_desc
*desc
= file
->private_data
;
196 struct ubi_device
*ubi
= desc
->vol
->ubi
;
197 struct inode
*inode
= file_inode(file
);
199 mutex_lock(&inode
->i_mutex
);
200 err
= ubi_sync(ubi
->ubi_num
);
201 mutex_unlock(&inode
->i_mutex
);
206 static ssize_t
vol_cdev_read(struct file
*file
, __user
char *buf
, size_t count
,
209 struct ubi_volume_desc
*desc
= file
->private_data
;
210 struct ubi_volume
*vol
= desc
->vol
;
211 struct ubi_device
*ubi
= vol
->ubi
;
212 int err
, lnum
, off
, len
, tbuf_size
;
213 size_t count_save
= count
;
216 dbg_gen("read %zd bytes from offset %lld of volume %d",
217 count
, *offp
, vol
->vol_id
);
223 if (vol
->upd_marker
) {
224 ubi_err("damaged volume, update marker is set");
227 if (*offp
== vol
->used_bytes
|| count
== 0)
231 dbg_gen("read from corrupted volume %d", vol
->vol_id
);
233 if (*offp
+ count
> vol
->used_bytes
)
234 count_save
= count
= vol
->used_bytes
- *offp
;
236 tbuf_size
= vol
->usable_leb_size
;
237 if (count
< tbuf_size
)
238 tbuf_size
= ALIGN(count
, ubi
->min_io_size
);
239 tbuf
= kmalloc(tbuf_size
, GFP_KERNEL
);
243 len
= count
> tbuf_size
? tbuf_size
: count
;
244 lnum
= div_u64_rem(*offp
, vol
->usable_leb_size
, &off
);
249 if (off
+ len
>= vol
->usable_leb_size
)
250 len
= vol
->usable_leb_size
- off
;
252 err
= ubi_eba_read_leb(ubi
, vol
, lnum
, tbuf
, off
, len
, 0);
257 if (off
== vol
->usable_leb_size
) {
259 off
-= vol
->usable_leb_size
;
265 err
= copy_to_user(buf
, tbuf
, len
);
272 len
= count
> tbuf_size
? tbuf_size
: count
;
276 return err
? err
: count_save
- count
;
280 * This function allows to directly write to dynamic UBI volumes, without
281 * issuing the volume update operation.
283 static ssize_t
vol_cdev_direct_write(struct file
*file
, const char __user
*buf
,
284 size_t count
, loff_t
*offp
)
286 struct ubi_volume_desc
*desc
= file
->private_data
;
287 struct ubi_volume
*vol
= desc
->vol
;
288 struct ubi_device
*ubi
= vol
->ubi
;
289 int lnum
, off
, len
, tbuf_size
, err
= 0;
290 size_t count_save
= count
;
293 if (!vol
->direct_writes
)
296 dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
297 count
, *offp
, vol
->vol_id
);
299 if (vol
->vol_type
== UBI_STATIC_VOLUME
)
302 lnum
= div_u64_rem(*offp
, vol
->usable_leb_size
, &off
);
303 if (off
& (ubi
->min_io_size
- 1)) {
304 ubi_err("unaligned position");
308 if (*offp
+ count
> vol
->used_bytes
)
309 count_save
= count
= vol
->used_bytes
- *offp
;
311 /* We can write only in fractions of the minimum I/O unit */
312 if (count
& (ubi
->min_io_size
- 1)) {
313 ubi_err("unaligned write length");
317 tbuf_size
= vol
->usable_leb_size
;
318 if (count
< tbuf_size
)
319 tbuf_size
= ALIGN(count
, ubi
->min_io_size
);
320 tbuf
= kmalloc(tbuf_size
, GFP_KERNEL
);
324 len
= count
> tbuf_size
? tbuf_size
: count
;
329 if (off
+ len
>= vol
->usable_leb_size
)
330 len
= vol
->usable_leb_size
- off
;
332 err
= copy_from_user(tbuf
, buf
, len
);
338 err
= ubi_eba_write_leb(ubi
, vol
, lnum
, tbuf
, off
, len
);
343 if (off
== vol
->usable_leb_size
) {
345 off
-= vol
->usable_leb_size
;
351 len
= count
> tbuf_size
? tbuf_size
: count
;
355 return err
? err
: count_save
- count
;
358 static ssize_t
vol_cdev_write(struct file
*file
, const char __user
*buf
,
359 size_t count
, loff_t
*offp
)
362 struct ubi_volume_desc
*desc
= file
->private_data
;
363 struct ubi_volume
*vol
= desc
->vol
;
364 struct ubi_device
*ubi
= vol
->ubi
;
366 if (!vol
->updating
&& !vol
->changing_leb
)
367 return vol_cdev_direct_write(file
, buf
, count
, offp
);
370 err
= ubi_more_update_data(ubi
, vol
, buf
, count
);
372 err
= ubi_more_leb_change_data(ubi
, vol
, buf
, count
);
375 ubi_err("cannot accept more %zd bytes of data, error %d",
382 * The operation is finished, @err contains number of actually
387 if (vol
->changing_leb
) {
388 revoke_exclusive(desc
, UBI_READWRITE
);
392 err
= ubi_check_volume(ubi
, vol
->vol_id
);
397 ubi_warn("volume %d on UBI device %d is corrupted",
398 vol
->vol_id
, ubi
->ubi_num
);
402 ubi_volume_notify(ubi
, vol
, UBI_VOLUME_UPDATED
);
403 revoke_exclusive(desc
, UBI_READWRITE
);
409 static long vol_cdev_ioctl(struct file
*file
, unsigned int cmd
,
413 struct ubi_volume_desc
*desc
= file
->private_data
;
414 struct ubi_volume
*vol
= desc
->vol
;
415 struct ubi_device
*ubi
= vol
->ubi
;
417 void __user
*argp
= (void __user
*)arg
;
420 /* Volume update command */
423 int64_t bytes
, rsvd_bytes
;
425 struct ubi_volume
*backup_vol
= ubi
->volumes
[vol_id2idx(ubi
, UBI_BACKUP_VOLUME_ID
)];
427 if (!capable(CAP_SYS_RESOURCE
)) {
432 err
= copy_from_user(&bytes
, argp
, sizeof(int64_t));
438 if (desc
->mode
== UBI_READONLY
) {
443 rsvd_bytes
= (long long)vol
->reserved_pebs
*
444 ubi
->leb_size
-vol
->data_pad
;
445 if (bytes
< 0 || bytes
> rsvd_bytes
) {
450 err
= get_exclusive(desc
);
454 err
= ubi_start_update(ubi
, vol
, bytes
);
456 revoke_exclusive(desc
, UBI_READWRITE
);
458 ubi_eba_unmap_leb(ubi
, backup_vol
, 0);
459 ubi_eba_unmap_leb(ubi
, backup_vol
, 1);
464 /* Atomic logical eraseblock change command */
467 struct ubi_leb_change_req req
;
469 err
= copy_from_user(&req
, argp
,
470 sizeof(struct ubi_leb_change_req
));
476 if (desc
->mode
== UBI_READONLY
||
477 vol
->vol_type
== UBI_STATIC_VOLUME
) {
482 /* Validate the request */
484 if (req
.lnum
< 0 || req
.lnum
>= vol
->reserved_pebs
||
485 req
.bytes
< 0 || req
.lnum
>= vol
->usable_leb_size
)
488 err
= get_exclusive(desc
);
492 err
= ubi_start_leb_change(ubi
, vol
, &req
);
494 revoke_exclusive(desc
, UBI_READWRITE
);
498 /* Logical eraseblock erasure command */
503 err
= get_user(lnum
, (__user
int32_t *)argp
);
509 if (desc
->mode
== UBI_READONLY
||
510 vol
->vol_type
== UBI_STATIC_VOLUME
) {
515 if (lnum
< 0 || lnum
>= vol
->reserved_pebs
) {
520 dbg_gen("erase LEB %d:%d", vol
->vol_id
, lnum
);
521 err
= ubi_eba_unmap_leb(ubi
, vol
, lnum
);
525 err
= ubi_wl_flush(ubi
, UBI_ALL
, UBI_ALL
);
529 /* Logical eraseblock map command */
532 struct ubi_map_req req
;
534 err
= copy_from_user(&req
, argp
, sizeof(struct ubi_map_req
));
539 err
= ubi_leb_map(desc
, req
.lnum
);
543 /* Logical eraseblock un-map command */
548 err
= get_user(lnum
, (__user
int32_t *)argp
);
553 #ifdef MTK_IPOH_SUPPORT
556 err
= ubi_leb_unmap(desc
, lnum
);
557 #ifdef MTK_IPOH_SUPPORT
563 /* Check if logical eraseblock is mapped command */
568 err
= get_user(lnum
, (__user
int32_t *)argp
);
573 err
= ubi_is_mapped(desc
, lnum
);
577 /* Set volume property command */
578 case UBI_IOCSETVOLPROP
:
580 struct ubi_set_vol_prop_req req
;
582 err
= copy_from_user(&req
, argp
,
583 sizeof(struct ubi_set_vol_prop_req
));
588 switch (req
.property
) {
589 case UBI_VOL_PROP_DIRECT_WRITE
:
590 mutex_lock(&ubi
->device_mutex
);
591 desc
->vol
->direct_writes
= !!req
.value
;
592 mutex_unlock(&ubi
->device_mutex
);
603 err
= copy_from_user(LEB
, argp
, sizeof(int)*2);
608 LEB
[1] = desc
->vol
->eba_tbl
[LEB
[0]];
609 err
= copy_to_user(argp
, LEB
, sizeof(int)*2);
624 * verify_mkvol_req - verify volume creation request.
625 * @ubi: UBI device description object
626 * @req: the request to check
628 * This function zero if the request is correct, and %-EINVAL if not.
630 static int verify_mkvol_req(const struct ubi_device
*ubi
,
631 const struct ubi_mkvol_req
*req
)
633 int n
, err
= -EINVAL
;
635 if (req
->bytes
< 0 || req
->alignment
< 0 || req
->vol_type
< 0 ||
639 if ((req
->vol_id
< 0 || req
->vol_id
>= ubi
->vtbl_slots
) &&
640 req
->vol_id
!= UBI_VOL_NUM_AUTO
)
643 if (req
->alignment
== 0)
649 if (req
->vol_type
!= UBI_DYNAMIC_VOLUME
&&
650 req
->vol_type
!= UBI_STATIC_VOLUME
)
653 if (req
->alignment
> ubi
->leb_size
)
656 n
= req
->alignment
& (ubi
->min_io_size
- 1);
657 if (req
->alignment
!= 1 && n
)
660 if (!req
->name
[0] || !req
->name_len
)
663 if (req
->name_len
> UBI_VOL_NAME_MAX
) {
668 n
= strnlen(req
->name
, req
->name_len
+ 1);
669 if (n
!= req
->name_len
)
675 ubi_err("bad volume creation request");
676 ubi_dump_mkvol_req(req
);
681 * verify_rsvol_req - verify volume re-size request.
682 * @ubi: UBI device description object
683 * @req: the request to check
685 * This function returns zero if the request is correct, and %-EINVAL if not.
687 static int verify_rsvol_req(const struct ubi_device
*ubi
,
688 const struct ubi_rsvol_req
*req
)
693 if (req
->vol_id
< 0 || req
->vol_id
>= ubi
->vtbl_slots
)
700 * rename_volumes - rename UBI volumes.
701 * @ubi: UBI device description object
702 * @req: volumes re-name request
704 * This is a helper function for the volume re-name IOCTL which validates the
705 * the request, opens the volume and calls corresponding volumes management
706 * function. Returns zero in case of success and a negative error code in case
709 static int rename_volumes(struct ubi_device
*ubi
,
710 struct ubi_rnvol_req
*req
)
713 struct list_head rename_list
;
714 struct ubi_rename_entry
*re
, *re1
;
716 if (req
->count
< 0 || req
->count
> UBI_MAX_RNVOL
)
722 /* Validate volume IDs and names in the request */
723 for (i
= 0; i
< req
->count
; i
++) {
724 if (req
->ents
[i
].vol_id
< 0 ||
725 req
->ents
[i
].vol_id
>= ubi
->vtbl_slots
)
727 if (req
->ents
[i
].name_len
< 0)
729 if (req
->ents
[i
].name_len
> UBI_VOL_NAME_MAX
)
730 return -ENAMETOOLONG
;
731 req
->ents
[i
].name
[req
->ents
[i
].name_len
] = '\0';
732 n
= strlen(req
->ents
[i
].name
);
733 if (n
!= req
->ents
[i
].name_len
)
737 /* Make sure volume IDs and names are unique */
738 for (i
= 0; i
< req
->count
- 1; i
++) {
739 for (n
= i
+ 1; n
< req
->count
; n
++) {
740 if (req
->ents
[i
].vol_id
== req
->ents
[n
].vol_id
) {
741 ubi_err("duplicated volume id %d",
742 req
->ents
[i
].vol_id
);
745 if (!strcmp(req
->ents
[i
].name
, req
->ents
[n
].name
)) {
746 ubi_err("duplicated volume name \"%s\"",
753 /* Create the re-name list */
754 INIT_LIST_HEAD(&rename_list
);
755 for (i
= 0; i
< req
->count
; i
++) {
756 int vol_id
= req
->ents
[i
].vol_id
;
757 int name_len
= req
->ents
[i
].name_len
;
758 const char *name
= req
->ents
[i
].name
;
760 re
= kzalloc(sizeof(struct ubi_rename_entry
), GFP_KERNEL
);
766 re
->desc
= ubi_open_volume(ubi
->ubi_num
, vol_id
, UBI_EXCLUSIVE
);
767 if (IS_ERR(re
->desc
)) {
768 err
= PTR_ERR(re
->desc
);
769 ubi_err("cannot open volume %d, error %d", vol_id
, err
);
774 /* Skip this re-naming if the name does not really change */
775 if (re
->desc
->vol
->name_len
== name_len
&&
776 !memcmp(re
->desc
->vol
->name
, name
, name_len
)) {
777 ubi_close_volume(re
->desc
);
782 re
->new_name_len
= name_len
;
783 memcpy(re
->new_name
, name
, name_len
);
784 list_add_tail(&re
->list
, &rename_list
);
785 dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
786 vol_id
, re
->desc
->vol
->name
, name
);
789 if (list_empty(&rename_list
))
792 /* Find out the volumes which have to be removed */
793 list_for_each_entry(re
, &rename_list
, list
) {
794 struct ubi_volume_desc
*desc
;
795 int no_remove_needed
= 0;
798 * Volume @re->vol_id is going to be re-named to
799 * @re->new_name, while its current name is @name. If a volume
800 * with name @re->new_name currently exists, it has to be
801 * removed, unless it is also re-named in the request (@req).
803 list_for_each_entry(re1
, &rename_list
, list
) {
804 if (re
->new_name_len
== re1
->desc
->vol
->name_len
&&
805 !memcmp(re
->new_name
, re1
->desc
->vol
->name
,
806 re1
->desc
->vol
->name_len
)) {
807 no_remove_needed
= 1;
812 if (no_remove_needed
)
816 * It seems we need to remove volume with name @re->new_name,
819 desc
= ubi_open_volume_nm(ubi
->ubi_num
, re
->new_name
,
824 /* Re-naming into a non-existing volume name */
827 /* The volume exists but busy, or an error occurred */
828 ubi_err("cannot open volume \"%s\", error %d",
833 re1
= kzalloc(sizeof(struct ubi_rename_entry
), GFP_KERNEL
);
836 ubi_close_volume(desc
);
842 list_add(&re1
->list
, &rename_list
);
843 dbg_gen("will remove volume %d, name \"%s\"",
844 re1
->desc
->vol
->vol_id
, re1
->desc
->vol
->name
);
847 mutex_lock(&ubi
->device_mutex
);
848 err
= ubi_rename_volumes(ubi
, &rename_list
);
849 mutex_unlock(&ubi
->device_mutex
);
852 list_for_each_entry_safe(re
, re1
, &rename_list
, list
) {
853 ubi_close_volume(re
->desc
);
860 static long ubi_cdev_ioctl(struct file
*file
, unsigned int cmd
,
864 struct ubi_device
*ubi
;
865 struct ubi_volume_desc
*desc
;
866 void __user
*argp
= (void __user
*)arg
;
868 if (!capable(CAP_SYS_RESOURCE
))
871 ubi
= ubi_get_by_major(imajor(file
->f_mapping
->host
));
876 /* Create volume command */
879 struct ubi_mkvol_req req
;
881 dbg_gen("create volume");
882 err
= copy_from_user(&req
, argp
, sizeof(struct ubi_mkvol_req
));
888 err
= verify_mkvol_req(ubi
, &req
);
892 mutex_lock(&ubi
->device_mutex
);
893 err
= ubi_create_volume(ubi
, &req
);
894 mutex_unlock(&ubi
->device_mutex
);
898 err
= put_user(req
.vol_id
, (__user
int32_t *)argp
);
905 /* Remove volume command */
910 dbg_gen("remove volume");
911 err
= get_user(vol_id
, (__user
int32_t *)argp
);
917 desc
= ubi_open_volume(ubi
->ubi_num
, vol_id
, UBI_EXCLUSIVE
);
923 mutex_lock(&ubi
->device_mutex
);
924 err
= ubi_remove_volume(desc
, 0);
925 mutex_unlock(&ubi
->device_mutex
);
928 * The volume is deleted (unless an error occurred), and the
929 * 'struct ubi_volume' object will be freed when
930 * 'ubi_close_volume()' will call 'put_device()'.
932 ubi_close_volume(desc
);
936 /* Re-size volume command */
940 struct ubi_rsvol_req req
;
942 dbg_gen("re-size volume");
943 err
= copy_from_user(&req
, argp
, sizeof(struct ubi_rsvol_req
));
949 err
= verify_rsvol_req(ubi
, &req
);
953 desc
= ubi_open_volume(ubi
->ubi_num
, req
.vol_id
, UBI_EXCLUSIVE
);
959 pebs
= div_u64(req
.bytes
+ desc
->vol
->usable_leb_size
- 1,
960 desc
->vol
->usable_leb_size
);
962 mutex_lock(&ubi
->device_mutex
);
963 err
= ubi_resize_volume(desc
, pebs
);
964 mutex_unlock(&ubi
->device_mutex
);
965 ubi_close_volume(desc
);
969 /* Re-name volumes command */
972 struct ubi_rnvol_req
*req
;
974 dbg_gen("re-name volumes");
975 req
= kmalloc(sizeof(struct ubi_rnvol_req
), GFP_KERNEL
);
981 err
= copy_from_user(req
, argp
, sizeof(struct ubi_rnvol_req
));
988 err
= rename_volumes(ubi
, req
);
1002 static long ctrl_cdev_ioctl(struct file
*file
, unsigned int cmd
,
1006 void __user
*argp
= (void __user
*)arg
;
1008 if (!capable(CAP_SYS_RESOURCE
))
1012 /* Attach an MTD device command */
1015 struct ubi_attach_req req
;
1016 struct mtd_info
*mtd
;
1018 dbg_gen("attach MTD device");
1019 err
= copy_from_user(&req
, argp
, sizeof(struct ubi_attach_req
));
1025 if (req
.mtd_num
< 0 ||
1026 (req
.ubi_num
< 0 && req
.ubi_num
!= UBI_DEV_NUM_AUTO
)) {
1031 mtd
= get_mtd_device(NULL
, req
.mtd_num
);
1038 * Note, further request verification is done by
1039 * 'ubi_attach_mtd_dev()'.
1041 mutex_lock(&ubi_devices_mutex
);
1042 err
= ubi_attach_mtd_dev(mtd
, req
.ubi_num
, req
.vid_hdr_offset
,
1043 req
.max_beb_per1024
);
1044 mutex_unlock(&ubi_devices_mutex
);
1046 put_mtd_device(mtd
);
1048 /* @err contains UBI device number */
1049 err
= put_user(err
, (__user
int32_t *)argp
);
1054 /* Detach an MTD device command */
1059 dbg_gen("detach MTD device");
1060 err
= get_user(ubi_num
, (__user
int32_t *)argp
);
1066 mutex_lock(&ubi_devices_mutex
);
1067 err
= ubi_detach_mtd_dev(ubi_num
, 0);
1068 mutex_unlock(&ubi_devices_mutex
);
#ifdef CONFIG_COMPAT
/* 32-bit compat wrappers: translate the compat pointer argument and forward
 * to the native ioctl handlers. With !CONFIG_COMPAT the .compat_ioctl slots
 * are simply NULL. */
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif
1110 /* UBI volume character device operations */
1111 const struct file_operations ubi_vol_cdev_operations
= {
1112 .owner
= THIS_MODULE
,
1113 .open
= vol_cdev_open
,
1114 .release
= vol_cdev_release
,
1115 .llseek
= vol_cdev_llseek
,
1116 .read
= vol_cdev_read
,
1117 .write
= vol_cdev_write
,
1118 .fsync
= vol_cdev_fsync
,
1119 .unlocked_ioctl
= vol_cdev_ioctl
,
1120 .compat_ioctl
= vol_cdev_compat_ioctl
,
1123 /* UBI character device operations */
1124 const struct file_operations ubi_cdev_operations
= {
1125 .owner
= THIS_MODULE
,
1126 .llseek
= no_llseek
,
1127 .unlocked_ioctl
= ubi_cdev_ioctl
,
1128 .compat_ioctl
= ubi_cdev_compat_ioctl
,
1131 /* UBI control character device operations */
1132 const struct file_operations ubi_ctrl_cdev_operations
= {
1133 .owner
= THIS_MODULE
,
1134 .unlocked_ioctl
= ctrl_cdev_ioctl
,
1135 .compat_ioctl
= ctrl_cdev_compat_ioctl
,
1136 .llseek
= no_llseek
,