/* drivers/mtd/ubi/build.c */
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * it later using the "UBI control device".
 */

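/*
 * For illustration only (the parameter syntax is documented at the bottom of
 * this file, see MODULE_PARM_DESC(mtd, ...)): an MTD device may be attached
 * at boot or module-load time with something like
 *
 *	ubi.mtd=4              - attach MTD device number 4
 *	ubi.mtd=content,1984   - attach MTD device "content", VID header offset 1984
 *	ubi.mtd=/dev/mtd1,0,25 - attach /dev/mtd1, default VID offset, reserve
 *	                         25 PEBs per 1024 for bad block handling
 *
 * or later at run time through the "UBI control device" (/dev/ubi_ctrl).
 */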
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
#include <linux/mtd/combo_nand.h>
#endif

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 3

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Numbers of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
#ifdef CONFIG_MTK_NAND_UBIFS_FASTMAP_SUPPORT
static bool fm_autoconvert = 1;
#else
static bool fm_autoconvert = 0;
#endif
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);
//MTK
static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
//MTK start
static struct device_attribute dev_lbb =
	__ATTR(lbb, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_move_retry =
	__ATTR(move_retry, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ec_count =
	__ATTR(ec_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mean_ec =
	__ATTR(mean_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ec_sum =
	__ATTR(ec_sum, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_ec =
	__ATTR(min_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_wl_count =
	__ATTR(wl_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_wl_size =
	__ATTR(wl_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_scrub_count =
	__ATTR(scrub_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_scrub_size =
	__ATTR(scrub_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_wl_th =
	__ATTR(wl_th, 00755, dev_attribute_show, dev_attribute_store);
static struct device_attribute dev_torture =
	__ATTR(torture, 00755, dev_attribute_show, NULL);
//MTK end
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

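/*
 * Illustration (not part of the driver): these attributes surface as plain
 * text files under sysfs, so from user space one might read, for example,
 *
 *	cat /sys/class/ubi/ubi0/eraseblock_size
 *	cat /sys/class/ubi/ubi0/bad_peb_count
 *
 * The exact set of files depends on what ubi_sysfs_init() below creates; the
 * MTK-specific ones (wl_th, torture, wl_count, ...) only exist on kernels
 * carrying this vendor patch.
 */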
/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

#ifdef CONFIG_MTD_UBI_FASTMAP
	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		if (ubi_update_fastmap(ubi)) {
			ubi_err("Unable to update fastmap!");
			ubi_ro_mode(ubi);
		}
	}
#endif
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}
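/*
 * Sketch for illustration: kernel clients normally do not call the helpers
 * above directly; they subscribe through the UBI kAPI (see
 * include/linux/mtd/ubi.h in mainline), roughly along these lines:
 *
 *	static int my_notify(struct notifier_block *nb,
 *			     unsigned long ntype, void *ns_ptr)
 *	{
 *		struct ubi_notification *nt = ns_ptr;
 *		pr_info("ubi%d vol %d event %lu\n",
 *			nt->vi.ubi_num, nt->vi.vol_id, ntype);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_notify };
 *
 *	ubi_register_volume_notifier(&my_nb, 0);
 *
 * Treat this as a sketch based on the mainline kAPI names, not as code
 * verified against this vendor tree.
 */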

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}
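/*
 * Typical usage pattern of the two helpers above (illustrative sketch; this
 * is how the sysfs code later in this file uses them):
 *
 *	struct ubi_device *ubi = ubi_get_device(ubi_num);
 *	if (!ubi)
 *		return -ENODEV;
 *	... ubi can be dereferenced safely, it cannot be detached meanwhile ...
 *	ubi_put_device(ubi);
 */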

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If the
 * UBI device was not found, this function returns -ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* MTK: "Store" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ubi_device *ubi;
	int th = 0;

	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_wl_th) {
		sscanf(buf, "%d", &th);
		printk("set th=%d\n", th);
		ubi->wl_th = th;
	}

	/* Balance the reference taken by ubi_get_device() above */
	ubi_put_device(ubi);
	return count;
}
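/*
 * Illustration: with the MTK store hook above, the wear-leveling threshold of
 * a running device can be tuned from user space, e.g.
 *
 *	echo 4096 > /sys/class/ubi/ubi0/wl_th
 *	cat /sys/class/ubi/ubi0/wl_th
 *
 * The default comes from CONFIG_MTD_UBI_WL_THRESHOLD, set up in
 * ubi_attach_mtd_dev() below; 4096 here is just an example value.
 */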
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
//MTK start
	else if (attr == &dev_torture)
		ret = sprintf(buf, "torture: %d\n", ubi->torture);
	else if (attr == &dev_wl_th)
		ret = sprintf(buf, "wl_th: %d\n", ubi->wl_th);
	else if (attr == &dev_wl_count)
		ret = sprintf(buf, "wl_count: %d\n", ubi->wl_count);
	else if (attr == &dev_wl_size)
		ret = sprintf(buf, "wl_size: %lld\n", ubi->wl_size);
	else if (attr == &dev_scrub_count)
		ret = sprintf(buf, "scrub_count: %d\n", ubi->scrub_count);
	else if (attr == &dev_scrub_size)
		ret = sprintf(buf, "scrub_size: %lld\n", ubi->scrub_size);
	else if (attr == &dev_move_retry)
		ret = sprintf(buf, "move_retry: %d\n", atomic_read(&ubi->move_retry));
	else if (attr == &dev_lbb)
		ret = sprintf(buf, "lbb: %d\n", atomic_read(&ubi->lbb));
	else if (attr == &dev_ec_count)
		ret = sprintf(buf, "ec_count: %d\n", atomic_read(&ubi->ec_count));
	else if (attr == &dev_mean_ec)
		ret = sprintf(buf, "mean_ec: %d\n", ubi->mean_ec);
	else if (attr == &dev_ec_sum)
		ret = sprintf(buf, "%lld\n", ubi->ec_sum);
	else if (attr == &dev_min_ec) {
		struct ubi_wl_entry *e = NULL, *efree = NULL, *eused = NULL;

		spin_lock(&ubi->wl_lock);
		efree = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
		eused = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		if (efree && eused) {
			if (efree->ec < eused->ec)
				e = efree;
			else
				e = eused;
		} else if (efree) {
			e = efree;
		} else {
			e = eused;
		}
		ret = sprintf(buf, "%d\n", e->ec);
		spin_unlock(&ubi->wl_lock);
	}
//MTK end
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
//MTK start
	err = device_create_file(&ubi->dev, &dev_lbb);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_move_retry);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_ec_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mean_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_ec_sum);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_wl_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_wl_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_scrub_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_scrub_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_wl_th);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_torture);
	if (err)
		return err;
//MTK end
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}
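/*
 * Concrete example of the numbering scheme described above (device node names
 * are the conventional ones created by udev/mdev; major 250 is only an
 * illustration, the real major is allocated dynamically):
 *
 *	/dev/ubi0	250:0	- the UBI device itself
 *	/dev/ubi0_0	250:1	- volume 0
 *	/dev/ubi0_1	250:2	- volume 1
 */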

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using the size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
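/*
 * Worked example (illustrative numbers): for a 512 MiB chip with 128 KiB
 * eraseblocks, device_pebs = 4096. With max_beb_per1024 = 20 the limit is
 * 4096 * 20 / 1024 = 80 PEBs. The "round it up" step only matters when the
 * division truncates: 4000 PEBs would give 78 (78.125 truncated), and since
 * 78 * 1024 / 20 = 3993 < 4000 the limit is bumped to 79.
 */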

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
	ubi->peb_size = COMBO_NAND_BLOCK_SIZE;
	ubi->peb_count = (int)div_u64(ubi->mtd->size, ubi->peb_size);
#else
	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
#endif
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
	ubi->min_io_size = COMBO_NAND_PAGE_SIZE;
	ubi->hdrs_min_io_size = ubi->min_io_size >> ubi->mtd->subpage_sft;
#else
	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
#endif

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
	ubi->max_write_size = COMBO_NAND_PAGE_SIZE;
#else
	ubi->max_write_size = ubi->mtd->writebufsize;
#endif
#ifdef CONFIG_MTK_MLC_NAND_SUPPORT
	ubi->max_write_size = ubi->mtd->erasesize/4;
#endif
	/*
	 * Maximum write size has to be greater than or equal to the min. I/O
	 * size, and be a multiple of the min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);

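	/*
	 * Worked example of the defaults computed above (illustrative,
	 * assuming a NAND with 2048-byte pages and 512-byte sub-pages):
	 * hdrs_min_io_size = 512, so ec_hdr_alsize = ALIGN(64, 512) = 512 and
	 * the VID header defaults to offset 512 (aloffset 512, shift 0);
	 * leb_start = ALIGN(512 + 64, 2048) = 2048, i.e. each 128 KiB PEB
	 * carries a 126 KiB LEB.
	 */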
	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size:  %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit:  %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size:            %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset:        %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset:              %d", ubi->leb_start);

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn("skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}
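/*
 * Note for illustration: the auto-resize flag is normally set at image
 * creation time by the ubinize tool (a "vol_flags=autoresize" line in the
 * ubinize .ini section of at most one volume), so the first attach after
 * flashing grows that volume over all PEBs left unused by the image.
 */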

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;
	unsigned long long attach_time = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			ubi_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;
//MTK start
	ubi->wl_th = CONFIG_MTD_UBI_WL_THRESHOLD;
	atomic_set(&ubi->ec_count, 0);
	atomic_set(&ubi->move_retry, 0);
//MTK end

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

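	/*
	 * Illustration of the clamping above: with 4096 PEBs the pool size is
	 * (4096 / 100) * 5 = 200 (the integer division happens first), which
	 * is then bounded by UBI_FM_MAX_POOL_SIZE; a tiny 400-PEB partition
	 * would compute (400 / 100) * 5 = 20 and, if that is below
	 * UBI_FM_MIN_POOL_SIZE, be raised to it.
	 */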
	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
	ubi->fm_disabled = !fm_autoconvert;

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	mutex_init(&ubi->fm_mutex);
	init_rwsem(&ubi->fm_sem);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = kmalloc(ubi->peb_size, GFP_KERNEL);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	attach_time = sched_clock();
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	/* Make device "available" before it becomes accessible via sysfs */
	ubi_devices[ubi_num] = ubi;

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_debugfs;
	}

	attach_time = sched_clock() - attach_time;
	do_div(attach_time, 1000000);
	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
	ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	get_device(&ubi->dev);
	ubi_assert(ref);
	uif_close(ubi);
out_detach:
	ubi_devices[ubi_num] = NULL;
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	kfree(ubi->vtbl);
out_free:
	kfree(ubi->peb_buf);
	kfree(ubi->fm_buf);
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}
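/*
 * For orientation (illustrative, not new functionality): this function is
 * reached either from ubi_init() below, once per "mtd=" module/boot
 * parameter, or at run time through the /dev/ubi_ctrl control device. A
 * user-space attach via the control device looks roughly like this (see
 * include/mtd/ubi-user.h for the real structure layout):
 *
 *	struct ubi_attach_req req = { .ubi_num = UBI_DEV_NUM_AUTO,
 *				      .mtd_num = 4 };
 *	int fd = open("/dev/ubi_ctrl", O_RDWR);
 *	ioctl(fd, UBI_IOCATT, &req);
 *
 * which is essentially what the ubiattach(8) tool does.
 */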

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success, %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/* If we don't write a new fastmap at detach time we lose all
	 * EC updates that have been made since the last written fastmap. */
	ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	kfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
#ifdef CONFIG_BLB
	kfree(ubi->databuf);
	kfree(ubi->oobbuf);
#endif
	kfree(ubi->peb_buf);
	kfree(ubi->fm_buf);
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think the "/dev/mtdrX" devices need to be
		 * supported, so skip them to avoid doing extra work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open the MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, then, if that fails,
 * as an MTD device name, and finally as an MTD character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * an MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab)
		goto out_dev_unreg;

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			ubi_err("cannot attach mtd%d", mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns positive resulting integer in case of success and a
 * negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
	case '\0':
		break;
	default:
		ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	return result;
}
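/*
 * Illustration of the parser above: the switch cases fall through on purpose,
 * so 'K' multiplies by 1024 once, 'M' twice and 'G' three times. For example
 * "2048" -> 2048, "512K" or "512KiB" -> 524288, "16M" or "16MiB" -> 16777216.
 * Strings whose numeric part cannot be parsed, or that are INT_MAX or larger
 * before the suffix is applied, are rejected with -EINVAL. This is the format
 * accepted for the vid_hdr_offs field of the "mtd=" parameter below.
 */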

/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT];

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		ubi_err("UBI error: too many parameters, max. is %d\n",
			UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
			val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		ubi_err("UBI error: too many arguments at \"%s\"\n", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	if (tokens[2]) {
		int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);

		if (err) {
			ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
				tokens[2]);
			return -EINVAL;
		}
	}

	mtd_devs += 1;
	return 0;
}
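/*
 * Worked example (illustrative): a command line carrying
 * "ubi.mtd=content,1984 ubi.mtd=4" results in two calls to the parser above,
 * filling mtd_dev_param[0] = { .name = "content", .vid_hdr_offs = 1984 } and
 * mtd_dev_param[1] = { .name = "4" }; ubi_init() later feeds each entry to
 * open_mtd_device() and ubi_attach_mtd_dev().
 */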

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");