drivers/mtd/nand/nandsim.c
1 /*
2 * NAND flash simulator.
3 *
4 * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
5 *
6 * Copyright (C) 2004 Nokia Corporation
7 *
8 * Note: NS means "NAND Simulator".
9 * Note: Input means input TO flash chip, output means output FROM chip.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2, or (at your option) any later
14 * version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
19 * Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
24 *
25 * $Id: nandsim.c,v 1.8 2005/03/19 15:33:56 dedekind Exp $
26 */
27
28 #include <linux/init.h>
29 #include <linux/types.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/vmalloc.h>
33 #include <linux/slab.h>
34 #include <linux/errno.h>
35 #include <linux/string.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/partitions.h>
39 #include <linux/delay.h>
40 #include <linux/list.h>
41 #include <linux/random.h>
42
43 /* Default simulator parameter values */
44 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
45 !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
46 !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
47 !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
48 #define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
49 #define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
50 #define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
51 #define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
52 #endif
53
54 #ifndef CONFIG_NANDSIM_ACCESS_DELAY
55 #define CONFIG_NANDSIM_ACCESS_DELAY 25
56 #endif
57 #ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
58 #define CONFIG_NANDSIM_PROGRAMM_DELAY 200
59 #endif
60 #ifndef CONFIG_NANDSIM_ERASE_DELAY
61 #define CONFIG_NANDSIM_ERASE_DELAY 2
62 #endif
63 #ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
64 #define CONFIG_NANDSIM_OUTPUT_CYCLE 40
65 #endif
66 #ifndef CONFIG_NANDSIM_INPUT_CYCLE
67 #define CONFIG_NANDSIM_INPUT_CYCLE 50
68 #endif
69 #ifndef CONFIG_NANDSIM_BUS_WIDTH
70 #define CONFIG_NANDSIM_BUS_WIDTH 8
71 #endif
72 #ifndef CONFIG_NANDSIM_DO_DELAYS
73 #define CONFIG_NANDSIM_DO_DELAYS 0
74 #endif
75 #ifndef CONFIG_NANDSIM_LOG
76 #define CONFIG_NANDSIM_LOG 0
77 #endif
78 #ifndef CONFIG_NANDSIM_DBG
79 #define CONFIG_NANDSIM_DBG 0
80 #endif
81
82 static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
83 static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
84 static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
85 static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
86 static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
87 static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
88 static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
89 static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
90 static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
91 static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
92 static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
93 static uint log = CONFIG_NANDSIM_LOG;
94 static uint dbg = CONFIG_NANDSIM_DBG;
95 static unsigned long parts[MAX_MTD_DEVICES];
96 static unsigned int parts_num;
97 static char *badblocks = NULL;
98 static char *weakblocks = NULL;
99 static char *weakpages = NULL;
100 static unsigned int bitflips = 0;
101 static char *gravepages = NULL;
102 static unsigned int rptwear = 0;
103 static unsigned int overridesize = 0;
104
105 module_param(first_id_byte, uint, 0400);
106 module_param(second_id_byte, uint, 0400);
107 module_param(third_id_byte, uint, 0400);
108 module_param(fourth_id_byte, uint, 0400);
109 module_param(access_delay, uint, 0400);
110 module_param(programm_delay, uint, 0400);
111 module_param(erase_delay, uint, 0400);
112 module_param(output_cycle, uint, 0400);
113 module_param(input_cycle, uint, 0400);
114 module_param(bus_width, uint, 0400);
115 module_param(do_delays, uint, 0400);
116 module_param(log, uint, 0400);
117 module_param(dbg, uint, 0400);
118 module_param_array(parts, ulong, &parts_num, 0400);
119 module_param(badblocks, charp, 0400);
120 module_param(weakblocks, charp, 0400);
121 module_param(weakpages, charp, 0400);
122 module_param(bitflips, uint, 0400);
123 module_param(gravepages, charp, 0400);
124 module_param(rptwear, uint, 0400);
125 module_param(overridesize, uint, 0400);
126
127 MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
128 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
129 MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
130 MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
131 MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
132 MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
133 MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
134 MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
135 MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
136 MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
137 MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
138 MODULE_PARM_DESC(log, "Perform logging if not zero");
139 MODULE_PARM_DESC(dbg, "Output debug information if not zero");
140 MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
141 /* Page and erase block positions for the following parameters are independent of any partitions */
142 MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
143 MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
144 " separated by commas e.g. 113:2 means eb 113"
145 " can be erased only twice before failing");
146 MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
147 " separated by commas e.g. 1401:2 means page 1401"
148 " can be written only twice before failing");
149 MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
150 MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
151 " separated by commas e.g. 1401:2 means page 1401"
152 " can be read only twice before failing");
153 MODULE_PARM_DESC(rptwear, "Number of erases between wear reports, if not zero");
154 MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
155 "The size is specified in erase blocks and as the exponent of a power of two"
156 " e.g. 5 means a size of 32 erase blocks");
157
158 /* The largest possible page size */
159 #define NS_LARGEST_PAGE_SIZE 2048
160
161 /* The prefix for simulator output */
162 #define NS_OUTPUT_PREFIX "[nandsim]"
163
164 /* Simulator's output macros (logging, debugging, warning, error) */
165 #define NS_LOG(args...) \
166 do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
167 #define NS_DBG(args...) \
168 do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
169 #define NS_WARN(args...) \
170 do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
171 #define NS_ERR(args...) \
172 do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
173 #define NS_INFO(args...) \
174 do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
175
176 /* Busy-wait delay macros (microseconds, milliseconds) */
177 #define NS_UDELAY(us) \
178 do { if (do_delays) udelay(us); } while(0)
179 #define NS_MDELAY(us) \
180 do { if (do_delays) mdelay(us); } while(0)
181
182 /* Is the nandsim structure initialized? */
183 #define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
184
185 /* Good operation completion status */
186 #define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
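/*
 * Note: in the NAND status register the write-protect bit means "not
 * protected", so NS_STATUS_OK() includes NAND_STATUS_WP only while the
 * simulated wp line is low.
 */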
187
188 /* Operation failed completion status */
189 #define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
190
191 /* Calculate the page offset in flash RAM image by (row, column) address */
192 #define NS_RAW_OFFSET(ns) \
193 (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
194
195 /* Calculate the OOB offset in flash RAM image by (row, column) address */
196 #define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
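/*
 * Worked example (assuming a 512-byte page with a 16-byte OOB area, i.e.
 * pgshift = 9): for row = 3, column = 10 the raw offset is
 * (3 << 9) + 3 * 16 + 10 = 1536 + 48 + 10 = 1594, i.e. three full
 * page + OOB records of 528 bytes each, plus the column offset.
 */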
197
198 /* After a command is input, the simulator goes to one of the following states */
199 #define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
200 #define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
201 #define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
202 #define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
203 #define STATE_CMD_READOOB 0x00000005 /* read OOB area */
204 #define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
205 #define STATE_CMD_STATUS 0x00000007 /* read status */
206 #define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
207 #define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
208 #define STATE_CMD_READID 0x0000000A /* read ID */
209 #define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
210 #define STATE_CMD_RESET 0x0000000C /* reset */
211 #define STATE_CMD_MASK 0x0000000F /* command states mask */
212
213 /* After an address is input, the simulator goes to one of these states */
214 #define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
215 #define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
216 #define STATE_ADDR_ZERO 0x00000030 /* one byte zero address was accepted */
217 #define STATE_ADDR_MASK 0x00000030 /* address states mask */
218
219 /* During data input/output the simulator is in these states */
220 #define STATE_DATAIN 0x00000100 /* waiting for data input */
221 #define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
222
223 #define STATE_DATAOUT 0x00001000 /* waiting for page data output */
224 #define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
225 #define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
226 #define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
227 #define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
228
229 /* Previous operation is done, ready to accept new requests */
230 #define STATE_READY 0x00000000
231
232 /* This state is used to mark that the next state isn't known yet */
233 #define STATE_UNKNOWN 0x10000000
234
235 /* Simulator's actions bit masks */
236 #define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
237 #define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
238 #define ACTION_SECERASE 0x00300000 /* erase sector */
239 #define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
240 #define ACTION_HALFOFF   0x00500000 /* add half of the page size to the address */
241 #define ACTION_OOBOFF    0x00600000 /* add the OOB offset to the address */
242 #define ACTION_MASK 0x00700000 /* action mask */
243
244 #define NS_OPER_NUM 12 /* Number of operations supported by the simulator */
245 #define NS_OPER_STATES 6 /* Maximum number of states in operation */
246
247 #define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
248 #define OPT_PAGE256 0x00000001 /* 256-byte page chips */
249 #define OPT_PAGE512 0x00000002 /* 512-byte page chips */
250 #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
251 #define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
252 #define OPT_AUTOINCR     0x00000020 /* page number auto-increment is possible */
253 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
254 #define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */
255 #define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
256
257 /* Remove action bits from state */
258 #define NS_STATE(x) ((x) & ~ACTION_MASK)
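/*
 * For example, NS_STATE(STATE_CMD_READ0 | ACTION_ZEROOFF) yields
 * STATE_CMD_READ0: the action bits only say what to do when the state is
 * entered, they are not part of the state identity.
 */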
259
260 /*
261  * Maximum number of previous states that need to be saved. Currently saving
262  * is only needed for the page program operation preceded by a read command
263  * (which is only valid for 512-byte pages).
264 */
265 #define NS_MAX_PREVSTATES 1
266
267 /*
268 * A union to represent flash memory contents and flash buffer.
269 */
270 union ns_mem {
271 u_char *byte; /* for byte access */
272 uint16_t *word; /* for 16-bit word access */
273 };
274
275 /*
276 * The structure which describes all the internal simulator data.
277 */
278 struct nandsim {
279 struct mtd_partition partitions[MAX_MTD_DEVICES];
280 unsigned int nbparts;
281
282 uint busw; /* flash chip bus width (8 or 16) */
283 u_char ids[4]; /* chip's ID bytes */
284 uint32_t options; /* chip's characteristic bits */
285 uint32_t state; /* current chip state */
286 uint32_t nxstate; /* next expected state */
287
288         uint32_t *op;           /* current operation, NULL if the operation isn't known yet */
289 uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
290 uint16_t npstates; /* number of previous states saved */
291 uint16_t stateidx; /* current state index */
292
293 /* The simulated NAND flash pages array */
294 union ns_mem *pages;
295
296 /* Internal buffer of page + OOB size bytes */
297 union ns_mem buf;
298
299 /* NAND flash "geometry" */
300 struct nandsin_geometry {
301 uint32_t totsz; /* total flash size, bytes */
302 uint32_t secsz; /* flash sector (erase block) size, bytes */
303 uint pgsz; /* NAND flash page size, bytes */
304 uint oobsz; /* page OOB area size, bytes */
305 uint32_t totszoob; /* total flash size including OOB, bytes */
306                 uint pgszoob;       /* page size including OOB, bytes */
307 uint secszoob; /* sector size including OOB, bytes */
308 uint pgnum; /* total number of pages */
309 uint pgsec; /* number of pages per sector */
310                 uint secshift;      /* number of bits in sector size */
311                 uint pgshift;       /* number of bits in page size */
312                 uint oobshift;      /* number of bits in OOB size */
313 uint pgaddrbytes; /* bytes per page address */
314 uint secaddrbytes; /* bytes per sector address */
315                 uint idbytes;       /* the number of ID bytes that this chip outputs */
316 } geom;
317
318 /* NAND flash internal registers */
319 struct nandsim_regs {
320 unsigned command; /* the command register */
321 u_char status; /* the status register */
322 uint row; /* the page number */
323 uint column; /* the offset within page */
324 uint count; /* internal counter */
325 uint num; /* number of bytes which must be processed */
326 uint off; /* fixed page offset */
327 } regs;
328
329 /* NAND flash lines state */
330 struct ns_lines_status {
331 int ce; /* chip Enable */
332 int cle; /* command Latch Enable */
333 int ale; /* address Latch Enable */
334 int wp; /* write Protect */
335 } lines;
336 };
337
338 /*
339 * Operations array. To perform any operation the simulator must pass
340  * through the corresponding chain of states.
341 */
342 static struct nandsim_operations {
343 uint32_t reqopts; /* options which are required to perform the operation */
344 uint32_t states[NS_OPER_STATES]; /* operation's states */
345 } ops[NS_OPER_NUM] = {
346 /* Read page + OOB from the beginning */
347 {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
348 STATE_DATAOUT, STATE_READY}},
349 /* Read page + OOB from the second half */
350 {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
351 STATE_DATAOUT, STATE_READY}},
352 /* Read OOB */
353 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
354 STATE_DATAOUT, STATE_READY}},
355         /* Program page starting from the beginning */
356 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
357 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
358         /* Program page starting from the beginning (preceded by the READ0 command) */
359 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
360 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
361         /* Program page starting from the second half */
362 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
363 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
364         /* Program OOB */
365 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
366 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
367 /* Erase sector */
368 {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
369 /* Read status */
370 {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
371 /* Read multi-plane status */
372 {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
373 /* Read ID */
374 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
375 /* Large page devices read page */
376 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
377 STATE_DATAOUT, STATE_READY}}
378 };
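/*
 * How an entry is interpreted, using the first one above (small-page
 * "read from the beginning") as an example: the READ0 command puts the
 * chip into STATE_CMD_READ0 and zeroes the internal offset
 * (ACTION_ZEROOFF); the full page address then moves it to
 * STATE_ADDR_PAGE and copies page + OOB into the internal buffer
 * (ACTION_CPY); the host reads the buffer out during STATE_DATAOUT;
 * STATE_READY terminates the chain.
 */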
379
380 struct weak_block {
381 struct list_head list;
382 unsigned int erase_block_no;
383 unsigned int max_erases;
384 unsigned int erases_done;
385 };
386
387 static LIST_HEAD(weak_blocks);
388
389 struct weak_page {
390 struct list_head list;
391 unsigned int page_no;
392 unsigned int max_writes;
393 unsigned int writes_done;
394 };
395
396 static LIST_HEAD(weak_pages);
397
398 struct grave_page {
399 struct list_head list;
400 unsigned int page_no;
401 unsigned int max_reads;
402 unsigned int reads_done;
403 };
404
405 static LIST_HEAD(grave_pages);
406
407 static unsigned long *erase_block_wear = NULL;
408 static unsigned int wear_eb_count = 0;
409 static unsigned long total_wear = 0;
410 static unsigned int rptwear_cnt = 0;
411
412 /* MTD structure for NAND controller */
413 static struct mtd_info *nsmtd;
414
415 static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
416
417 /*
418 * Allocate array of page pointers and initialize the array to NULL
419 * pointers.
420 *
421 * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
422 */
423 static int alloc_device(struct nandsim *ns)
424 {
425 int i;
426
427 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
428 if (!ns->pages) {
429 NS_ERR("alloc_map: unable to allocate page array\n");
430 return -ENOMEM;
431 }
432 for (i = 0; i < ns->geom.pgnum; i++) {
433 ns->pages[i].byte = NULL;
434 }
435
436 return 0;
437 }
438
439 /*
440 * Free any allocated pages, and free the array of page pointers.
441 */
442 static void free_device(struct nandsim *ns)
443 {
444 int i;
445
446 if (ns->pages) {
447 for (i = 0; i < ns->geom.pgnum; i++) {
448 if (ns->pages[i].byte)
449 kfree(ns->pages[i].byte);
450 }
451 vfree(ns->pages);
452 }
453 }
454
455 static char *get_partition_name(int i)
456 {
457 char buf[64];
458 sprintf(buf, "NAND simulator partition %d", i);
459 return kstrdup(buf, GFP_KERNEL);
460 }
461
462 /*
463 * Initialize the nandsim structure.
464 *
465 * RETURNS: 0 if success, -ERRNO if failure.
466 */
467 static int init_nandsim(struct mtd_info *mtd)
468 {
469 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
470 struct nandsim *ns = (struct nandsim *)(chip->priv);
471 int i, ret = 0;
472 u_int32_t remains;
473 u_int32_t next_offset;
474
475 if (NS_IS_INITIALIZED(ns)) {
476 NS_ERR("init_nandsim: nandsim is already initialized\n");
477 return -EIO;
478 }
479
480 /* Force mtd to not do delays */
481 chip->chip_delay = 0;
482
483 /* Initialize the NAND flash parameters */
484 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
485 ns->geom.totsz = mtd->size;
486 ns->geom.pgsz = mtd->writesize;
487 ns->geom.oobsz = mtd->oobsize;
488 ns->geom.secsz = mtd->erasesize;
489 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
490 ns->geom.pgnum = ns->geom.totsz / ns->geom.pgsz;
491 ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz;
492 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
493 ns->geom.pgshift = chip->page_shift;
494 ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
495 ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
496 ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
497 ns->options = 0;
498
499 if (ns->geom.pgsz == 256) {
500 ns->options |= OPT_PAGE256;
501 }
502 else if (ns->geom.pgsz == 512) {
503 ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
504 if (ns->busw == 8)
505 ns->options |= OPT_PAGE512_8BIT;
506 } else if (ns->geom.pgsz == 2048) {
507 ns->options |= OPT_PAGE2048;
508 } else {
509 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
510 return -EIO;
511 }
512
513 if (ns->options & OPT_SMALLPAGE) {
514 if (ns->geom.totsz < (32 << 20)) {
515 ns->geom.pgaddrbytes = 3;
516 ns->geom.secaddrbytes = 2;
517 } else {
518 ns->geom.pgaddrbytes = 4;
519 ns->geom.secaddrbytes = 3;
520 }
521 } else {
522 if (ns->geom.totsz <= (128 << 20)) {
523 ns->geom.pgaddrbytes = 4;
524 ns->geom.secaddrbytes = 2;
525 } else {
526 ns->geom.pgaddrbytes = 5;
527 ns->geom.secaddrbytes = 3;
528 }
529 }
530
531 /* Fill the partition_info structure */
532 if (parts_num > ARRAY_SIZE(ns->partitions)) {
533 NS_ERR("too many partitions.\n");
534 ret = -EINVAL;
535 goto error;
536 }
537 remains = ns->geom.totsz;
538 next_offset = 0;
539 for (i = 0; i < parts_num; ++i) {
540 unsigned long part = parts[i];
541 if (!part || part > remains / ns->geom.secsz) {
542 NS_ERR("bad partition size.\n");
543 ret = -EINVAL;
544 goto error;
545 }
546 ns->partitions[i].name = get_partition_name(i);
547 ns->partitions[i].offset = next_offset;
548 ns->partitions[i].size = part * ns->geom.secsz;
549 next_offset += ns->partitions[i].size;
550 remains -= ns->partitions[i].size;
551 }
552 ns->nbparts = parts_num;
553 if (remains) {
554 if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
555 NS_ERR("too many partitions.\n");
556 ret = -EINVAL;
557 goto error;
558 }
559 ns->partitions[i].name = get_partition_name(i);
560 ns->partitions[i].offset = next_offset;
561 ns->partitions[i].size = remains;
562 ns->nbparts += 1;
563 }
564
565 /* Detect how many ID bytes the NAND chip outputs */
566 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
567 if (second_id_byte != nand_flash_ids[i].id)
568 continue;
569 if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
570 ns->options |= OPT_AUTOINCR;
571 }
572
573 if (ns->busw == 16)
574                 NS_WARN("16-bit flash support wasn't tested\n");
575
576 printk("flash size: %u MiB\n", ns->geom.totsz >> 20);
577 printk("page size: %u bytes\n", ns->geom.pgsz);
578 printk("OOB area size: %u bytes\n", ns->geom.oobsz);
579 printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
580 printk("pages number: %u\n", ns->geom.pgnum);
581 printk("pages per sector: %u\n", ns->geom.pgsec);
582 printk("bus width: %u\n", ns->busw);
583 printk("bits in sector size: %u\n", ns->geom.secshift);
584 printk("bits in page size: %u\n", ns->geom.pgshift);
585 printk("bits in OOB size: %u\n", ns->geom.oobshift);
586 printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10);
587 printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
588 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
589 printk("options: %#x\n", ns->options);
590
591 if ((ret = alloc_device(ns)) != 0)
592 goto error;
593
594 /* Allocate / initialize the internal buffer */
595 ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
596 if (!ns->buf.byte) {
597 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
598 ns->geom.pgszoob);
599 ret = -ENOMEM;
600 goto error;
601 }
602 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
603
604 return 0;
605
606 error:
607 free_device(ns);
608
609 return ret;
610 }
611
612 /*
613 * Free the nandsim structure.
614 */
615 static void free_nandsim(struct nandsim *ns)
616 {
617 kfree(ns->buf.byte);
618 free_device(ns);
619
620 return;
621 }
622
623 static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
624 {
625 char *w;
626 int zero_ok;
627 unsigned int erase_block_no;
628 loff_t offset;
629
630 if (!badblocks)
631 return 0;
632 w = badblocks;
633 do {
634 zero_ok = (*w == '0' ? 1 : 0);
635 erase_block_no = simple_strtoul(w, &w, 0);
636 if (!zero_ok && !erase_block_no) {
637 NS_ERR("invalid badblocks.\n");
638 return -EINVAL;
639 }
640 offset = erase_block_no * ns->geom.secsz;
641 if (mtd->block_markbad(mtd, offset)) {
642 NS_ERR("invalid badblocks.\n");
643 return -EINVAL;
644 }
645 if (*w == ',')
646 w += 1;
647 } while (*w);
648 return 0;
649 }
650
651 static int parse_weakblocks(void)
652 {
653 char *w;
654 int zero_ok;
655 unsigned int erase_block_no;
656 unsigned int max_erases;
657 struct weak_block *wb;
658
659 if (!weakblocks)
660 return 0;
661 w = weakblocks;
662 do {
663 zero_ok = (*w == '0' ? 1 : 0);
664 erase_block_no = simple_strtoul(w, &w, 0);
665 if (!zero_ok && !erase_block_no) {
666 NS_ERR("invalid weakblocks.\n");
667 return -EINVAL;
668 }
669 max_erases = 3;
670 if (*w == ':') {
671 w += 1;
672 max_erases = simple_strtoul(w, &w, 0);
673 }
674 if (*w == ',')
675 w += 1;
676 wb = kzalloc(sizeof(*wb), GFP_KERNEL);
677 if (!wb) {
678 NS_ERR("unable to allocate memory.\n");
679 return -ENOMEM;
680 }
681 wb->erase_block_no = erase_block_no;
682 wb->max_erases = max_erases;
683 list_add(&wb->list, &weak_blocks);
684 } while (*w);
685 return 0;
686 }
687
688 static int erase_error(unsigned int erase_block_no)
689 {
690 struct weak_block *wb;
691
692 list_for_each_entry(wb, &weak_blocks, list)
693 if (wb->erase_block_no == erase_block_no) {
694 if (wb->erases_done >= wb->max_erases)
695 return 1;
696 wb->erases_done += 1;
697 return 0;
698 }
699 return 0;
700 }
701
702 static int parse_weakpages(void)
703 {
704 char *w;
705 int zero_ok;
706 unsigned int page_no;
707 unsigned int max_writes;
708 struct weak_page *wp;
709
710 if (!weakpages)
711 return 0;
712 w = weakpages;
713 do {
714 zero_ok = (*w == '0' ? 1 : 0);
715 page_no = simple_strtoul(w, &w, 0);
716 if (!zero_ok && !page_no) {
717 NS_ERR("invalid weakpagess.\n");
718 return -EINVAL;
719 }
720 max_writes = 3;
721 if (*w == ':') {
722 w += 1;
723 max_writes = simple_strtoul(w, &w, 0);
724 }
725 if (*w == ',')
726 w += 1;
727 wp = kzalloc(sizeof(*wp), GFP_KERNEL);
728 if (!wp) {
729 NS_ERR("unable to allocate memory.\n");
730 return -ENOMEM;
731 }
732 wp->page_no = page_no;
733 wp->max_writes = max_writes;
734 list_add(&wp->list, &weak_pages);
735 } while (*w);
736 return 0;
737 }
738
739 static int write_error(unsigned int page_no)
740 {
741 struct weak_page *wp;
742
743 list_for_each_entry(wp, &weak_pages, list)
744 if (wp->page_no == page_no) {
745 if (wp->writes_done >= wp->max_writes)
746 return 1;
747 wp->writes_done += 1;
748 return 0;
749 }
750 return 0;
751 }
752
753 static int parse_gravepages(void)
754 {
755 char *g;
756 int zero_ok;
757 unsigned int page_no;
758 unsigned int max_reads;
759 struct grave_page *gp;
760
761 if (!gravepages)
762 return 0;
763 g = gravepages;
764 do {
765 zero_ok = (*g == '0' ? 1 : 0);
766 page_no = simple_strtoul(g, &g, 0);
767 if (!zero_ok && !page_no) {
768 NS_ERR("invalid gravepagess.\n");
769 return -EINVAL;
770 }
771 max_reads = 3;
772 if (*g == ':') {
773 g += 1;
774 max_reads = simple_strtoul(g, &g, 0);
775 }
776 if (*g == ',')
777 g += 1;
778 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
779 if (!gp) {
780 NS_ERR("unable to allocate memory.\n");
781 return -ENOMEM;
782 }
783 gp->page_no = page_no;
784 gp->max_reads = max_reads;
785 list_add(&gp->list, &grave_pages);
786 } while (*g);
787 return 0;
788 }
789
790 static int read_error(unsigned int page_no)
791 {
792 struct grave_page *gp;
793
794 list_for_each_entry(gp, &grave_pages, list)
795 if (gp->page_no == page_no) {
796 if (gp->reads_done >= gp->max_reads)
797 return 1;
798 gp->reads_done += 1;
799 return 0;
800 }
801 return 0;
802 }
803
804 static void free_lists(void)
805 {
806 struct list_head *pos, *n;
807 list_for_each_safe(pos, n, &weak_blocks) {
808 list_del(pos);
809 kfree(list_entry(pos, struct weak_block, list));
810 }
811 list_for_each_safe(pos, n, &weak_pages) {
812 list_del(pos);
813 kfree(list_entry(pos, struct weak_page, list));
814 }
815 list_for_each_safe(pos, n, &grave_pages) {
816 list_del(pos);
817 kfree(list_entry(pos, struct grave_page, list));
818 }
819 kfree(erase_block_wear);
820 }
821
822 static int setup_wear_reporting(struct mtd_info *mtd)
823 {
824 size_t mem;
825
826 if (!rptwear)
827 return 0;
828 wear_eb_count = mtd->size / mtd->erasesize;
829 mem = wear_eb_count * sizeof(unsigned long);
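        /* The division check below detects overflow of the multiplication above */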
830 if (mem / sizeof(unsigned long) != wear_eb_count) {
831 NS_ERR("Too many erase blocks for wear reporting\n");
832 return -ENOMEM;
833 }
834 erase_block_wear = kzalloc(mem, GFP_KERNEL);
835 if (!erase_block_wear) {
836                 NS_ERR("Unable to allocate memory for wear reporting\n");
837 return -ENOMEM;
838 }
839 return 0;
840 }
841
842 static void update_wear(unsigned int erase_block_no)
843 {
844 unsigned long wmin = -1, wmax = 0, avg;
845 unsigned long deciles[10], decile_max[10], tot = 0;
846 unsigned int i;
847
848 if (!erase_block_wear)
849 return;
850 total_wear += 1;
851 if (total_wear == 0)
852 NS_ERR("Erase counter total overflow\n");
853 erase_block_wear[erase_block_no] += 1;
854 if (erase_block_wear[erase_block_no] == 0)
855 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
856 rptwear_cnt += 1;
857 if (rptwear_cnt < rptwear)
858 return;
859 rptwear_cnt = 0;
860 /* Calc wear stats */
861 for (i = 0; i < wear_eb_count; ++i) {
862 unsigned long wear = erase_block_wear[i];
863 if (wear < wmin)
864 wmin = wear;
865 if (wear > wmax)
866 wmax = wear;
867 tot += wear;
868 }
869 for (i = 0; i < 9; ++i) {
870 deciles[i] = 0;
871 decile_max[i] = (wmax * (i + 1) + 5) / 10;
872 }
873 deciles[9] = 0;
874 decile_max[9] = wmax;
875 for (i = 0; i < wear_eb_count; ++i) {
876 int d;
877 unsigned long wear = erase_block_wear[i];
878 for (d = 0; d < 10; ++d)
879 if (wear <= decile_max[d]) {
880 deciles[d] += 1;
881 break;
882 }
883 }
884 avg = tot / wear_eb_count;
885 /* Output wear report */
886 NS_INFO("*** Wear Report ***\n");
887 NS_INFO("Total numbers of erases: %lu\n", tot);
888 NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
889 NS_INFO("Average number of erases: %lu\n", avg);
890 NS_INFO("Maximum number of erases: %lu\n", wmax);
891 NS_INFO("Minimum number of erases: %lu\n", wmin);
892 for (i = 0; i < 10; ++i) {
893 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
894 if (from > decile_max[i])
895 continue;
896 NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
897 from,
898 decile_max[i],
899 deciles[i]);
900 }
901 NS_INFO("*** End of Wear Report ***\n");
902 }
903
904 /*
905 * Returns the string representation of 'state' state.
906 */
907 static char *get_state_name(uint32_t state)
908 {
909 switch (NS_STATE(state)) {
910 case STATE_CMD_READ0:
911 return "STATE_CMD_READ0";
912 case STATE_CMD_READ1:
913 return "STATE_CMD_READ1";
914 case STATE_CMD_PAGEPROG:
915 return "STATE_CMD_PAGEPROG";
916 case STATE_CMD_READOOB:
917 return "STATE_CMD_READOOB";
918 case STATE_CMD_READSTART:
919 return "STATE_CMD_READSTART";
920 case STATE_CMD_ERASE1:
921 return "STATE_CMD_ERASE1";
922 case STATE_CMD_STATUS:
923 return "STATE_CMD_STATUS";
924 case STATE_CMD_STATUS_M:
925 return "STATE_CMD_STATUS_M";
926 case STATE_CMD_SEQIN:
927 return "STATE_CMD_SEQIN";
928 case STATE_CMD_READID:
929 return "STATE_CMD_READID";
930 case STATE_CMD_ERASE2:
931 return "STATE_CMD_ERASE2";
932 case STATE_CMD_RESET:
933 return "STATE_CMD_RESET";
934 case STATE_ADDR_PAGE:
935 return "STATE_ADDR_PAGE";
936 case STATE_ADDR_SEC:
937 return "STATE_ADDR_SEC";
938 case STATE_ADDR_ZERO:
939 return "STATE_ADDR_ZERO";
940 case STATE_DATAIN:
941 return "STATE_DATAIN";
942 case STATE_DATAOUT:
943 return "STATE_DATAOUT";
944 case STATE_DATAOUT_ID:
945 return "STATE_DATAOUT_ID";
946 case STATE_DATAOUT_STATUS:
947 return "STATE_DATAOUT_STATUS";
948 case STATE_DATAOUT_STATUS_M:
949 return "STATE_DATAOUT_STATUS_M";
950 case STATE_READY:
951 return "STATE_READY";
952 case STATE_UNKNOWN:
953 return "STATE_UNKNOWN";
954 }
955
956 NS_ERR("get_state_name: unknown state, BUG\n");
957 return NULL;
958 }
959
960 /*
961 * Check if command is valid.
962 *
963 * RETURNS: 1 if wrong command, 0 if right.
964 */
965 static int check_command(int cmd)
966 {
967 switch (cmd) {
968
969 case NAND_CMD_READ0:
970 case NAND_CMD_READSTART:
971 case NAND_CMD_PAGEPROG:
972 case NAND_CMD_READOOB:
973 case NAND_CMD_ERASE1:
974 case NAND_CMD_STATUS:
975 case NAND_CMD_SEQIN:
976 case NAND_CMD_READID:
977 case NAND_CMD_ERASE2:
978 case NAND_CMD_RESET:
979 case NAND_CMD_READ1:
980 return 0;
981
982 case NAND_CMD_STATUS_MULTI:
983 default:
984 return 1;
985 }
986 }
987
988 /*
989 * Returns state after command is accepted by command number.
990 */
991 static uint32_t get_state_by_command(unsigned command)
992 {
993 switch (command) {
994 case NAND_CMD_READ0:
995 return STATE_CMD_READ0;
996 case NAND_CMD_READ1:
997 return STATE_CMD_READ1;
998 case NAND_CMD_PAGEPROG:
999 return STATE_CMD_PAGEPROG;
1000 case NAND_CMD_READSTART:
1001 return STATE_CMD_READSTART;
1002 case NAND_CMD_READOOB:
1003 return STATE_CMD_READOOB;
1004 case NAND_CMD_ERASE1:
1005 return STATE_CMD_ERASE1;
1006 case NAND_CMD_STATUS:
1007 return STATE_CMD_STATUS;
1008 case NAND_CMD_STATUS_MULTI:
1009 return STATE_CMD_STATUS_M;
1010 case NAND_CMD_SEQIN:
1011 return STATE_CMD_SEQIN;
1012 case NAND_CMD_READID:
1013 return STATE_CMD_READID;
1014 case NAND_CMD_ERASE2:
1015 return STATE_CMD_ERASE2;
1016 case NAND_CMD_RESET:
1017 return STATE_CMD_RESET;
1018 }
1019
1020 NS_ERR("get_state_by_command: unknown command, BUG\n");
1021 return 0;
1022 }
1023
1024 /*
1025  * Move an address byte to the corresponding internal register.
1026 */
1027 static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
1028 {
1029 uint byte = (uint)bt;
1030
1031 if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
1032 ns->regs.column |= (byte << 8 * ns->regs.count);
1033 else {
1034 ns->regs.row |= (byte << 8 * (ns->regs.count -
1035 ns->geom.pgaddrbytes +
1036 ns->geom.secaddrbytes));
1037 }
1038
1039 return;
1040 }
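/*
 * Example (small-page chip, pgaddrbytes = 3, secaddrbytes = 2): the first
 * address byte (count 0) lands in the column register, while the second
 * and third bytes (counts 1 and 2) are shifted into bits 0-7 and 8-15 of
 * the row register respectively.
 */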
1041
1042 /*
1043 * Switch to STATE_READY state.
1044 */
1045 static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1046 {
1047 NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
1048
1049 ns->state = STATE_READY;
1050 ns->nxstate = STATE_UNKNOWN;
1051 ns->op = NULL;
1052 ns->npstates = 0;
1053 ns->stateidx = 0;
1054 ns->regs.num = 0;
1055 ns->regs.count = 0;
1056 ns->regs.off = 0;
1057 ns->regs.row = 0;
1058 ns->regs.column = 0;
1059 ns->regs.status = status;
1060 }
1061
1062 /*
1063 * If the operation isn't known yet, try to find it in the global array
1064 * of supported operations.
1065 *
1066  * The operation can be unknown for the following reasons:
1067  * 1. A new command was accepted and this is the first call to find the
1068  *    corresponding state chain. In this case ns->npstates = 0;
1069  * 2. There are several operations which begin with the same command(s)
1070 * (for example program from the second half and read from the
1071 * second half operations both begin with the READ1 command). In this
1072 * case the ns->pstates[] array contains previous states.
1073 *
1074 * Thus, the function tries to find operation containing the following
1075 * states (if the 'flag' parameter is 0):
1076 * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
1077 *
1078  * If one (and only one) matching operation is found, it is accepted
1079  * (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
1080  * zeroed).
1081  *
1082  * If there are several matches, the current state is pushed onto the
1083  * ns->pstates array.
1084 *
1085 * The operation can be unknown only while commands are input to the chip.
1086  * As soon as an address is accepted, the operation must be known.
1087  * In that situation the function is called with 'flag' != 0, and the
1088 * operation is searched using the following pattern:
1089 * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
1090 *
1091  * It is supposed that this pattern must match either one operation or
1092  * none. There can't be ambiguity in that case.
1093 *
1094  * If no matches are found, the function does the following:
1095 * 1. if there are saved states present, try to ignore them and search
1096 * again only using the last command. If nothing was found, switch
1097 * to the STATE_READY state.
1098 * 2. if there are no saved states, switch to the STATE_READY state.
1099 *
1100 * RETURNS: -2 - no matched operations found.
1101 * -1 - several matches.
1102 * 0 - operation is found.
1103 */
1104 static int find_operation(struct nandsim *ns, uint32_t flag)
1105 {
1106 int opsfound = 0;
1107 int i, j, idx = 0;
1108
1109 for (i = 0; i < NS_OPER_NUM; i++) {
1110
1111 int found = 1;
1112
1113 if (!(ns->options & ops[i].reqopts))
1114 /* Ignore operations we can't perform */
1115 continue;
1116
1117 if (flag) {
1118 if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
1119 continue;
1120 } else {
1121 if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
1122 continue;
1123 }
1124
1125 for (j = 0; j < ns->npstates; j++)
1126 if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
1127 && (ns->options & ops[idx].reqopts)) {
1128 found = 0;
1129 break;
1130 }
1131
1132 if (found) {
1133 idx = i;
1134 opsfound += 1;
1135 }
1136 }
1137
1138 if (opsfound == 1) {
1139 /* Exact match */
1140 ns->op = &ops[idx].states[0];
1141 if (flag) {
1142 /*
1143                          * In this case find_operation() was called when the
1144                          * address input has just begun. The address isn't
1145                          * fully input yet, so the current state must not be
1146                          * one of STATE_ADDR_*; instead, the STATE_ADDR_* state
1147                          * must be the next state (ns->nxstate).
1148 */
1149 ns->stateidx = ns->npstates - 1;
1150 } else {
1151 ns->stateidx = ns->npstates;
1152 }
1153 ns->npstates = 0;
1154 ns->state = ns->op[ns->stateidx];
1155 ns->nxstate = ns->op[ns->stateidx + 1];
1156 NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
1157 idx, get_state_name(ns->state), get_state_name(ns->nxstate));
1158 return 0;
1159 }
1160
1161 if (opsfound == 0) {
1162 /* Nothing was found. Try to ignore previous commands (if any) and search again */
1163 if (ns->npstates != 0) {
1164 NS_DBG("find_operation: no operation found, try again with state %s\n",
1165 get_state_name(ns->state));
1166 ns->npstates = 0;
1167 return find_operation(ns, 0);
1168
1169 }
1170 NS_DBG("find_operation: no operations found\n");
1171 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1172 return -2;
1173 }
1174
1175 if (flag) {
1176 /* This shouldn't happen */
1177 NS_DBG("find_operation: BUG, operation must be known if address is input\n");
1178 return -2;
1179 }
1180
1181 NS_DBG("find_operation: there is still ambiguity\n");
1182
1183 ns->pstates[ns->npstates++] = ns->state;
1184
1185 return -1;
1186 }
1187
1188 /*
1189 * Returns a pointer to the current page.
1190 */
1191 static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
1192 {
1193 return &(ns->pages[ns->regs.row]);
1194 }
1195
1196 /*
1197  * Returns a pointer to the current byte, within the current page.
1198 */
1199 static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1200 {
1201 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1202 }
1203
1204 /*
1205 * Fill the NAND buffer with data read from the specified page.
1206 */
1207 static void read_page(struct nandsim *ns, int num)
1208 {
1209 union ns_mem *mypage;
1210
1211 mypage = NS_GET_PAGE(ns);
1212 if (mypage->byte == NULL) {
1213 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1214 memset(ns->buf.byte, 0xFF, num);
1215 } else {
1216 unsigned int page_no = ns->regs.row;
1217 NS_DBG("read_page: page %d allocated, reading from %d\n",
1218 ns->regs.row, ns->regs.column + ns->regs.off);
1219 if (read_error(page_no)) {
1220 int i;
1221 memset(ns->buf.byte, 0xFF, num);
1222 for (i = 0; i < num; ++i)
1223 ns->buf.byte[i] = random32();
1224 NS_WARN("simulating read error in page %u\n", page_no);
1225 return;
1226 }
1227 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
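                /*
                 * random32() is uniform over 32 bits, so the comparison below
                 * fires with probability 2^22 / 2^32, i.e. roughly once per
                 * 1024 page reads; each hit injects between 1 and 'bitflips'
                 * random single-bit errors.
                 */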
1228 if (bitflips && random32() < (1 << 22)) {
1229 int flips = 1;
1230 if (bitflips > 1)
1231 flips = (random32() % (int) bitflips) + 1;
1232 while (flips--) {
1233 int pos = random32() % (num * 8);
1234 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1235 NS_WARN("read_page: flipping bit %d in page %d "
1236 "reading from %d ecc: corrected=%u failed=%u\n",
1237 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1238 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1239 }
1240 }
1241 }
1242 }
1243
1244 /*
1245 * Erase all pages in the specified sector.
1246 */
1247 static void erase_sector(struct nandsim *ns)
1248 {
1249 union ns_mem *mypage;
1250 int i;
1251
1252 mypage = NS_GET_PAGE(ns);
1253 for (i = 0; i < ns->geom.pgsec; i++) {
1254 if (mypage->byte != NULL) {
1255 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1256 kfree(mypage->byte);
1257 mypage->byte = NULL;
1258 }
1259 mypage++;
1260 }
1261 }
1262
1263 /*
1264 * Program the specified page with the contents from the NAND buffer.
1265 */
1266 static int prog_page(struct nandsim *ns, int num)
1267 {
1268 int i;
1269 union ns_mem *mypage;
1270 u_char *pg_off;
1271
1272 mypage = NS_GET_PAGE(ns);
1273 if (mypage->byte == NULL) {
1274 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1275 /*
1276 * We allocate memory with GFP_NOFS because a flash FS may
1277 * utilize this. If it is holding an FS lock, then gets here,
1278 * then kmalloc runs writeback which goes to the FS again
1279 * and deadlocks. This was seen in practice.
1280 */
1281 mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS);
1282 if (mypage->byte == NULL) {
1283 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1284 return -1;
1285 }
1286 memset(mypage->byte, 0xFF, ns->geom.pgszoob);
1287 }
1288
1289 pg_off = NS_PAGE_BYTE_OFF(ns);
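        /*
         * Programming can only clear bits (1 -> 0); only an erase brings them
         * back to 1, hence the AND below instead of a plain copy.
         */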
1290 for (i = 0; i < num; i++)
1291 pg_off[i] &= ns->buf.byte[i];
1292
1293 return 0;
1294 }
1295
1296 /*
1297 * If state has any action bit, perform this action.
1298 *
1299 * RETURNS: 0 if success, -1 if error.
1300 */
1301 static int do_state_action(struct nandsim *ns, uint32_t action)
1302 {
1303 int num;
1304 int busdiv = ns->busw == 8 ? 1 : 2;
1305 unsigned int erase_block_no, page_no;
1306
1307 action &= ACTION_MASK;
1308
1309 /* Check that page address input is correct */
1310 if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
1311 NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
1312 return -1;
1313 }
1314
1315 switch (action) {
1316
1317 case ACTION_CPY:
1318 /*
1319 * Copy page data to the internal buffer.
1320 */
1321
1322 /* Column shouldn't be very large */
1323 if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
1324 NS_ERR("do_state_action: column number is too large\n");
1325 break;
1326 }
1327 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1328 read_page(ns, num);
1329
1330 NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
1331 num, NS_RAW_OFFSET(ns) + ns->regs.off);
1332
1333 if (ns->regs.off == 0)
1334 NS_LOG("read page %d\n", ns->regs.row);
1335 else if (ns->regs.off < ns->geom.pgsz)
1336 NS_LOG("read page %d (second half)\n", ns->regs.row);
1337 else
1338 NS_LOG("read OOB of page %d\n", ns->regs.row);
1339
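                /*
                 * access_delay is in microseconds; input_cycle is in
                 * nanoseconds per word, so multiplying by the number of words
                 * in a page (pgsz / busdiv) and dividing by 1000 gives the
                 * transfer time in microseconds.
                 */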
1340 NS_UDELAY(access_delay);
1341 NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
1342
1343 break;
1344
1345 case ACTION_SECERASE:
1346 /*
1347 * Erase sector.
1348 */
1349
1350 if (ns->lines.wp) {
1351 NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
1352 return -1;
1353 }
1354
1355 if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
1356 || (ns->regs.row & ~(ns->geom.secsz - 1))) {
1357 NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
1358 return -1;
1359 }
1360
1361 ns->regs.row = (ns->regs.row <<
1362 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
1363 ns->regs.column = 0;
1364
1365 erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
1366
1367 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
1368 ns->regs.row, NS_RAW_OFFSET(ns));
1369 NS_LOG("erase sector %u\n", erase_block_no);
1370
1371 erase_sector(ns);
1372
1373 NS_MDELAY(erase_delay);
1374
1375 if (erase_block_wear)
1376 update_wear(erase_block_no);
1377
1378 if (erase_error(erase_block_no)) {
1379 NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
1380 return -1;
1381 }
1382
1383 break;
1384
1385 case ACTION_PRGPAGE:
1386 /*
1387                  * Program page - move internal buffer data to the page.
1388 */
1389
1390 if (ns->lines.wp) {
1391 NS_WARN("do_state_action: device is write-protected, programm\n");
1392 return -1;
1393 }
1394
1395 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1396 if (num != ns->regs.count) {
1397 NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
1398 ns->regs.count, num);
1399 return -1;
1400 }
1401
1402 if (prog_page(ns, num) == -1)
1403 return -1;
1404
1405 page_no = ns->regs.row;
1406
1407 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
1408 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
1409 NS_LOG("programm page %d\n", ns->regs.row);
1410
1411 NS_UDELAY(programm_delay);
1412 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
1413
1414 if (write_error(page_no)) {
1415 NS_WARN("simulating write failure in page %u\n", page_no);
1416 return -1;
1417 }
1418
1419 break;
1420
1421 case ACTION_ZEROOFF:
1422 NS_DBG("do_state_action: set internal offset to 0\n");
1423 ns->regs.off = 0;
1424 break;
1425
1426 case ACTION_HALFOFF:
1427 if (!(ns->options & OPT_PAGE512_8BIT)) {
1428 NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
1429 "byte page size 8x chips\n");
1430 return -1;
1431 }
1432 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
1433 ns->regs.off = ns->geom.pgsz/2;
1434 break;
1435
1436 case ACTION_OOBOFF:
1437 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
1438 ns->regs.off = ns->geom.pgsz;
1439 break;
1440
1441 default:
1442 NS_DBG("do_state_action: BUG! unknown action\n");
1443 }
1444
1445 return 0;
1446 }
1447
1448 /*
1449 * Switch simulator's state.
1450 */
1451 static void switch_state(struct nandsim *ns)
1452 {
1453 if (ns->op) {
1454 /*
1455                  * The current operation has already been identified.
1456 * Just follow the states chain.
1457 */
1458
1459 ns->stateidx += 1;
1460 ns->state = ns->nxstate;
1461 ns->nxstate = ns->op[ns->stateidx + 1];
1462
1463 NS_DBG("switch_state: operation is known, switch to the next state, "
1464 "state: %s, nxstate: %s\n",
1465 get_state_name(ns->state), get_state_name(ns->nxstate));
1466
1467                 /* See whether we need to do some action */
1468 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1469 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1470 return;
1471 }
1472
1473 } else {
1474 /*
1475 * We don't yet know which operation we perform.
1476 * Try to identify it.
1477 */
1478
1479 /*
1480                  * The only event that causes switch_state() to be called
1481                  * with a yet unknown operation is a new command.
1482 */
1483 ns->state = get_state_by_command(ns->regs.command);
1484
1485 NS_DBG("switch_state: operation is unknown, try to find it\n");
1486
1487 if (find_operation(ns, 0) != 0)
1488 return;
1489
1490 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1491 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1492 return;
1493 }
1494 }
1495
1496 /* For 16x devices column means the page offset in words */
1497 if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
1498 NS_DBG("switch_state: double the column number for 16x device\n");
1499 ns->regs.column <<= 1;
1500 }
1501
1502 if (NS_STATE(ns->nxstate) == STATE_READY) {
1503 /*
1504 * The current state is the last. Return to STATE_READY
1505 */
1506
1507 u_char status = NS_STATUS_OK(ns);
1508
1509 /* In case of data states, see if all bytes were input/output */
1510 if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
1511 && ns->regs.count != ns->regs.num) {
1512 NS_WARN("switch_state: not all bytes were processed, %d left\n",
1513 ns->regs.num - ns->regs.count);
1514 status = NS_STATUS_FAILED(ns);
1515 }
1516
1517 NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
1518
1519 switch_to_ready_state(ns, status);
1520
1521 return;
1522 } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
1523 /*
1524 * If the next state is data input/output, switch to it now
1525 */
1526
1527 ns->state = ns->nxstate;
1528 ns->nxstate = ns->op[++ns->stateidx + 1];
1529 ns->regs.num = ns->regs.count = 0;
1530
1531 NS_DBG("switch_state: the next state is data I/O, switch, "
1532 "state: %s, nxstate: %s\n",
1533 get_state_name(ns->state), get_state_name(ns->nxstate));
1534
1535 /*
1536 * Set the internal register to the count of bytes which
1537 * are expected to be input or output
1538 */
1539 switch (NS_STATE(ns->state)) {
1540 case STATE_DATAIN:
1541 case STATE_DATAOUT:
1542 ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1543 break;
1544
1545 case STATE_DATAOUT_ID:
1546 ns->regs.num = ns->geom.idbytes;
1547 break;
1548
1549 case STATE_DATAOUT_STATUS:
1550 case STATE_DATAOUT_STATUS_M:
1551 ns->regs.count = ns->regs.num = 0;
1552 break;
1553
1554 default:
1555 NS_ERR("switch_state: BUG! unknown data state\n");
1556 }
1557
1558 } else if (ns->nxstate & STATE_ADDR_MASK) {
1559 /*
1560 * If the next state is address input, set the internal
1561 * register to the number of expected address bytes
1562 */
1563
1564 ns->regs.count = 0;
1565
1566 switch (NS_STATE(ns->nxstate)) {
1567 case STATE_ADDR_PAGE:
1568 ns->regs.num = ns->geom.pgaddrbytes;
1569
1570 break;
1571 case STATE_ADDR_SEC:
1572 ns->regs.num = ns->geom.secaddrbytes;
1573 break;
1574
1575 case STATE_ADDR_ZERO:
1576 ns->regs.num = 1;
1577 break;
1578
1579 default:
1580 NS_ERR("switch_state: BUG! unknown address state\n");
1581 }
1582 } else {
1583 /*
1584 * Just reset internal counters.
1585 */
1586
1587 ns->regs.num = 0;
1588 ns->regs.count = 0;
1589 }
1590 }
1591
1592 static u_char ns_nand_read_byte(struct mtd_info *mtd)
1593 {
1594 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1595 u_char outb = 0x00;
1596
1597 /* Sanity and correctness checks */
1598 if (!ns->lines.ce) {
1599 NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
1600 return outb;
1601 }
1602 if (ns->lines.ale || ns->lines.cle) {
1603 NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
1604 return outb;
1605 }
1606 if (!(ns->state & STATE_DATAOUT_MASK)) {
1607 NS_WARN("read_byte: unexpected data output cycle, state is %s "
1608 "return %#x\n", get_state_name(ns->state), (uint)outb);
1609 return outb;
1610 }
1611
1612 /* Status register may be read as many times as it is wanted */
1613 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
1614 NS_DBG("read_byte: return %#x status\n", ns->regs.status);
1615 return ns->regs.status;
1616 }
1617
1618 /* Check if there is any data in the internal buffer which may be read */
1619 if (ns->regs.count == ns->regs.num) {
1620 NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
1621 return outb;
1622 }
1623
1624 switch (NS_STATE(ns->state)) {
1625 case STATE_DATAOUT:
1626 if (ns->busw == 8) {
1627 outb = ns->buf.byte[ns->regs.count];
1628 ns->regs.count += 1;
1629 } else {
1630 outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
1631 ns->regs.count += 2;
1632 }
1633 break;
1634 case STATE_DATAOUT_ID:
1635 NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
1636 outb = ns->ids[ns->regs.count];
1637 ns->regs.count += 1;
1638 break;
1639 default:
1640 BUG();
1641 }
1642
1643 if (ns->regs.count == ns->regs.num) {
1644 NS_DBG("read_byte: all bytes were read\n");
1645
1646 /*
1647                  * OPT_AUTOINCR allows reading the next consecutive pages
1648                  * without issuing a new read operation cycle.
1649 */
1650 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1651 ns->regs.count = 0;
1652 if (ns->regs.row + 1 < ns->geom.pgnum)
1653 ns->regs.row += 1;
1654 NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1655 do_state_action(ns, ACTION_CPY);
1656 }
1657 else if (NS_STATE(ns->nxstate) == STATE_READY)
1658 switch_state(ns);
1659
1660 }
1661
1662 return outb;
1663 }
1664
1665 static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1666 {
1667 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1668
1669 /* Sanity and correctness checks */
1670 if (!ns->lines.ce) {
1671 NS_ERR("write_byte: chip is disabled, ignore write\n");
1672 return;
1673 }
1674 if (ns->lines.ale && ns->lines.cle) {
1675 NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
1676 return;
1677 }
1678
1679 if (ns->lines.cle == 1) {
1680 /*
1681 * The byte written is a command.
1682 */
1683
1684 if (byte == NAND_CMD_RESET) {
1685 NS_LOG("reset chip\n");
1686 switch_to_ready_state(ns, NS_STATUS_OK(ns));
1687 return;
1688 }
1689
1690 /*
1691 * Chip might still be in STATE_DATAOUT
1692 * (if OPT_AUTOINCR feature is supported), STATE_DATAOUT_STATUS or
1693 * STATE_DATAOUT_STATUS_M state. If so, switch state.
1694 */
1695 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
1696 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
1697 || ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT))
1698 switch_state(ns);
1699
1700 /* Check if chip is expecting command */
1701 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1702 /*
1703                          * We are in a situation where something other than a command
1704                          * was expected, but a command was input. In this case ignore
1705 * previous command(s)/state(s) and accept the last one.
1706 */
1707 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
1708 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
1709 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1710 }
1711
1712 /* Check that the command byte is correct */
1713 if (check_command(byte)) {
1714 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1715 return;
1716 }
1717
1718 NS_DBG("command byte corresponding to %s state accepted\n",
1719 get_state_name(get_state_by_command(byte)));
1720 ns->regs.command = byte;
1721 switch_state(ns);
1722
1723 } else if (ns->lines.ale == 1) {
1724 /*
1725 * The byte written is an address.
1726 */
1727
1728 if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
1729
1730 NS_DBG("write_byte: operation isn't known yet, identify it\n");
1731
1732 if (find_operation(ns, 1) < 0)
1733 return;
1734
1735 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1736 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1737 return;
1738 }
1739
1740 ns->regs.count = 0;
1741 switch (NS_STATE(ns->nxstate)) {
1742 case STATE_ADDR_PAGE:
1743 ns->regs.num = ns->geom.pgaddrbytes;
1744 break;
1745 case STATE_ADDR_SEC:
1746 ns->regs.num = ns->geom.secaddrbytes;
1747 break;
1748 case STATE_ADDR_ZERO:
1749 ns->regs.num = 1;
1750 break;
1751 default:
1752 BUG();
1753 }
1754 }
1755
1756 /* Check that chip is expecting address */
1757 if (!(ns->nxstate & STATE_ADDR_MASK)) {
1758 NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
1759 "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
1760 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1761 return;
1762 }
1763
1764                 /* Check whether another address byte is still expected */
1765 if (ns->regs.count == ns->regs.num) {
1766 NS_ERR("write_byte: no more address bytes expected\n");
1767 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1768 return;
1769 }
1770
1771 accept_addr_byte(ns, byte);
1772
1773 ns->regs.count += 1;
1774
1775 NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
1776 (uint)byte, ns->regs.count, ns->regs.num);
1777
1778 if (ns->regs.count == ns->regs.num) {
1779 NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
1780 switch_state(ns);
1781 }
1782
1783 } else {
1784 /*
1785                  * The byte written is input data.
1786 */

		/* Check that chip is expecting data input */
		if (!(ns->state & STATE_DATAIN_MASK)) {
			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
				"switch to %s\n", (uint)byte,
				get_state_name(ns->state), get_state_name(STATE_READY));
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}

		/* Check whether more data input is expected */
		if (ns->regs.count == ns->regs.num) {
			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
				ns->regs.num);
			return;
		}

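		/*
		 * Latch the value into the internal data buffer.  On an 8-bit
		 * bus one byte is consumed per cycle; on a 16-bit bus a whole
		 * little-endian word is stored and the count advances by two.
		 */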
		if (ns->busw == 8) {
			ns->buf.byte[ns->regs.count] = byte;
			ns->regs.count += 1;
		} else {
			ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
			ns->regs.count += 2;
		}
	}

	return;
}

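/*
 * Latch the state of the control lines (CLE, ALE, nCE) from the bitmask
 * supplied by the NAND core and, if a command byte accompanies the request,
 * pass it on to ns_nand_write_byte().
 */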
static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
{
	struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;

	ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
	ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
	ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;

	if (cmd != NAND_CMD_NONE)
		ns_nand_write_byte(mtd, cmd);
}

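/*
 * The simulated chip never exposes a busy period through the ready line,
 * so it is always reported as ready.
 */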
static int ns_device_ready(struct mtd_info *mtd)
{
	NS_DBG("device_ready\n");
	return 1;
}

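/*
 * Assemble a 16-bit word from two consecutive byte reads, low byte first.
 */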
static uint16_t ns_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;

	NS_DBG("read_word\n");

	return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
}

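/*
 * Accept a buffer of input data: verify that the chip is in a data-input
 * state and that the buffer fits into what the current operation expects,
 * then copy it into the internal page buffer.
 */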
static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;

	/* Check that chip is expecting data input */
	if (!(ns->state & STATE_DATAIN_MASK)) {
		NS_ERR("write_buf: data input isn't expected, state is %s, "
			"switch to STATE_READY\n", get_state_name(ns->state));
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	/* Check if these are expected bytes */
	if (ns->regs.count + len > ns->regs.num) {
		NS_ERR("write_buf: too many input bytes\n");
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	memcpy(ns->buf.byte + ns->regs.count, buf, len);
	ns->regs.count += len;

	if (ns->regs.count == ns->regs.num) {
		NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
	}
}

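/*
 * Output a buffer of data: ID and status bytes are produced byte by byte,
 * while page data is copied straight from the internal buffer.  When the
 * whole page has been output and OPT_AUTOINCR is in effect, the simulator
 * moves on to the next page.
 */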
static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;

	/* Sanity and correctness checks */
	if (!ns->lines.ce) {
		NS_ERR("read_buf: chip is disabled\n");
		return;
	}
	if (ns->lines.ale || ns->lines.cle) {
		NS_ERR("read_buf: ALE or CLE pin is high\n");
		return;
	}
	if (!(ns->state & STATE_DATAOUT_MASK)) {
		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
			get_state_name(ns->state));
		return;
	}

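	/*
	 * Only plain page data is buffered; ID and status output is generated
	 * byte by byte, so fall back to the single-byte read path.
	 */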
	if (NS_STATE(ns->state) != STATE_DATAOUT) {
		int i;

		for (i = 0; i < len; i++)
			buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);

		return;
	}

	/* Check if these are expected bytes */
	if (ns->regs.count + len > ns->regs.num) {
		NS_ERR("read_buf: too many bytes to read\n");
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	memcpy(buf, ns->buf.byte + ns->regs.count, len);
	ns->regs.count += len;

	if (ns->regs.count == ns->regs.num) {
		if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
			ns->regs.count = 0;
			if (ns->regs.row + 1 < ns->geom.pgnum)
				ns->regs.row += 1;
			NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
			do_state_action(ns, ACTION_CPY);
		} else if (NS_STATE(ns->nxstate) == STATE_READY)
			switch_state(ns);
	}

	return;
}

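/*
 * Verify a just-written buffer by reading the data back into a scratch
 * buffer and comparing it with the expected contents.
 */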
static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);

	if (!memcmp(buf, &ns_verify_buf[0], len)) {
		NS_DBG("verify_buf: the buffer is OK\n");
		return 0;
	} else {
		NS_DBG("verify_buf: the buffer is wrong\n");
		return -EFAULT;
	}
}

/*
 * Module initialization function
 */
static int __init ns_init_module(void)
{
	struct nand_chip *chip;
	struct nandsim *nand;
	int retval = -ENOMEM, i;

	if (bus_width != 8 && bus_width != 16) {
		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
		return -EINVAL;
	}

	/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
	nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
			+ sizeof(struct nandsim), GFP_KERNEL);
	if (!nsmtd) {
		NS_ERR("unable to allocate core structures.\n");
		return -ENOMEM;
	}
	chip = (struct nand_chip *)(nsmtd + 1);
	nsmtd->priv = (void *)chip;
	nand = (struct nandsim *)(chip + 1);
	chip->priv = (void *)nand;
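
	/*
	 * The single allocation above holds all three structures back to back:
	 * mtd_info, then nand_chip, then nandsim.  The pointer arithmetic on
	 * 'nsmtd' and 'chip' relies on this layout.
	 */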

	/*
	 * Register simulator's callbacks.
	 */
	chip->cmd_ctrl = ns_hwcontrol;
	chip->read_byte = ns_nand_read_byte;
	chip->dev_ready = ns_device_ready;
	chip->write_buf = ns_nand_write_buf;
	chip->read_buf = ns_nand_read_buf;
	chip->verify_buf = ns_nand_verify_buf;
	chip->read_word = ns_nand_read_word;
	chip->ecc.mode = NAND_ECC_SOFT;
	/*
	 * The NAND_SKIP_BBTSCAN option is necessary for 'overridesize'
	 * and 'badblocks' parameters to work.
	 */
	chip->options |= NAND_SKIP_BBTSCAN;

	/*
	 * Perform minimal nandsim structure initialization so that the
	 * initial ID read command is handled correctly.
	 */
	if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
		nand->geom.idbytes = 4;
	else
		nand->geom.idbytes = 2;
	nand->regs.status = NS_STATUS_OK(nand);
	nand->nxstate = STATE_UNKNOWN;
	nand->options |= OPT_PAGE256; /* temporary value */
	nand->ids[0] = first_id_byte;
	nand->ids[1] = second_id_byte;
	nand->ids[2] = third_id_byte;
	nand->ids[3] = fourth_id_byte;
	if (bus_width == 16) {
		nand->busw = 16;
		chip->options |= NAND_BUSWIDTH_16;
	}

	nsmtd->owner = THIS_MODULE;

	if ((retval = parse_weakblocks()) != 0)
		goto error;

	if ((retval = parse_weakpages()) != 0)
		goto error;

	if ((retval = parse_gravepages()) != 0)
		goto error;

	if ((retval = nand_scan(nsmtd, 1)) != 0) {
		NS_ERR("can't register NAND Simulator\n");
		if (retval > 0)
			retval = -ENXIO;
		goto error;
	}

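	/*
	 * If 'overridesize' is given, the chip size detected by nand_scan()
	 * is replaced with 2^overridesize erase blocks; the shift is checked
	 * for overflow before the new size is applied.
	 */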
	if (overridesize) {
		u_int32_t new_size = nsmtd->erasesize << overridesize;
		if (new_size >> overridesize != nsmtd->erasesize) {
			NS_ERR("overridesize is too big\n");
			goto err_exit;
		}
		/* N.B. This relies on nand_scan not doing anything with the size before we change it */
		nsmtd->size = new_size;
		chip->chipsize = new_size;
		chip->chip_shift = ffs(new_size) - 1;
	}

	if ((retval = setup_wear_reporting(nsmtd)) != 0)
		goto err_exit;

	if ((retval = init_nandsim(nsmtd)) != 0)
		goto err_exit;

	if ((retval = parse_badblocks(nand, nsmtd)) != 0)
		goto err_exit;

	if ((retval = nand_default_bbt(nsmtd)) != 0)
		goto err_exit;

	/* Register NAND partitions */
	if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
		goto err_exit;

	return 0;

err_exit:
	free_nandsim(nand);
	nand_release(nsmtd);
	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
		kfree(nand->partitions[i].name);
error:
	kfree(nsmtd);
	free_lists();

	return retval;
}

module_init(ns_init_module);

/*
 * Module clean-up function
 */
static void __exit ns_cleanup_module(void)
{
	struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
	int i;

	free_nandsim(ns); /* Free nandsim private resources */
	nand_release(nsmtd); /* Unregister driver */
	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
		kfree(ns->partitions[i].name);
	kfree(nsmtd); /* Free other structures */
	free_lists();
}

module_exit(ns_cleanup_module);

MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("Artem B. Bityuckiy");
MODULE_DESCRIPTION ("The NAND flash simulator");
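
/*
 * Illustrative usage (the ID values below are only an example, not taken
 * from this file): the simulator is loaded like any other module, with the
 * simulated chip selected through the ID byte parameters, e.g.
 *
 *   modprobe nandsim first_id_byte=0x20 second_id_byte=0xaa \
 *                    third_id_byte=0x00 fourth_id_byte=0x15
 *
 * After loading, the simulated chip appears as an ordinary MTD device.
 */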