1 /*
2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
3 *
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30
31 /* TODO List:
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
34 */
35
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/string.h>
39 #include <asm/bug.h>
40 #include <asm/byteorder.h>
41
42 #include "csr1212.h"
43
44
45 /* Permitted key type for each key id */
46 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
47 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
48 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
49 #define __L (1 << CSR1212_KV_TYPE_LEAF)
50 static const u8 csr1212_key_id_type_map[0x30] = {
51 __C, /* used by Apple iSight */
52 __D | __L, /* Descriptor */
53 __I | __D | __L, /* Bus_Dependent_Info */
54 __I | __D | __L, /* Vendor */
55 __I, /* Hardware_Version */
56 0, 0, /* Reserved */
57 __D | __L | __I, /* Module */
58 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
59 __I, /* Node_Capabilities */
60 __L, /* EUI_64 */
61 0, 0, 0, /* Reserved */
62 __D, /* Unit */
63 __I, /* Specifier_ID */
64 __I, /* Version */
65 __I | __C | __D | __L, /* Dependent_Info */
66 __L, /* Unit_Location */
67 0, /* Reserved */
68 __I, /* Model */
69 __D, /* Instance */
70 __L, /* Keyword */
71 __D, /* Feature */
72 __L, /* Extended_ROM */
73 __I, /* Extended_Key_Specifier_ID */
74 __I, /* Extended_Key */
75 __I | __C | __D | __L, /* Extended_Data */
76 __L, /* Modifiable_Descriptor */
77 __I, /* Directory_ID */
78 __I, /* Revision */
79 };
80 #undef __I
81 #undef __C
82 #undef __D
83 #undef __L
84
85
86 #define quads_to_bytes(_q) ((_q) * sizeof(u32))
87 #define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))
88
89 static void free_keyval(struct csr1212_keyval *kv)
90 {
91 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
92 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
93 CSR1212_FREE(kv->value.leaf.data);
94
95 CSR1212_FREE(kv);
96 }
97
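/* Serial CRC-16 over 'length' big-endian quadlets, computed 4 bits at a time;
 * this is the CRC used for Config ROM block headers.  The result is returned
 * already converted to big-endian byte order for storage in the ROM image. */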
98 static u16 csr1212_crc16(const u32 *buffer, size_t length)
99 {
100 int shift;
101 u32 data;
102 u16 sum, crc = 0;
103
104 for (; length; length--) {
105 data = be32_to_cpu(*buffer);
106 buffer++;
107 for (shift = 28; shift >= 0; shift -= 4) {
108 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
109 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
110 }
111 crc &= 0xffff;
112 }
113
114 return cpu_to_be16(crc);
115 }
116
117 /* Microsoft computes the CRC with the bytes in reverse order. */
118 static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
119 {
120 int shift;
121 u32 data;
122 u16 sum, crc = 0;
123
124 for (; length; length--) {
125 data = le32_to_cpu(*buffer);
126 buffer++;
127 for (shift = 28; shift >= 0; shift -= 4) {
128 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
129 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
130 }
131 crc &= 0xffff;
132 }
133
134 return cpu_to_be16(crc);
135 }
136
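/* Return the dentry in 'dir' that references 'kv', or NULL if 'kv' is not a
 * child of 'dir'. */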
137 static struct csr1212_dentry *
138 csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
139 {
140 struct csr1212_dentry *pos;
141
142 for (pos = dir->value.directory.dentries_head;
143 pos != NULL; pos = pos->next)
144 if (pos->kv == kv)
145 return pos;
146 return NULL;
147 }
148
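/* Walk the circular list of parsed keyvals, starting after 'kv_list', and
 * return the keyval located at ROM offset 'offset', or NULL if none. */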
149 static struct csr1212_keyval *
150 csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
151 {
152 struct csr1212_keyval *kv;
153
154 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
155 if (kv->offset == offset)
156 return kv;
157 return NULL;
158 }
159
160
161 /* Creation Routines */
162
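/* Allocate a csr1212_csr together with a cache covering the Config ROM space
 * and an empty root directory keyval.  Returns NULL on allocation failure. */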
163 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
164 size_t bus_info_size, void *private)
165 {
166 struct csr1212_csr *csr;
167
168 csr = CSR1212_MALLOC(sizeof(*csr));
169 if (!csr)
170 return NULL;
171
172 csr->cache_head =
173 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
174 CSR1212_CONFIG_ROM_SPACE_SIZE);
175 if (!csr->cache_head) {
176 CSR1212_FREE(csr);
177 return NULL;
178 }
179
180 /* The keyval key id is not used for the root node, but a valid key id
181 * that can be used for a directory needs to be passed to
182 * csr1212_new_directory(). */
183 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
184 if (!csr->root_kv) {
185 CSR1212_FREE(csr->cache_head);
186 CSR1212_FREE(csr);
187 return NULL;
188 }
189
190 csr->bus_info_data = csr->cache_head->data;
191 csr->bus_info_len = bus_info_size;
192 csr->crc_len = bus_info_size;
193 csr->ops = ops;
194 csr->private = private;
195 csr->cache_tail = csr->cache_head;
196
197 return csr;
198 }
199
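/* Install a locally generated bus info block: translate the 2-bit max_rom
 * encoding into a byte count and copy the bus info data into the cache. */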
200 void csr1212_init_local_csr(struct csr1212_csr *csr,
201 const u32 *bus_info_data, int max_rom)
202 {
203 static const int mr_map[] = { 4, 64, 1024, 0 };
204
205 BUG_ON(max_rom & ~0x3);
206 csr->max_rom = mr_map[max_rom];
207 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
208 }
209
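/* Allocate a keyval with a reference count of 1.  Returns NULL on allocation
 * failure or if 'key' (below 0x30) does not permit entries of this 'type',
 * per csr1212_key_id_type_map. */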
210 static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
211 {
212 struct csr1212_keyval *kv;
213
214 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
215 return NULL;
216
217 kv = CSR1212_MALLOC(sizeof(*kv));
218 if (!kv)
219 return NULL;
220
221 atomic_set(&kv->refcnt, 1);
222 kv->key.type = type;
223 kv->key.id = key;
224 kv->associate = NULL;
225 kv->next = NULL;
226 kv->prev = NULL;
227 kv->offset = 0;
228 kv->valid = 0;
229 return kv;
230 }
231
232 struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
233 {
234 struct csr1212_keyval *kv;
235
236 kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
237 if (!kv)
238 return NULL;
239
240 kv->value.immediate = value;
241 kv->valid = 1;
242 return kv;
243 }
244
245 static struct csr1212_keyval *
246 csr1212_new_leaf(u8 key, const void *data, size_t data_len)
247 {
248 struct csr1212_keyval *kv;
249
250 kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
251 if (!kv)
252 return NULL;
253
254 if (data_len > 0) {
255 kv->value.leaf.data = CSR1212_MALLOC(data_len);
256 if (!kv->value.leaf.data) {
257 CSR1212_FREE(kv);
258 return NULL;
259 }
260
261 if (data)
262 memcpy(kv->value.leaf.data, data, data_len);
263 } else {
264 kv->value.leaf.data = NULL;
265 }
266
267 kv->value.leaf.len = bytes_to_quads(data_len);
268 kv->offset = 0;
269 kv->valid = 1;
270
271 return kv;
272 }
273
274 static struct csr1212_keyval *
275 csr1212_new_csr_offset(u8 key, u32 csr_offset)
276 {
277 struct csr1212_keyval *kv;
278
279 kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
280 if (!kv)
281 return NULL;
282
283 kv->value.csr_offset = csr_offset;
284
285 kv->offset = 0;
286 kv->valid = 1;
287 return kv;
288 }
289
290 struct csr1212_keyval *csr1212_new_directory(u8 key)
291 {
292 struct csr1212_keyval *kv;
293
294 kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
295 if (!kv)
296 return NULL;
297
298 kv->value.directory.len = 0;
299 kv->offset = 0;
300 kv->value.directory.dentries_head = NULL;
301 kv->value.directory.dentries_tail = NULL;
302 kv->valid = 1;
303 return kv;
304 }
305
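/* Chain 'associate' onto 'kv', taking a reference on it and dropping any
 * previously associated keyval.  The BUG_ON enforces the key id pairings
 * that are legal for Descriptor, Dependent_Info, Extended_Key and
 * Extended_Data associations. */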
306 void csr1212_associate_keyval(struct csr1212_keyval *kv,
307 struct csr1212_keyval *associate)
308 {
309 BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
310 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
311 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
312 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
313 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
314 associate->key.id < 0x30) ||
315 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
316 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
317 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
318 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
319 (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
320 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
321 (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
322 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
323
324 if (kv->associate)
325 csr1212_release_keyval(kv->associate);
326
327 csr1212_keep_keyval(associate);
328 kv->associate = associate;
329 }
330
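/* Append 'kv' to the tail of dir's dentry list, taking a reference on it
 * when 'keep_keyval' is set. */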
331 static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
332 struct csr1212_keyval *kv,
333 bool keep_keyval)
334 {
335 struct csr1212_dentry *dentry;
336
337 BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
338
339 dentry = CSR1212_MALLOC(sizeof(*dentry));
340 if (!dentry)
341 return -ENOMEM;
342
343 if (keep_keyval)
344 csr1212_keep_keyval(kv);
345 dentry->kv = kv;
346
347 dentry->next = NULL;
348 dentry->prev = dir->value.directory.dentries_tail;
349
350 if (!dir->value.directory.dentries_head)
351 dir->value.directory.dentries_head = dentry;
352
353 if (dir->value.directory.dentries_tail)
354 dir->value.directory.dentries_tail->next = dentry;
355 dir->value.directory.dentries_tail = dentry;
356
357 return CSR1212_SUCCESS;
358 }
359
360 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
361 struct csr1212_keyval *kv)
362 {
363 return __csr1212_attach_keyval_to_directory(dir, kv, true);
364 }
365
366 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
367 (&((kv)->value.leaf.data[1]))
368
369 #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
370 ((kv)->value.leaf.data[0] = \
371 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
372 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
373 #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
374 ((kv)->value.leaf.data[0] = \
375 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
376 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
377 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
378
379 static struct csr1212_keyval *
380 csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
381 const void *data, size_t data_len)
382 {
383 struct csr1212_keyval *kv;
384
385 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
386 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
387 if (!kv)
388 return NULL;
389
390 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
391 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
392
393 if (data)
394 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
395
396 return kv;
397 }
398
399 /* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
400 static int csr1212_check_minimal_ascii(const char *s)
401 {
402 static const char minimal_ascii_table[] = {
403 /* 1 2 4 8 16 32 64 128 */
404 128, /* --, --, --, --, --, --, --, 07, */
405 4 + 16 + 32, /* --, --, 0a, --, 0C, 0D, --, --, */
406 0, /* --, --, --, --, --, --, --, --, */
407 0, /* --, --, --, --, --, --, --, --, */
408 255 - 8 - 16, /* 20, 21, 22, --, --, 25, 26, 27, */
409 255, /* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
410 255, /* 30, 31, 32, 33, 34, 35, 36, 37, */
411 255, /* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
412 255, /* 40, 41, 42, 43, 44, 45, 46, 47, */
413 255, /* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
414 255, /* 50, 51, 52, 53, 54, 55, 56, 57, */
415 1 + 2 + 4 + 128, /* 58, 59, 5a, --, --, --, --, 5f, */
416 255 - 1, /* --, 61, 62, 63, 64, 65, 66, 67, */
417 255, /* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
418 255, /* 70, 71, 72, 73, 74, 75, 76, 77, */
419 1 + 2 + 4, /* 78, 79, 7a, --, --, --, --, --, */
420 };
421 int i, j;
422
423 for (; *s; s++) {
424 i = *s >> 3; /* i = *s / 8; */
425 j = 1 << (*s & 7); /* j = 1 << (*s % 8); */
426
427 if (i >= ARRAY_SIZE(minimal_ascii_table) ||
428 !(minimal_ascii_table[i] & j))
429 return -EINVAL;
430 }
431 return 0;
432 }
433
434 /* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
435 struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
436 {
437 struct csr1212_keyval *kv;
438 u32 *text;
439 size_t str_len, quads;
440
441 if (!s || !*s || csr1212_check_minimal_ascii(s))
442 return NULL;
443
444 str_len = strlen(s);
445 quads = bytes_to_quads(str_len);
446 kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
447 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
448 if (!kv)
449 return NULL;
450
451 kv->value.leaf.data[1] = 0; /* width, character_set, language */
452 text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
453 text[quads - 1] = 0; /* padding */
454 memcpy(text, s, str_len);
455
456 return kv;
457 }
458
459
460 /* Destruction Routines */
461
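/* Remove 'kv' from dir's dentry list if present, free the dentry, and drop
 * the reference that was taken when the keyval was attached. */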
462 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
463 struct csr1212_keyval *kv)
464 {
465 struct csr1212_dentry *dentry;
466
467 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
468 return;
469
470 dentry = csr1212_find_keyval(dir, kv);
471
472 if (!dentry)
473 return;
474
475 if (dentry->prev)
476 dentry->prev->next = dentry->next;
477 if (dentry->next)
478 dentry->next->prev = dentry->prev;
479 if (dir->value.directory.dentries_head == dentry)
480 dir->value.directory.dentries_head = dentry->next;
481 if (dir->value.directory.dentries_tail == dentry)
482 dir->value.directory.dentries_tail = dentry->prev;
483
484 CSR1212_FREE(dentry);
485
486 csr1212_release_keyval(kv);
487 }
488
489 /* This function is used to free the memory taken by a keyval. If the given
490 * keyval is a directory type, then any keyvals contained in that directory
491 * will be destroyed as well if no one holds a reference on them. By means of
492 * list manipulation, this routine will descend a directory structure in a
493 * non-recursive manner. */
494 void csr1212_release_keyval(struct csr1212_keyval *kv)
495 {
496 struct csr1212_keyval *k, *a;
497 struct csr1212_dentry dentry;
498 struct csr1212_dentry *head, *tail;
499
500 if (!atomic_dec_and_test(&kv->refcnt))
501 return;
502
503 dentry.kv = kv;
504 dentry.next = NULL;
505 dentry.prev = NULL;
506
507 head = &dentry;
508 tail = head;
509
510 while (head) {
511 k = head->kv;
512
513 while (k) {
514 /* must not dec_and_test kv->refcnt again */
515 if (k != kv && !atomic_dec_and_test(&k->refcnt))
516 break;
517
518 a = k->associate;
519
520 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
521 /* If the current entry is a directory, move all
522 * the entries to the destruction list. */
523 if (k->value.directory.dentries_head) {
524 tail->next =
525 k->value.directory.dentries_head;
526 k->value.directory.dentries_head->prev =
527 tail;
528 tail = k->value.directory.dentries_tail;
529 }
530 }
531 free_keyval(k);
532 k = a;
533 }
534
535 head = head->next;
536 if (head) {
537 if (head->prev && head->prev != &dentry)
538 CSR1212_FREE(head->prev);
539 head->prev = NULL;
540 } else if (tail != &dentry) {
541 CSR1212_FREE(tail);
542 }
543 }
544 }
545
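/* Release the root keyval and free every cache along with its filled-region
 * bookkeeping. */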
546 void csr1212_destroy_csr(struct csr1212_csr *csr)
547 {
548 struct csr1212_csr_rom_cache *c, *oc;
549 struct csr1212_cache_region *cr, *ocr;
550
551 csr1212_release_keyval(csr->root_kv);
552
553 c = csr->cache_head;
554 while (c) {
555 oc = c;
556 cr = c->filled_head;
557 while (cr) {
558 ocr = cr;
559 cr = cr->next;
560 CSR1212_FREE(ocr);
561 }
562 c = c->next;
563 CSR1212_FREE(oc);
564 }
565
566 CSR1212_FREE(csr);
567 }
568
569
570 /* CSR Image Creation */
571
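/* Allocate CSR address space for an Extended ROM of at least 'romsize' bytes
 * (rounded up to a multiple of max_rom), create a cache for it, and attach
 * the corresponding Extended_ROM leaf to the root directory. */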
572 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
573 {
574 struct csr1212_csr_rom_cache *cache;
575 u64 csr_addr;
576
577 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
578 !csr->ops->release_addr || csr->max_rom < 1);
579
580 /* ROM size must be a multiple of csr->max_rom */
581 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
582
583 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
584 csr->private);
585 if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
586 return -ENOMEM;
587
588 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
589 /* Invalid address returned from allocate_addr_range(). */
590 csr->ops->release_addr(csr_addr, csr->private);
591 return -ENOMEM;
592 }
593
594 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
595 romsize);
596 if (!cache) {
597 csr->ops->release_addr(csr_addr, csr->private);
598 return -ENOMEM;
599 }
600
601 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
602 CSR1212_KV_ID_EXTENDED_ROM);
603 if (!cache->ext_rom) {
604 csr->ops->release_addr(csr_addr, csr->private);
605 CSR1212_FREE(cache);
606 return -ENOMEM;
607 }
608
609 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
610 CSR1212_SUCCESS) {
611 csr1212_release_keyval(cache->ext_rom);
612 csr->ops->release_addr(csr_addr, csr->private);
613 CSR1212_FREE(cache);
614 return -ENOMEM;
615 }
616 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
617 cache->ext_rom->value.leaf.len = -1;
618 cache->ext_rom->value.leaf.data = cache->data;
619
620 /* Add cache to tail of cache list */
621 cache->prev = csr->cache_tail;
622 csr->cache_tail->next = cache;
623 csr->cache_tail = cache;
624 return CSR1212_SUCCESS;
625 }
626
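/* Unlink 'cache' from the csr's cache list, release its Extended_ROM keyval
 * if it has one, and free the cache. */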
627 static void csr1212_remove_cache(struct csr1212_csr *csr,
628 struct csr1212_csr_rom_cache *cache)
629 {
630 if (csr->cache_head == cache)
631 csr->cache_head = cache->next;
632 if (csr->cache_tail == cache)
633 csr->cache_tail = cache->prev;
634
635 if (cache->prev)
636 cache->prev->next = cache->next;
637 if (cache->next)
638 cache->next->prev = cache->prev;
639
640 if (cache->ext_rom) {
641 csr1212_detach_keyval_from_directory(csr->root_kv,
642 cache->ext_rom);
643 csr1212_release_keyval(cache->ext_rom);
644 }
645
646 CSR1212_FREE(cache);
647 }
648
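/* Count the entries of one directory (collapsing repeated Extended Key
 * Specifier_ID and Extended Key immediates) and move its leaf and directory
 * children onto the layout list at *layout_tail.  Extended ROM leaves are
 * not added to the layout list.  Returns the number of entries. */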
649 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
650 struct csr1212_keyval **layout_tail)
651 {
652 struct csr1212_dentry *dentry;
653 struct csr1212_keyval *dkv;
654 struct csr1212_keyval *last_extkey_spec = NULL;
655 struct csr1212_keyval *last_extkey = NULL;
656 int num_entries = 0;
657
658 for (dentry = dir->value.directory.dentries_head; dentry;
659 dentry = dentry->next) {
660 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
661 /* Special Case: Extended Key Specifier_ID */
662 if (dkv->key.id ==
663 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
664 if (last_extkey_spec == NULL)
665 last_extkey_spec = dkv;
666 else if (dkv->value.immediate !=
667 last_extkey_spec->value.immediate)
668 last_extkey_spec = dkv;
669 else
670 continue;
671 /* Special Case: Extended Key */
672 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
673 if (last_extkey == NULL)
674 last_extkey = dkv;
675 else if (dkv->value.immediate !=
676 last_extkey->value.immediate)
677 last_extkey = dkv;
678 else
679 continue;
680 }
681
682 num_entries += 1;
683
684 switch (dkv->key.type) {
685 default:
686 case CSR1212_KV_TYPE_IMMEDIATE:
687 case CSR1212_KV_TYPE_CSR_OFFSET:
688 break;
689 case CSR1212_KV_TYPE_LEAF:
690 case CSR1212_KV_TYPE_DIRECTORY:
691 /* Remove from list */
692 if (dkv->prev && (dkv->prev->next == dkv))
693 dkv->prev->next = dkv->next;
694 if (dkv->next && (dkv->next->prev == dkv))
695 dkv->next->prev = dkv->prev;
696 //if (dkv == *layout_tail)
697 // *layout_tail = dkv->prev;
698
699 /* Special case: Extended ROM leaves */
700 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
701 dkv->value.leaf.len = -1;
702 /* Don't add Extended ROM leaves to the
703 * layout list; they are handled
704 * differently. */
705 break;
706 }
707
708 /* Add to tail of list */
709 dkv->next = NULL;
710 dkv->prev = *layout_tail;
711 (*layout_tail)->next = dkv;
712 *layout_tail = dkv;
713 break;
714 }
715 }
716 }
717 return num_entries;
718 }
719
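/* Build the flat layout starting at 'kv' and return the aggregate image size
 * in bytes; every leaf and directory contributes its length plus one quadlet
 * for the length/CRC header. */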
720 static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
721 {
722 struct csr1212_keyval *ltail = kv;
723 size_t agg_size = 0;
724
725 while (kv) {
726 switch (kv->key.type) {
727 case CSR1212_KV_TYPE_LEAF:
728 /* Add 1 quadlet for crc/len field */
729 agg_size += kv->value.leaf.len + 1;
730 break;
731
732 case CSR1212_KV_TYPE_DIRECTORY:
733 kv->value.directory.len =
734 csr1212_generate_layout_subdir(kv, &ltail);
735 /* Add 1 quadlet for crc/len field */
736 agg_size += kv->value.directory.len + 1;
737 break;
738 }
739 kv = kv->next;
740 }
741 return quads_to_bytes(agg_size);
742 }
743
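/* Assign ROM offsets to keyvals from the layout list until 'cache' is full.
 * Sets the cache's layout head/tail and length, and returns the first keyval
 * that did not fit (NULL if everything fit). */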
744 static struct csr1212_keyval *
745 csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
746 struct csr1212_keyval *start_kv, int start_pos)
747 {
748 struct csr1212_keyval *kv = start_kv;
749 struct csr1212_keyval *okv = start_kv;
750 int pos = start_pos;
751 int kv_len = 0, okv_len = 0;
752
753 cache->layout_head = kv;
754
755 while (kv && pos < cache->size) {
756 /* Special case: Extended ROM leaves */
757 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
758 kv->offset = cache->offset + pos;
759
760 switch (kv->key.type) {
761 case CSR1212_KV_TYPE_LEAF:
762 kv_len = kv->value.leaf.len;
763 break;
764
765 case CSR1212_KV_TYPE_DIRECTORY:
766 kv_len = kv->value.directory.len;
767 break;
768
769 default:
770 /* Should never get here */
771 WARN_ON(1);
772 break;
773 }
774
775 pos += quads_to_bytes(kv_len + 1);
776
777 if (pos <= cache->size) {
778 okv = kv;
779 okv_len = kv_len;
780 kv = kv->next;
781 }
782 }
783
784 cache->layout_tail = okv;
785 cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
786
787 return kv;
788 }
789
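/* Directory entry quadlet layout: key id and key type in the most
 * significant byte, entry value in the low 24 bits. */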
790 #define CSR1212_KV_KEY_SHIFT 24
791 #define CSR1212_KV_KEY_TYPE_SHIFT 6
792 #define CSR1212_KV_KEY_ID_MASK 0x3f
793 #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
794
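/* Serialize the entries of 'dir' into 'data_buffer' as big-endian quadlets:
 * the key in the top byte and, depending on the type, the immediate value,
 * CSR offset, or quadlet offset from the entry to its leaf/directory in the
 * low 24 bits. */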
795 static void
796 csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
797 {
798 struct csr1212_dentry *dentry;
799 struct csr1212_keyval *last_extkey_spec = NULL;
800 struct csr1212_keyval *last_extkey = NULL;
801 int index = 0;
802
803 for (dentry = dir->value.directory.dentries_head;
804 dentry;
805 dentry = dentry->next) {
806 struct csr1212_keyval *a;
807
808 for (a = dentry->kv; a; a = a->associate) {
809 u32 value = 0;
810
811 /* Special Case: Extended Key Specifier_ID */
812 if (a->key.id ==
813 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
814 if (last_extkey_spec == NULL)
815 last_extkey_spec = a;
816 else if (a->value.immediate !=
817 last_extkey_spec->value.immediate)
818 last_extkey_spec = a;
819 else
820 continue;
821
822 /* Special Case: Extended Key */
823 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
824 if (last_extkey == NULL)
825 last_extkey = a;
826 else if (a->value.immediate !=
827 last_extkey->value.immediate)
828 last_extkey = a;
829 else
830 continue;
831 }
832
833 switch (a->key.type) {
834 case CSR1212_KV_TYPE_IMMEDIATE:
835 value = a->value.immediate;
836 break;
837 case CSR1212_KV_TYPE_CSR_OFFSET:
838 value = a->value.csr_offset;
839 break;
840 case CSR1212_KV_TYPE_LEAF:
841 value = a->offset;
842 value -= dir->offset + quads_to_bytes(1+index);
843 value = bytes_to_quads(value);
844 break;
845 case CSR1212_KV_TYPE_DIRECTORY:
846 value = a->offset;
847 value -= dir->offset + quads_to_bytes(1+index);
848 value = bytes_to_quads(value);
849 break;
850 default:
851 /* Should never get here */
852 WARN_ON(1);
853 break;
854 }
855
856 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
857 CSR1212_KV_KEY_SHIFT;
858 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
859 (CSR1212_KV_KEY_SHIFT +
860 CSR1212_KV_KEY_TYPE_SHIFT);
861 data_buffer[index] = cpu_to_be32(value);
862 index++;
863 }
864 }
865 }
866
867 struct csr1212_keyval_img {
868 u16 length;
869 u16 crc;
870
871 /* Must be last */
872 u32 data[0]; /* older gcc can't handle [] which is standard */
873 };
874
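/* Write each keyval on the cache's layout list into the cache buffer,
 * filling in the 16-bit length and CRC header, and unlink it from the
 * layout list afterwards. */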
875 static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
876 {
877 struct csr1212_keyval *kv, *nkv;
878 struct csr1212_keyval_img *kvi;
879
880 for (kv = cache->layout_head;
881 kv != cache->layout_tail->next;
882 kv = nkv) {
883 kvi = (struct csr1212_keyval_img *)(cache->data +
884 bytes_to_quads(kv->offset - cache->offset));
885 switch (kv->key.type) {
886 default:
887 case CSR1212_KV_TYPE_IMMEDIATE:
888 case CSR1212_KV_TYPE_CSR_OFFSET:
889 /* Should never get here */
890 WARN_ON(1);
891 break;
892
893 case CSR1212_KV_TYPE_LEAF:
894 /* Don't copy over Extended ROM areas; they are
895 * already filled out! */
896 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
897 memcpy(kvi->data, kv->value.leaf.data,
898 quads_to_bytes(kv->value.leaf.len));
899
900 kvi->length = cpu_to_be16(kv->value.leaf.len);
901 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
902 break;
903
904 case CSR1212_KV_TYPE_DIRECTORY:
905 csr1212_generate_tree_subdir(kv, kvi->data);
906
907 kvi->length = cpu_to_be16(kv->value.directory.len);
908 kvi->crc = csr1212_crc16(kvi->data,
909 kv->value.directory.len);
910 break;
911 }
912
913 nkv = kv->next;
914 if (kv->prev)
915 kv->prev->next = NULL;
916 if (kv->next)
917 kv->next->prev = NULL;
918 kv->prev = NULL;
919 kv->next = NULL;
920 }
921 }
922
923 /* This size is arbitrarily chosen.
924 * The struct overhead is subtracted for more economical allocations. */
925 #define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
926
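/* Build the complete Config ROM image: fill in the bus info block header,
 * lay the keyvals out across the caches (appending Extended ROM caches as
 * needed and dropping unused ones), then serialize each cache and compute
 * its length/CRC header, walking the cache list backward so Extended ROM
 * contents are final before their CRCs are taken. */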
927 int csr1212_generate_csr_image(struct csr1212_csr *csr)
928 {
929 struct csr1212_bus_info_block_img *bi;
930 struct csr1212_csr_rom_cache *cache;
931 struct csr1212_keyval *kv;
932 size_t agg_size;
933 int ret;
934 int init_offset;
935
936 BUG_ON(!csr);
937
938 cache = csr->cache_head;
939
940 bi = (struct csr1212_bus_info_block_img*)cache->data;
941
942 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
943 bi->crc_length = bi->length;
944 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
945
946 csr->root_kv->next = NULL;
947 csr->root_kv->prev = NULL;
948
949 agg_size = csr1212_generate_layout_order(csr->root_kv);
950
951 init_offset = csr->bus_info_len;
952
953 for (kv = csr->root_kv, cache = csr->cache_head;
954 kv;
955 cache = cache->next) {
956 if (!cache) {
957 /* Estimate approximate number of additional cache
958 * regions needed (it assumes that the cache holding
959 * the first 1K Config ROM space always exists). */
960 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
961 (2 * sizeof(u32))) + 1;
962
963 /* Add additional cache regions, extras will be
964 * removed later */
965 for (; est_c; est_c--) {
966 ret = csr1212_append_new_cache(csr,
967 CSR1212_EXTENDED_ROM_SIZE);
968 if (ret != CSR1212_SUCCESS)
969 return ret;
970 }
971 /* Need to re-layout for additional cache regions */
972 agg_size = csr1212_generate_layout_order(csr->root_kv);
973 kv = csr->root_kv;
974 cache = csr->cache_head;
975 init_offset = csr->bus_info_len;
976 }
977 kv = csr1212_generate_positions(cache, kv, init_offset);
978 agg_size -= cache->len;
979 init_offset = sizeof(u32);
980 }
981
982 /* Remove unused, excess cache regions */
983 while (cache) {
984 struct csr1212_csr_rom_cache *oc = cache;
985
986 cache = cache->next;
987 csr1212_remove_cache(csr, oc);
988 }
989
990 /* Go through the list backward so that when done, the correct CRC
991 * will be calculated for the Extended ROM areas. */
992 for (cache = csr->cache_tail; cache; cache = cache->prev) {
993 /* Only Extended ROM caches should have this set. */
994 if (cache->ext_rom) {
995 int leaf_size;
996
997 /* Make sure the Extended ROM leaf is a multiple of
998 * max_rom in size. */
999 BUG_ON(csr->max_rom < 1);
1000 leaf_size = (cache->len + (csr->max_rom - 1)) &
1001 ~(csr->max_rom - 1);
1002
1003 /* Zero out the unused ROM region */
1004 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1005 leaf_size - cache->len);
1006
1007 /* Subtract leaf header */
1008 leaf_size -= sizeof(u32);
1009
1010 /* Update the Extended ROM leaf length */
1011 cache->ext_rom->value.leaf.len =
1012 bytes_to_quads(leaf_size);
1013 } else {
1014 /* Zero out the unused ROM region */
1015 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1016 cache->size - cache->len);
1017 }
1018
1019 /* Copy the data into the cache buffer */
1020 csr1212_fill_cache(cache);
1021
1022 if (cache != csr->cache_head) {
1023 /* Set the length and CRC of the extended ROM. */
1024 struct csr1212_keyval_img *kvi =
1025 (struct csr1212_keyval_img*)cache->data;
1026 u16 len = bytes_to_quads(cache->len) - 1;
1027
1028 kvi->length = cpu_to_be16(len);
1029 kvi->crc = csr1212_crc16(kvi->data, len);
1030 }
1031 }
1032
1033 return CSR1212_SUCCESS;
1034 }
1035
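/* Copy 'len' bytes starting at Config ROM offset 'offset' out of whichever
 * cache already holds the entire range; returns -ENOENT if no cache covers
 * the request. */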
1036 int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
1037 {
1038 struct csr1212_csr_rom_cache *cache;
1039
1040 for (cache = csr->cache_head; cache; cache = cache->next)
1041 if (offset >= cache->offset &&
1042 (offset + len) <= (cache->offset + cache->size)) {
1043 memcpy(buffer, &cache->data[
1044 bytes_to_quads(offset - cache->offset)],
1045 len);
1046 return CSR1212_SUCCESS;
1047 }
1048
1049 return -ENOENT;
1050 }
1051
1052 /*
1053 * Apparently there are many different wrong implementations of the CRC
1054 * algorithm. We don't fail, we just warn... approximately once per GUID.
1055 */
1056 static void
1057 csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid)
1058 {
1059 static u64 last_bad_eui64;
1060 u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]);
1061
1062 if (csr1212_crc16(buffer, length) == crc ||
1063 csr1212_msft_crc16(buffer, length) == crc ||
1064 eui64 == last_bad_eui64)
1065 return;
1066
1067 printk(KERN_DEBUG "ieee1394: config ROM CRC error\n");
1068 last_bad_eui64 = eui64;
1069 }
1070
1071 /* Parse a chunk of data as a Config ROM */
1072
1073 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1074 {
1075 struct csr1212_bus_info_block_img *bi;
1076 struct csr1212_cache_region *cr;
1077 int i;
1078 int ret;
1079
1080 /* IEEE 1212 says that the entire bus info block should be readable in
1081 * a single transaction regardless of the max_rom value.
1082 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1083 * bus info block will be read 1 quadlet at a time. The rest of the
1084 * ConfigROM will be read according to the max_rom field. */
1085 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1086 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1087 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1088 csr->private);
1089 if (ret != CSR1212_SUCCESS)
1090 return ret;
1091
1092 /* check ROM header's info_length */
1093 if (i == 0 &&
1094 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1095 bytes_to_quads(csr->bus_info_len) - 1)
1096 return -EINVAL;
1097 }
1098
1099 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1100 csr->crc_len = quads_to_bytes(bi->crc_length);
1101
1102 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
1103 * is not always the case, so read the rest of the crc area 1 quadlet at
1104 * a time. */
1105 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1106 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1107 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1108 csr->private);
1109 if (ret != CSR1212_SUCCESS)
1110 return ret;
1111 }
1112
1113 csr1212_check_crc(bi->data, bi->crc_length, bi->crc,
1114 &csr->bus_info_data[3]);
1115
1116 cr = CSR1212_MALLOC(sizeof(*cr));
1117 if (!cr)
1118 return -ENOMEM;
1119
1120 cr->next = NULL;
1121 cr->prev = NULL;
1122 cr->offset_start = 0;
1123 cr->offset_end = csr->crc_len + 4;
1124
1125 csr->cache_head->filled_head = cr;
1126 csr->cache_head->filled_tail = cr;
1127
1128 return CSR1212_SUCCESS;
1129 }
1130
1131 #define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1132 #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1133 #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1134 #define CSR1212_KV_VAL_MASK 0xffffff
1135 #define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
1136
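/* Parse a single directory entry quadlet 'ki' located at ROM position
 * 'kv_pos' and attach the resulting keyval to 'dir'.  Immediate and CSR
 * offset entries are created complete; leaf and directory entries are
 * created empty (valid = 0) at their computed offset and read on demand. */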
1137 static int
1138 csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1139 {
1140 int ret = CSR1212_SUCCESS;
1141 struct csr1212_keyval *k = NULL;
1142 u32 offset;
1143 bool keep_keyval = true;
1144
1145 switch (CSR1212_KV_KEY_TYPE(ki)) {
1146 case CSR1212_KV_TYPE_IMMEDIATE:
1147 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1148 CSR1212_KV_VAL(ki));
1149 if (!k) {
1150 ret = -ENOMEM;
1151 goto out;
1152 }
1153 /* Don't keep local reference when parsing. */
1154 keep_keyval = false;
1155 break;
1156
1157 case CSR1212_KV_TYPE_CSR_OFFSET:
1158 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1159 CSR1212_KV_VAL(ki));
1160 if (!k) {
1161 ret = -ENOMEM;
1162 goto out;
1163 }
1164 /* Don't keep local reference when parsing. */
1165 keep_keyval = false;
1166 break;
1167
1168 default:
1169 /* Compute the offset from 0xffff f000 0000. */
1170 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1171 if (offset == kv_pos) {
1172 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1173 * or Directories. The Config ROM image is most likely
1174 * messed up, so we'll just abort here. */
1175 ret = -EIO;
1176 goto out;
1177 }
1178
1179 k = csr1212_find_keyval_offset(dir, offset);
1180
1181 if (k)
1182 break; /* Found it. */
1183
1184 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
1185 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1186 else
1187 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1188
1189 if (!k) {
1190 ret = -ENOMEM;
1191 goto out;
1192 }
1193 /* Don't keep local reference when parsing. */
1194 keep_keyval = false;
1195 /* Contents not read yet so it's not valid. */
1196 k->valid = 0;
1197 k->offset = offset;
1198
1199 k->prev = dir;
1200 k->next = dir->next;
1201 dir->next->prev = k;
1202 dir->next = k;
1203 }
1204 ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
1205 out:
1206 if (ret != CSR1212_SUCCESS && k != NULL)
1207 free_keyval(k);
1208 return ret;
1209 }
1210
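/* Interpret the raw ROM data behind 'kv' from 'cache': parse every entry of
 * a directory, or copy out the data of a leaf (Extended ROM leaves keep
 * pointing into the cache).  Marks the keyval valid. */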
1211 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1212 struct csr1212_csr_rom_cache *cache)
1213 {
1214 struct csr1212_keyval_img *kvi;
1215 int i;
1216 int ret = CSR1212_SUCCESS;
1217 int kvi_len;
1218
1219 kvi = (struct csr1212_keyval_img*)
1220 &cache->data[bytes_to_quads(kv->offset - cache->offset)];
1221 kvi_len = be16_to_cpu(kvi->length);
1222
1223 /* GUID is wrong in here in case of extended ROM. We don't care. */
1224 csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]);
1225
1226 switch (kv->key.type) {
1227 case CSR1212_KV_TYPE_DIRECTORY:
1228 for (i = 0; i < kvi_len; i++) {
1229 u32 ki = kvi->data[i];
1230
1231 /* Some devices put null entries in their unit
1232 * directories. If we come across such an entry,
1233 * then skip it. */
1234 if (ki == 0x0)
1235 continue;
1236 ret = csr1212_parse_dir_entry(kv, ki,
1237 kv->offset + quads_to_bytes(i + 1));
1238 }
1239 kv->value.directory.len = kvi_len;
1240 break;
1241
1242 case CSR1212_KV_TYPE_LEAF:
1243 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1244 size_t size = quads_to_bytes(kvi_len);
1245
1246 kv->value.leaf.data = CSR1212_MALLOC(size);
1247 if (!kv->value.leaf.data) {
1248 ret = -ENOMEM;
1249 goto out;
1250 }
1251
1252 kv->value.leaf.len = kvi_len;
1253 memcpy(kv->value.leaf.data, kvi->data, size);
1254 }
1255 break;
1256 }
1257
1258 kv->valid = 1;
1259 out:
1260 return ret;
1261 }
1262
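/* Read the ROM data behind 'kv' into its cache, creating a new cache first
 * if 'kv' is an Extended ROM leaf that no existing cache covers.  Data is
 * fetched max_rom bytes at a time, the filled regions are recorded and
 * merged, and the keyval is parsed once its quadlets are present. */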
1263 static int
1264 csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1265 {
1266 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1267 struct csr1212_keyval_img *kvi = NULL;
1268 struct csr1212_csr_rom_cache *cache;
1269 int cache_index;
1270 u64 addr;
1271 u32 *cache_ptr;
1272 u16 kv_len = 0;
1273
1274 BUG_ON(!csr || !kv || csr->max_rom < 1);
1275
1276 /* First find which cache the data should be in (or go in if not read
1277 * yet). */
1278 for (cache = csr->cache_head; cache; cache = cache->next)
1279 if (kv->offset >= cache->offset &&
1280 kv->offset < (cache->offset + cache->size))
1281 break;
1282
1283 if (!cache) {
1284 u32 q, cache_size;
1285
1286 /* Only create a new cache for Extended ROM leaves. */
1287 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1288 return -EINVAL;
1289
1290 if (csr->ops->bus_read(csr,
1291 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1292 sizeof(u32), &q, csr->private))
1293 return -EIO;
1294
1295 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1296
1297 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1298 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1299
1300 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1301 if (!cache)
1302 return -ENOMEM;
1303
1304 kv->value.leaf.data = &cache->data[1];
1305 csr->cache_tail->next = cache;
1306 cache->prev = csr->cache_tail;
1307 cache->next = NULL;
1308 csr->cache_tail = cache;
1309 cache->filled_head =
1310 CSR1212_MALLOC(sizeof(*cache->filled_head));
1311 if (!cache->filled_head)
1312 return -ENOMEM;
1313
1314 cache->filled_head->offset_start = 0;
1315 cache->filled_head->offset_end = sizeof(u32);
1316 cache->filled_tail = cache->filled_head;
1317 cache->filled_head->next = NULL;
1318 cache->filled_head->prev = NULL;
1319 cache->data[0] = q;
1320
1321 /* Don't read the entire extended ROM now. Pieces of it will
1322 * be read when entries inside it are read. */
1323 return csr1212_parse_keyval(kv, cache);
1324 }
1325
1326 cache_index = kv->offset - cache->offset;
1327
1328 /* Now search the already-read portions of the cache to see if the data is there. */
1329 for (cr = cache->filled_head; cr; cr = cr->next) {
1330 if (cache_index < cr->offset_start) {
1331 newcr = CSR1212_MALLOC(sizeof(*newcr));
1332 if (!newcr)
1333 return -ENOMEM;
1334
1335 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1336 newcr->offset_end = newcr->offset_start;
1337 newcr->next = cr;
1338 newcr->prev = cr->prev;
1339 cr->prev = newcr;
1340 cr = newcr;
1341 break;
1342 } else if ((cache_index >= cr->offset_start) &&
1343 (cache_index < cr->offset_end)) {
1344 kvi = (struct csr1212_keyval_img*)
1345 (&cache->data[bytes_to_quads(cache_index)]);
1346 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1347 break;
1348 } else if (cache_index == cr->offset_end) {
1349 break;
1350 }
1351 }
1352
1353 if (!cr) {
1354 cr = cache->filled_tail;
1355 newcr = CSR1212_MALLOC(sizeof(*newcr));
1356 if (!newcr)
1357 return -ENOMEM;
1358
1359 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1360 newcr->offset_end = newcr->offset_start;
1361 newcr->prev = cr;
1362 newcr->next = cr->next;
1363 cr->next = newcr;
1364 cr = newcr;
1365 cache->filled_tail = newcr;
1366 }
1367
1368 while (!kvi || cr->offset_end < cache_index + kv_len) {
1369 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1370 ~(csr->max_rom - 1))];
1371
1372 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1373 cr->offset_end) & ~(csr->max_rom - 1);
1374
1375 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1376 csr->private)) {
1377 if (csr->max_rom == 4)
1378 /* We've got problems! */
1379 return -EIO;
1380
1381 /* Apparently the max_rom value was a lie; fall back to
1382 * quadlet reads and try again. */
1383 csr->max_rom = 4;
1384 continue;
1385 }
1386
1387 cr->offset_end += csr->max_rom - (cr->offset_end &
1388 (csr->max_rom - 1));
1389
1390 if (!kvi && (cr->offset_end > cache_index)) {
1391 kvi = (struct csr1212_keyval_img*)
1392 (&cache->data[bytes_to_quads(cache_index)]);
1393 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1394 }
1395
1396 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1397 /* The Leaf or Directory claims its length extends
1398 * beyond the ConfigROM image region and thus beyond the
1399 * end of our cache region. Therefore, we abort now
1400 * rather than seg faulting later. */
1401 return -EIO;
1402 }
1403
1404 ncr = cr->next;
1405
1406 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1407 /* consolidate region entries */
1408 ncr->offset_start = cr->offset_start;
1409
1410 if (cr->prev)
1411 cr->prev->next = cr->next;
1412 ncr->prev = cr->prev;
1413 if (cache->filled_head == cr)
1414 cache->filled_head = ncr;
1415 CSR1212_FREE(cr);
1416 cr = ncr;
1417 }
1418 }
1419
1420 return csr1212_parse_keyval(kv, cache);
1421 }
1422
1423 struct csr1212_keyval *
1424 csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1425 {
1426 if (!kv)
1427 return NULL;
1428 if (!kv->valid)
1429 if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
1430 return NULL;
1431 return kv;
1432 }
1433
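/* Top-level parse of a remote Config ROM: read the bus info block, determine
 * max_rom, read and parse the root directory, then pre-read any Extended ROM
 * leaves it references so that caches exist for later lookups. */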
1434 int csr1212_parse_csr(struct csr1212_csr *csr)
1435 {
1436 static const int mr_map[] = { 4, 64, 1024, 0 };
1437 struct csr1212_dentry *dentry;
1438 int ret;
1439
1440 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1441
1442 ret = csr1212_parse_bus_info_block(csr);
1443 if (ret != CSR1212_SUCCESS)
1444 return ret;
1445
1446 if (!csr->ops->get_max_rom) {
1447 csr->max_rom = mr_map[0]; /* default value */
1448 } else {
1449 int i = csr->ops->get_max_rom(csr->bus_info_data,
1450 csr->private);
1451 if (i & ~0x3)
1452 return -EINVAL;
1453 csr->max_rom = mr_map[i];
1454 }
1455
1456 csr->cache_head->layout_head = csr->root_kv;
1457 csr->cache_head->layout_tail = csr->root_kv;
1458
1459 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1460 csr->bus_info_len;
1461
1462 csr->root_kv->valid = 0;
1463 csr->root_kv->next = csr->root_kv;
1464 csr->root_kv->prev = csr->root_kv;
1465 ret = csr1212_read_keyval(csr, csr->root_kv);
1466 if (ret != CSR1212_SUCCESS)
1467 return ret;
1468
1469 /* Scan through the Root directory finding all extended ROM regions
1470 * and make cache regions for them */
1471 for (dentry = csr->root_kv->value.directory.dentries_head;
1472 dentry; dentry = dentry->next) {
1473 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1474 !dentry->kv->valid) {
1475 ret = csr1212_read_keyval(csr, dentry->kv);
1476 if (ret != CSR1212_SUCCESS)
1477 return ret;
1478 }
1479 }
1480
1481 return CSR1212_SUCCESS;
1482 }