/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/slab.h>
14 #include <linux/debugfs.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
/*
 * Forward declarations: regcache_rbtree_init() calls both before their
 * definitions appear later in this file.
 */
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
24 struct regcache_rbtree_node
{
25 /* the actual rbtree node holding this block */
27 /* base register handled by this block */
28 unsigned int base_reg
;
29 /* block of adjacent registers */
31 /* number of registers available in the block */
33 } __attribute__ ((packed
));
35 struct regcache_rbtree_ctx
{
37 struct regcache_rbtree_node
*cached_rbnode
;
40 static inline void regcache_rbtree_get_base_top_reg(
41 struct regcache_rbtree_node
*rbnode
,
42 unsigned int *base
, unsigned int *top
)
44 *base
= rbnode
->base_reg
;
45 *top
= rbnode
->base_reg
+ rbnode
->blklen
- 1;
48 static unsigned int regcache_rbtree_get_register(
49 struct regcache_rbtree_node
*rbnode
, unsigned int idx
,
50 unsigned int word_size
)
52 return regcache_get_val(rbnode
->block
, idx
, word_size
);
55 static void regcache_rbtree_set_register(struct regcache_rbtree_node
*rbnode
,
56 unsigned int idx
, unsigned int val
,
57 unsigned int word_size
)
59 regcache_set_val(rbnode
->block
, idx
, val
, word_size
);
62 static struct regcache_rbtree_node
*regcache_rbtree_lookup(struct regmap
*map
,
65 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
67 struct regcache_rbtree_node
*rbnode
;
68 unsigned int base_reg
, top_reg
;
70 rbnode
= rbtree_ctx
->cached_rbnode
;
72 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
73 if (reg
>= base_reg
&& reg
<= top_reg
)
77 node
= rbtree_ctx
->root
.rb_node
;
79 rbnode
= container_of(node
, struct regcache_rbtree_node
, node
);
80 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
81 if (reg
>= base_reg
&& reg
<= top_reg
) {
82 rbtree_ctx
->cached_rbnode
= rbnode
;
84 } else if (reg
> top_reg
) {
85 node
= node
->rb_right
;
86 } else if (reg
< base_reg
) {
94 static int regcache_rbtree_insert(struct rb_root
*root
,
95 struct regcache_rbtree_node
*rbnode
)
97 struct rb_node
**new, *parent
;
98 struct regcache_rbtree_node
*rbnode_tmp
;
99 unsigned int base_reg_tmp
, top_reg_tmp
;
100 unsigned int base_reg
;
103 new = &root
->rb_node
;
105 rbnode_tmp
= container_of(*new, struct regcache_rbtree_node
,
107 /* base and top registers of the current rbnode */
108 regcache_rbtree_get_base_top_reg(rbnode_tmp
, &base_reg_tmp
,
110 /* base register of the rbnode to be added */
111 base_reg
= rbnode
->base_reg
;
113 /* if this register has already been inserted, just return */
114 if (base_reg
>= base_reg_tmp
&&
115 base_reg
<= top_reg_tmp
)
117 else if (base_reg
> top_reg_tmp
)
118 new = &((*new)->rb_right
);
119 else if (base_reg
< base_reg_tmp
)
120 new = &((*new)->rb_left
);
123 /* insert the node into the rbtree */
124 rb_link_node(&rbnode
->node
, parent
, new);
125 rb_insert_color(&rbnode
->node
, root
);
130 #ifdef CONFIG_DEBUG_FS
131 static int rbtree_show(struct seq_file
*s
, void *ignored
)
133 struct regmap
*map
= s
->private;
134 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
135 struct regcache_rbtree_node
*n
;
136 struct rb_node
*node
;
137 unsigned int base
, top
;
141 mutex_lock(&map
->lock
);
143 for (node
= rb_first(&rbtree_ctx
->root
); node
!= NULL
;
144 node
= rb_next(node
)) {
145 n
= container_of(node
, struct regcache_rbtree_node
, node
);
147 regcache_rbtree_get_base_top_reg(n
, &base
, &top
);
148 seq_printf(s
, "%x-%x (%d)\n", base
, top
, top
- base
+ 1);
151 registers
+= top
- base
+ 1;
154 seq_printf(s
, "%d nodes, %d registers, average %d registers\n",
155 nodes
, registers
, registers
/ nodes
);
157 mutex_unlock(&map
->lock
);
162 static int rbtree_open(struct inode
*inode
, struct file
*file
)
164 return single_open(file
, rbtree_show
, inode
->i_private
);
167 static const struct file_operations rbtree_fops
= {
171 .release
= single_release
,
174 static void rbtree_debugfs_init(struct regmap
*map
)
176 debugfs_create_file("rbtree", 0400, map
->debugfs
, map
, &rbtree_fops
);
/* No-op stub when debugfs support is compiled out. */
static void rbtree_debugfs_init(struct regmap *map)
{
}
184 static int regcache_rbtree_init(struct regmap
*map
)
186 struct regcache_rbtree_ctx
*rbtree_ctx
;
190 map
->cache
= kmalloc(sizeof *rbtree_ctx
, GFP_KERNEL
);
194 rbtree_ctx
= map
->cache
;
195 rbtree_ctx
->root
= RB_ROOT
;
196 rbtree_ctx
->cached_rbnode
= NULL
;
198 for (i
= 0; i
< map
->num_reg_defaults
; i
++) {
199 ret
= regcache_rbtree_write(map
,
200 map
->reg_defaults
[i
].reg
,
201 map
->reg_defaults
[i
].def
);
206 rbtree_debugfs_init(map
);
211 regcache_rbtree_exit(map
);
215 static int regcache_rbtree_exit(struct regmap
*map
)
217 struct rb_node
*next
;
218 struct regcache_rbtree_ctx
*rbtree_ctx
;
219 struct regcache_rbtree_node
*rbtree_node
;
221 /* if we've already been called then just return */
222 rbtree_ctx
= map
->cache
;
226 /* free up the rbtree */
227 next
= rb_first(&rbtree_ctx
->root
);
229 rbtree_node
= rb_entry(next
, struct regcache_rbtree_node
, node
);
230 next
= rb_next(&rbtree_node
->node
);
231 rb_erase(&rbtree_node
->node
, &rbtree_ctx
->root
);
232 kfree(rbtree_node
->block
);
236 /* release the resources */
243 static int regcache_rbtree_read(struct regmap
*map
,
244 unsigned int reg
, unsigned int *value
)
246 struct regcache_rbtree_node
*rbnode
;
247 unsigned int reg_tmp
;
249 rbnode
= regcache_rbtree_lookup(map
, reg
);
251 reg_tmp
= reg
- rbnode
->base_reg
;
252 *value
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
253 map
->cache_word_size
);
262 static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node
*rbnode
,
263 unsigned int pos
, unsigned int reg
,
264 unsigned int value
, unsigned int word_size
)
268 blk
= krealloc(rbnode
->block
,
269 (rbnode
->blklen
+ 1) * word_size
, GFP_KERNEL
);
273 /* insert the register value in the correct place in the rbnode block */
274 memmove(blk
+ (pos
+ 1) * word_size
,
275 blk
+ pos
* word_size
,
276 (rbnode
->blklen
- pos
) * word_size
);
278 /* update the rbnode block, its size and the base register */
282 rbnode
->base_reg
= reg
;
284 regcache_rbtree_set_register(rbnode
, pos
, value
, word_size
);
288 static int regcache_rbtree_write(struct regmap
*map
, unsigned int reg
,
291 struct regcache_rbtree_ctx
*rbtree_ctx
;
292 struct regcache_rbtree_node
*rbnode
, *rbnode_tmp
;
293 struct rb_node
*node
;
295 unsigned int reg_tmp
;
300 rbtree_ctx
= map
->cache
;
301 /* if we can't locate it in the cached rbnode we'll have
302 * to traverse the rbtree looking for it.
304 rbnode
= regcache_rbtree_lookup(map
, reg
);
306 reg_tmp
= reg
- rbnode
->base_reg
;
307 val
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
308 map
->cache_word_size
);
311 regcache_rbtree_set_register(rbnode
, reg_tmp
, value
,
312 map
->cache_word_size
);
314 /* look for an adjacent register to the one we are about to add */
315 for (node
= rb_first(&rbtree_ctx
->root
); node
;
316 node
= rb_next(node
)) {
317 rbnode_tmp
= rb_entry(node
, struct regcache_rbtree_node
, node
);
318 for (i
= 0; i
< rbnode_tmp
->blklen
; i
++) {
319 reg_tmp
= rbnode_tmp
->base_reg
+ i
;
320 if (abs(reg_tmp
- reg
) != 1)
322 /* decide where in the block to place our register */
323 if (reg_tmp
+ 1 == reg
)
327 ret
= regcache_rbtree_insert_to_block(rbnode_tmp
, pos
,
329 map
->cache_word_size
);
332 rbtree_ctx
->cached_rbnode
= rbnode_tmp
;
336 /* we did not manage to find a place to insert it in an existing
337 * block so create a new rbnode with a single register in its block.
338 * This block will get populated further if any other adjacent
339 * registers get modified in the future.
341 rbnode
= kzalloc(sizeof *rbnode
, GFP_KERNEL
);
345 rbnode
->base_reg
= reg
;
346 rbnode
->block
= kmalloc(rbnode
->blklen
* map
->cache_word_size
,
348 if (!rbnode
->block
) {
352 regcache_rbtree_set_register(rbnode
, 0, value
, map
->cache_word_size
);
353 regcache_rbtree_insert(&rbtree_ctx
->root
, rbnode
);
354 rbtree_ctx
->cached_rbnode
= rbnode
;
360 static int regcache_rbtree_sync(struct regmap
*map
)
362 struct regcache_rbtree_ctx
*rbtree_ctx
;
363 struct rb_node
*node
;
364 struct regcache_rbtree_node
*rbnode
;
370 rbtree_ctx
= map
->cache
;
371 for (node
= rb_first(&rbtree_ctx
->root
); node
; node
= rb_next(node
)) {
372 rbnode
= rb_entry(node
, struct regcache_rbtree_node
, node
);
373 for (i
= 0; i
< rbnode
->blklen
; i
++) {
374 regtmp
= rbnode
->base_reg
+ i
;
375 val
= regcache_rbtree_get_register(rbnode
, i
,
376 map
->cache_word_size
);
378 /* Is this the hardware default? If so skip. */
379 ret
= regcache_lookup_reg(map
, i
);
380 if (ret
> 0 && val
== map
->reg_defaults
[ret
].def
)
383 map
->cache_bypass
= 1;
384 ret
= _regmap_write(map
, regtmp
, val
);
385 map
->cache_bypass
= 0;
388 dev_dbg(map
->dev
, "Synced register %#x, value %#x\n",
396 struct regcache_ops regcache_rbtree_ops
= {
397 .type
= REGCACHE_RBTREE
,
399 .init
= regcache_rbtree_init
,
400 .exit
= regcache_rbtree_exit
,
401 .read
= regcache_rbtree_read
,
402 .write
= regcache_rbtree_write
,
403 .sync
= regcache_rbtree_sync