drivers/sh/clk/cpg.c

/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

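/*
 * All CPG register accesses below go through sh_clk_read()/sh_clk_write(),
 * which pick the MMIO access width from the clock's flags:
 * CLK_ENABLE_REG_8BIT/CLK_ENABLE_REG_16BIT select the narrow accessors,
 * everything else falls back to 32-bit accesses on clk->mapped_reg.
 */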
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

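/*
 * MSTP (module stop) bits gate the clock when set: enabling a clock
 * clears its enable_bit in the MSTP register, disabling sets it again.
 */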
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable = sh_clk_mstp_enable,
	.disable = sh_clk_mstp_disable,
	.recalc = followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

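/*
 * A minimal usage sketch (hypothetical parent clock, register and bit,
 * assuming the SH_CLK_MSTP32() initializer from <linux/sh_clk.h>):
 *
 *	static struct clk mstp_clks[] = {
 *		[0] = SH_CLK_MSTP32(&extal_clk, MSTPCR0, 15, 0),
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */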
/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

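/*
 * The current divisor index is taken from the register bits selected by
 * clk->div_mask shifted up by clk->enable_bit; recalc rebuilds the frequency
 * table from the div/mult table and returns the rate for that index.
 */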
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

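/*
 * For div6 clocks the divisor field must hold a valid value before the
 * CKSTP bit is cleared, so the cached rate is reprogrammed on enable.
 */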
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

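/*
 * Two variants of the common div ops: the basic set has no enable/disable
 * hooks, while sh_clk_div_enable_clk_ops also gates the clock via CKSTP.
 */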
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
};

static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent table size failed\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

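/*
 * Common registration helper for div4/div6 clocks: one cpufreq frequency
 * table per clock (nr_divisors + 1 entries, terminated with
 * CPUFREQ_TABLE_END) is carved out of a single allocation.
 */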
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div_set_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

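/*
 * A minimal div6 usage sketch (hypothetical parent clock and register,
 * assuming the SH_CLK_DIV6() initializer from <linux/sh_clk.h>):
 *
 *	static struct clk div6_clks[] = {
 *		SH_CLK_DIV6(&pll_clk, FCLKACR, 0),
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */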
/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

/* FSI-DIV */
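/*
 * The FSI-DIV register keeps its divisor in the upper 16 bits; a divisor
 * below 2 means the clock runs at the parent rate, and bits 0 and 1 are
 * set to enable the divider output.
 */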
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc = fsidiv_recalc,
	.round_rate = fsidiv_round_rate,
	.set_rate = fsidiv_set_rate,
	.enable = fsidiv_enable,
	.disable = fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}