/*
 * arch/arm/mach-mt8127/mt_gpt.c
 * MediaTek MT8127 general-purpose timer (GPT) driver.
 * (Commit: fix section mismatch warnings)
 */
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/spinlock.h>
5#include <linux/interrupt.h>
6#include <linux/irq.h>
7#include <linux/proc_fs.h>
8#include <linux/syscore_ops.h>
9#include <linux/sched_clock.h>
10#include <linux/version.h>
11#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
12#include <linux/seq_file.h>
13#endif
14#include <mach/mt_reg_base.h>
15#include <mach/mt_gpt.h>
16#include <mach/mt_timer.h>
17#include <mach/irqs.h>
18//#include <mach/mt_boot.h>
19
20//#define CONFIG_CLKSRC_64_BIT
21
22
23#ifdef CONFIG_MT8127_FPGA
24#define SYS_CLK_RATE (6000000) /* FPGA clock source is 6M */
25#else
26#define SYS_CLK_RATE (13000000)
27#endif
28
29/* MT6582 GPT Usage Allocation */
30#ifndef CONFIG_MT8127_FPGA
31#define CONFIG_HAVE_SYSCNT
32#endif
33
34#ifdef CONFIG_HAVE_SYSCNT
35//#define CONFIG_SYSCNT_ASSIST
36
37#define GPT_SYSCNT_ID (GPT6)
38
39#ifdef CONFIG_SYSCNT_ASSIST
40#define GPT_SYSCNT_ASSIST_ID (GPT7)
41#endif
42#endif
43
44#define GPT_CLKEVT_ID (GPT1)
45
46#ifdef CONFIG_CLKSRC_64_BIT
47#define GPT_CLKSRC_ID (GPT6)
48#else
49#define GPT_CLKSRC_ID (GPT3)
50#endif
51
52#define GPT4_1MS_TICK ((U32)13000) // 1000000 / 76.92ns = 13000.520
53#define GPT_IRQEN (APMCU_GPTIMER_BASE + 0x0000)
54#define GPT_IRQSTA (APMCU_GPTIMER_BASE + 0x0004)
55#define GPT_IRQACK (APMCU_GPTIMER_BASE + 0x0008)
56#define GPT1_BASE (APMCU_GPTIMER_BASE + 0x0010)
57#define GPT4_BASE (APMCU_GPTIMER_BASE + 0x0040)
58#define GPT7_BASE (APMCU_GPTIMER_BASE + 0x008C)
59
60
61#define GPT_CON (0x00)
62#define GPT_CLK (0x04)
63#define GPT_CNT (0x08)
64#define GPT_CMP (0x0C)
65#define GPT_CNTH (0x18)
66#define GPT_CMPH (0x1C)
67
68#define GPT_CON_ENABLE (0x1 << 0)
69#define GPT_CON_CLRCNT (0x1 << 1)
70#define GPT_CON_OPMODE (0x3 << 4)
71
72#define GPT_CLK_CLKDIV (0xf << 0)
73#define GPT_CLK_CLKSRC (0x1 << 4)
74
75#define GPT_OPMODE_MASK (0x3)
76#define GPT_CLKDIV_MASK (0xf)
77#define GPT_CLKSRC_MASK (0x1)
78
79#define GPT_OPMODE_OFFSET (4)
80#define GPT_CLKSRC_OFFSET (4)
81
82
83#define GPT_ISR (0x0010)
84#define GPT_IN_USE (0x0100)
85
86#define GPT_FEAT_64_BIT (0x0001)
87
88
/* Per-timer software state, one entry per hardware GPT. */
struct gpt_device {
	unsigned int id;		/* timer index (GPT1..) */
	unsigned int mode;		/* cached operating mode */
	unsigned int clksrc;		/* cached clock source selection */
	unsigned int clkdiv;		/* cached clock divider selection */
	unsigned int cmp[2];		/* cached compare value: low, high word */
	void (*func)(unsigned long);	/* client callback on expiry */
	int flags;			/* GPT_ISR / GPT_IN_USE / client flags */
	int features;			/* GPT_FEAT_64_BIT for 64-bit counters */
	unsigned int base_addr;		/* MMIO base of this timer's registers */
};
static struct gpt_device gpt_devs[NR_GPTS];
101
/************************return GPT4 count(before init clear) to record kernel start time between LK and kernel****************************/
static unsigned int boot_time_value = 0;

/*
 * Read GPT4 (left running by the bootloader) and convert its tick count
 * to milliseconds, rounding up.  Must run before the timers are reset
 * in mt_gpt_init(), otherwise the count has already been cleared.
 */
static unsigned int xgpt_boot_up_time(void)
{
	unsigned int tick;
	tick = DRV_Reg32(GPT4_BASE + GPT_CNT);
	return ((tick + (GPT4_1MS_TICK - 1)) / GPT4_1MS_TICK);
}
/**************************************************************************************************************************/
112
113static struct gpt_device *id_to_dev(unsigned int id)
114{
115 return id < NR_GPTS ? gpt_devs + id : NULL;
116}
117
118
static DEFINE_SPINLOCK(gpt_lock);

/* Serialize all register/bookkeeping updates; irq-saving variants so
 * the helpers are safe to use from any context. */
#define gpt_update_lock(flags) \
do { \
	spin_lock_irqsave(&gpt_lock, flags); \
} while (0)

#define gpt_update_unlock(flags) \
do { \
	spin_unlock_irqrestore(&gpt_lock, flags); \
} while (0)
130
131
/* Default no-op callback so every handlers[] slot is always callable. */
static inline void noop(unsigned long data) { }

/* One irq-level callback slot per GPT, invoked from gpt_handler(). */
static void(*handlers[])(unsigned long) = {
	noop,
	noop,
	noop,
	noop,
	noop,
	noop,
	noop,
};
142
143
/* Per-timer tasklets for deferred (non-GPT_ISR) client callbacks. */
static struct tasklet_struct task[NR_GPTS];

/* Irq-level trampoline: defer the real client callback to its tasklet. */
static void task_sched(unsigned long data)
{
	unsigned int id = (unsigned int)data;
	tasklet_schedule(&task[id]);
}
150
/*
 * Install @func as the expiry callback for @dev.  With GPT_ISR set it
 * runs straight from the interrupt handler; otherwise it becomes a
 * tasklet body and the irq-level slot holds task_sched() to schedule
 * it.  Note the tasklet is created with data 0, so deferred callbacks
 * receive 0 rather than the timer id.
 */
static void __gpt_set_handler(struct gpt_device *dev, void (*func)(unsigned long))
{
	if (func) {
		if (dev->flags & GPT_ISR)
			handlers[dev->id] = func;
		else {
			tasklet_init(&task[dev->id], func, 0);
			handlers[dev->id] = task_sched;
		}
	}
	dev->func = func;
}
163
/*
 * Find the lowest-numbered GPT with a pending interrupt, acknowledge
 * that single status bit, and return its id.  Returns NR_GPTS when no
 * bit is pending, which id_to_dev() maps to NULL for the caller.
 */
static inline unsigned int gpt_get_and_ack_irq(void)
{
	unsigned int id;
	unsigned int mask;
	unsigned int status = DRV_Reg32(GPT_IRQSTA);

	for (id = GPT1; id < NR_GPTS; id++) {
		mask = 0x1 << id;
		if (status & mask) {
			DRV_WriteReg32(GPT_IRQACK, mask);
			break;
		}
	}

	return id;
}
180
/*
 * Shared hard-irq handler for all GPTs.  GPT_ISR clients have their
 * callback invoked here directly with dev_id (the clockevent device
 * for GPT1); otherwise handlers[id] is task_sched() and receives the
 * timer id so it can schedule the deferred tasklet.
 */
static irqreturn_t gpt_handler(int irq, void *dev_id)
{
	unsigned int id = gpt_get_and_ack_irq();
	struct gpt_device *dev = id_to_dev(id);

	if (likely(dev)) {
		if (!(dev->flags & GPT_ISR)) {
			handlers[id](id);
		} else {
			handlers[id]((unsigned long)dev_id);
		}
	} else {
		/* no pending bit found: spurious interrupt */
		printk(KERN_WARNING "GPT id is %d\n", id);
	}

	return IRQ_HANDLED;
}
198
199
/* Enable this timer's line in the shared GPT_IRQEN register. */
static void __gpt_enable_irq(struct gpt_device *dev)
{
	DRV_SetReg32(GPT_IRQEN, 0x1 << (dev->id));
}

/* Disable this timer's interrupt. */
static void __gpt_disable_irq(struct gpt_device *dev)
{
	DRV_ClrReg32(GPT_IRQEN, 0x1 << (dev->id));
}

/* Acknowledge (clear) this timer's pending interrupt status bit. */
static void __gpt_ack_irq(struct gpt_device *dev)
{
	DRV_WriteReg32(GPT_IRQACK, 0x1 << (dev->id));
}

/*
 * Return one timer to a known idle state: control cleared, irq
 * disabled and acked, default clock setting, counter cleared
 * (0x2 == GPT_CON_CLRCNT) and compare value(s) zeroed.
 */
static void __gpt_reset(struct gpt_device *dev)
{
	DRV_WriteReg32(dev->base_addr + GPT_CON, 0x0);
	__gpt_disable_irq(dev);
	__gpt_ack_irq(dev);
	DRV_WriteReg32(dev->base_addr + GPT_CLK, 0x0);
	DRV_WriteReg32(dev->base_addr + GPT_CON, 0x2);
	DRV_WriteReg32(dev->base_addr + GPT_CMP, 0x0);
	if (dev->features & GPT_FEAT_64_BIT) {
		DRV_WriteReg32(dev->base_addr + GPT_CMPH, 0);
	}
}
227
/*
 * Clear the counter and busy-wait until the hardware reads back zero,
 * so a subsequent start is guaranteed to begin from 0.  NOTE(review):
 * this spins forever if the clear never takes effect - confirm the
 * hardware guarantees completion.
 */
static void __gpt_clrcnt(struct gpt_device *dev)
{
	DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_CLRCNT);
	while (DRV_Reg32(dev->base_addr + GPT_CNT)) {
	}
}

/* Start counting from the current counter value. */
static void __gpt_start(struct gpt_device *dev)
{
	DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE);
}

/* Clear the counter, then start it. */
static void __gpt_start_from_zero(struct gpt_device *dev)
{
	//DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE | GPT_CON_CLRCNT);
	__gpt_clrcnt(dev);
	__gpt_start(dev);
}

/* gpt is counting or not */
static int __gpt_get_status(struct gpt_device *dev)
{
	return !!(DRV_Reg32(dev->base_addr + GPT_CON) & GPT_CON_ENABLE);
}

/* Stop counting; the current counter value is preserved. */
static void __gpt_stop(struct gpt_device *dev)
{
	DRV_ClrReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE);
}
257
258static void __gpt_set_mode(struct gpt_device *dev, unsigned int mode)
259{
260 unsigned int ctl = DRV_Reg32(dev->base_addr + GPT_CON);
261 mode <<= GPT_OPMODE_OFFSET;
262
263 ctl &= ~GPT_CON_OPMODE;
264 ctl |= mode;
265
266 DRV_WriteReg32(dev->base_addr + GPT_CON, ctl);
267
268 dev->mode = mode;
269}
270
/* Program clock source and divider, and cache the choices. */
static void __gpt_set_clk(struct gpt_device *dev, unsigned int clksrc, unsigned int clkdiv)
{
	unsigned int clk = (clksrc << GPT_CLKSRC_OFFSET) | clkdiv;
	DRV_WriteReg32(dev->base_addr + GPT_CLK, clk);

	dev->clksrc = clksrc;
	dev->clkdiv = clkdiv;
}
279
280static void __gpt_set_cmp(struct gpt_device *dev, unsigned int cmpl,
281 unsigned int cmph)
282{
283 DRV_WriteReg32(dev->base_addr + GPT_CMP, cmpl);
284 dev->cmp[0] = cmpl;
285
286 if (dev->features & GPT_FEAT_64_BIT) {
287 DRV_WriteReg32(dev->base_addr + GPT_CMPH, cmph);
288 dev->cmp[1] = cmpl;
289 }
290}
291
/*
 * Read back the compare value.  For a 64-bit timer *ptr must point to
 * at least two words; the high word is stored in ptr[1].
 */
static void __gpt_get_cmp(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = DRV_Reg32(dev->base_addr + GPT_CMP);
	if (dev->features & GPT_FEAT_64_BIT) {
		*(++ptr) = DRV_Reg32(dev->base_addr + GPT_CMPH);
	}
}

/*
 * Read the current count; same two-word contract as __gpt_get_cmp().
 * NOTE(review): the two words are read non-atomically, so a carry
 * between the reads can produce an inconsistent 64-bit value - confirm
 * whether the hardware latches CNTH on the CNT read.
 */
static void __gpt_get_cnt(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = DRV_Reg32(dev->base_addr + GPT_CNT);
	if (dev->features & GPT_FEAT_64_BIT) {
		*(++ptr) = DRV_Reg32(dev->base_addr + GPT_CNTH);
	}
}

/* OR additional GPT_* flag bits into the device's flag word. */
static void __gpt_set_flags(struct gpt_device *dev, unsigned int flags)
{
	dev->flags |= flags;
}
312
313static void gpt_devs_init(void)
314{
315 int i;
316 for (i = 0; i < NR_GPTS; i++) {
317 gpt_devs[i].id = i;
318 if(GPT7 == i)
319 {
320 gpt_devs[i].base_addr = GPT7_BASE;
321 }
322 else
323 {
324 gpt_devs[i].base_addr = GPT1_BASE + 0x10 * i;
325 }
326 }
327
328 gpt_devs[GPT6].features |= GPT_FEAT_64_BIT;
329}
330
/*
 * Common configuration path; caller holds gpt_lock.  Marks the timer
 * in use, programs mode/clock, installs the callback, and - when not
 * free-running - the compare value and interrupt enable.  Starts the
 * timer unless the client passed GPT_NOAUTOEN.
 */
static void setup_gpt_dev_locked(struct gpt_device *dev, unsigned int mode,
		unsigned int clksrc, unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	__gpt_set_flags(dev, flags | GPT_IN_USE);

	__gpt_set_mode(dev, mode & GPT_OPMODE_MASK);
	__gpt_set_clk(dev, clksrc & GPT_CLKSRC_MASK, clkdiv & GPT_CLKDIV_MASK);

	if (func)
		__gpt_set_handler(dev, func);

	if (dev->mode != GPT_FREE_RUN) {
		__gpt_set_cmp(dev, cmp, 0);
		if (!(dev->flags & GPT_NOIRQEN)) {
			__gpt_enable_irq(dev);
		}
	}

	if (!(dev->flags & GPT_NOAUTOEN))
		__gpt_start(dev);
}
353
354
/*
 * Claim and configure timer @id.
 * @mode/@clksrc/@clkdiv: hardware configuration (masked to valid bits).
 * @cmp: compare value, used for non-free-run modes.
 * @func: expiry callback; runs in hard-irq context with GPT_ISR in
 *        @flags, otherwise deferred to a tasklet.
 * Returns 0 on success, -EINVAL for a bad id, -EBUSY if already taken.
 */
int request_gpt(unsigned int id, unsigned int mode, unsigned int clksrc,
		unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);
	if (!dev)
		return -EINVAL;

	if (dev->flags & GPT_IN_USE) {
		/* id is zero-based; GPT numbering is one-based */
		printk(KERN_ERR "%s: GPT%d is in use!\n", __func__, (id + 1));
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	setup_gpt_dev_locked(dev, mode, clksrc, clkdiv, cmp, func, flags);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(request_gpt);
376
/* Reset the hardware and forget handler and flags; caller holds gpt_lock. */
static void release_gpt_dev_locked(struct gpt_device *dev)
{
	__gpt_reset(dev);

	handlers[dev->id] = noop;
	dev->func = NULL;

	dev->flags = 0;
}
386
387int free_gpt(unsigned int id)
388{
389 unsigned long save_flags;
390 struct gpt_device *dev = id_to_dev(id);
391 if (!dev)
392 return -EINVAL;
393
394 if (!(dev->flags & GPT_IN_USE))
395 return 0;
396
397 gpt_update_lock(save_flags);
398 release_gpt_dev_locked(dev);
399 gpt_update_unlock(save_flags);
400
401 return 0;
402}
403EXPORT_SYMBOL(free_gpt);
404
405int start_gpt(unsigned int id)
406{
407 unsigned long save_flags;
408 struct gpt_device *dev = id_to_dev(id);
409
410 if (!dev)
411 return -EINVAL;
412
413 if (!(dev->flags & GPT_IN_USE)) {
414 printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
415 return -EBUSY;
416 }
417
418 gpt_update_lock(save_flags);
419 __gpt_clrcnt(dev);
420 __gpt_start(dev);
421 gpt_update_unlock(save_flags);
422
423 return 0;
424}
425EXPORT_SYMBOL(start_gpt);
426
427int stop_gpt(unsigned int id)
428{
429 unsigned long save_flags;
430 struct gpt_device *dev = id_to_dev(id);
431 if (!dev)
432 return -EINVAL;
433
434 if (!(dev->flags & GPT_IN_USE)) {
435 printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
436 return -EBUSY;
437 }
438
439 gpt_update_lock(save_flags);
440 __gpt_stop(dev);
441 gpt_update_unlock(save_flags);
442
443 return 0;
444}
445EXPORT_SYMBOL(stop_gpt);
446
447int restart_gpt(unsigned int id)
448{
449 unsigned long save_flags;
450 struct gpt_device *dev = id_to_dev(id);
451
452 if (!dev)
453 return -EINVAL;
454
455 if (!(dev->flags & GPT_IN_USE)) {
456 printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
457 return -EBUSY;
458 }
459
460 gpt_update_lock(save_flags);
461 __gpt_start(dev);
462 gpt_update_unlock(save_flags);
463
464 return 0;
465}
466EXPORT_SYMBOL(restart_gpt);
467
468
469int gpt_is_counting(unsigned int id)
470{
471 unsigned long save_flags;
472 int is_counting;
473 struct gpt_device *dev = id_to_dev(id);
474
475 if (!dev)
476 return -EINVAL;
477
478 if (!(dev->flags & GPT_IN_USE)) {
479 printk(KERN_ERR "%s: GPT%d is not in use!\n", __func__, id);
480 return -EBUSY;
481 }
482
483 gpt_update_lock(save_flags);
484 is_counting = __gpt_get_status(dev);
485 gpt_update_unlock(save_flags);
486
487 return is_counting;
488}
489EXPORT_SYMBOL(gpt_is_counting);
490
491
/*
 * Update the compare value of a one-shot/repeat timer.  Rejected for
 * free-running timers, which have no compare semantics.
 */
int gpt_set_cmp(unsigned int id, unsigned int val)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (dev->mode == GPT_FREE_RUN)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_set_cmp(dev, val, 0);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_set_cmp);
510
/*
 * Read the compare value into *ptr (two words for 64-bit timers - the
 * caller must provide enough room).  Returns 0 or -EINVAL.
 */
int gpt_get_cmp(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);
	if (!dev || !ptr)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_get_cmp(dev, ptr);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_get_cmp);
525
/*
 * Read the current count into *ptr (two words for 64-bit timers).  A
 * 32-bit read is a single register access, so the lock is only taken
 * for 64-bit timers where the two reads must stay together.
 */
int gpt_get_cnt(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);
	if (!dev || !ptr)
		return -EINVAL;

	if (!(dev->features & GPT_FEAT_64_BIT)) {
		__gpt_get_cnt(dev, ptr);
	} else {
		gpt_update_lock(save_flags);
		__gpt_get_cnt(dev, ptr);
		gpt_update_unlock(save_flags);
	}

	return 0;
}
EXPORT_SYMBOL(gpt_get_cnt);
544
545
546int gpt_check_irq(unsigned int id)
547{
548 unsigned int mask = 0x1 << id;
549 unsigned int status = DRV_Reg32(GPT_IRQSTA);
550
551 return (status & mask) ? 1 : 0;
552}
553EXPORT_SYMBOL(gpt_check_irq);
554
555
556int gpt_check_and_ack_irq(unsigned int id)
557{
558 unsigned int mask = 0x1 << id;
559 unsigned int status = DRV_Reg32(GPT_IRQSTA);
560
561 if (status & mask) {
562 DRV_WriteReg32(GPT_IRQACK, mask);
563 return 1;
564 } else {
565 return 0;
566 }
567}
568EXPORT_SYMBOL(gpt_check_and_ack_irq);
569
/* Milliseconds elapsed between LK (bootloader) handoff and GPT init. */
unsigned int gpt_boot_time(void)
{
	return boot_time_value;
}
EXPORT_SYMBOL(gpt_boot_time);
575
/*
 * clockevent set_next_event: program GPT1 to fire after @cycles ticks,
 * restarting the counter from zero.
 */
static int mt_gpt_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	__gpt_stop(dev);
	__gpt_set_cmp(dev, cycles, 0);
	__gpt_start_from_zero(dev);

	return 0;
}
589
/*
 * clockevent set_mode: (re)configure GPT1 for periodic or one-shot
 * operation, or quiesce it on shutdown.  RESUME needs no action here
 * because set_next_event reprograms the timer.
 */
static void mt_gpt_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_REPEAT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_ONE_SHOT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		__gpt_stop(dev);
		__gpt_disable_irq(dev);
		__gpt_ack_irq(dev);
		/* fallthrough */
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
620
/*
 * clocksource read: return the free-running counter.  cnt[1] stays 0
 * for the 32-bit GPT3 source, so the result fits in 32 bits there;
 * the 64-bit GPT6 source fills both words.
 */
static cycle_t mt_gpt_read(struct clocksource *cs)
{
	cycle_t cycles;
	unsigned int cnt[2] = {0, 0};
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);
	__gpt_get_cnt(dev, cnt);

	cycles = ((cycle_t)(cnt[1])) << 32 | (cycle_t)(cnt[0]);

	return cycles;
}
632
/*
 * sched_clock backend wrapping the clocksource read.
 * NOTE(review): returns long (32-bit on ARM), truncating the 64-bit
 * cycle value; the registration site casts the function pointer to
 * hide the mismatch - confirm against sched_clock_register()'s
 * expected read-function signature.
 */
static long notrace mt_read_sched_clock(void)
{
	return mt_gpt_read(NULL);
}
637
static void __init mt_gpt_init(void);

/*
 * Timer description consumed by the mach-level time init code.
 * __refdata: the struct legitimately references the __init function
 * mt_gpt_init(), so suppress the section-mismatch warning.
 */
struct mt_clock __refdata mt6582_gpt =
{
	.clockevent =
	{
		.name = "mt6582-gpt",
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.shift = 32,
		.rating = 300,
		.set_next_event = mt_gpt_set_next_event,
		.set_mode = mt_gpt_set_mode,
	},
	.clocksource =
	{
		.name = "mt6582-gpt",
		.rating = 300,
		.read = mt_gpt_read,
		.mask = CLOCKSOURCE_MASK(32),
		.shift = 25,
		.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	},
	.irq =
	{
		.name = "mt6582-gpt",
		.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL |IRQF_TRIGGER_LOW,
		.handler = gpt_handler,
		.dev_id = &mt6582_gpt.clockevent,
		.irq = MT6582_APARM_GPTTIMER_IRQ_LINE,
	},
	.init_func = mt_gpt_init,
};
669
/* GPT1 expiry callback: forward to the clockevent core's handler. */
static void clkevt_handler(unsigned long data)
{
	struct clock_event_device *evt = (struct clock_event_device*)data;
	evt->event_handler(evt);
}
675
676static inline void setup_clkevt(void)
677{
678 unsigned int cmp;
679 struct clock_event_device *evt = &mt6582_gpt.clockevent;
680 struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);
681
682 evt->mult = div_sc(SYS_CLK_RATE, NSEC_PER_SEC, evt->shift);
683 evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
684 evt->min_delta_ns = clockevent_delta2ns(3, evt);
685 evt->cpumask = cpumask_of(0);
686#ifndef CONFIG_MT8127_FPGA
687
688 setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
689 SYS_CLK_RATE / HZ, clkevt_handler, GPT_ISR);
690#else
691 setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
692 SYS_CLK_RATE / HZ, clkevt_handler, GPT_ISR);
693#endif
694
695 __gpt_get_cmp(dev, &cmp);
696 printk("GPT1_CMP = %d, HZ = %d\n", cmp, HZ);
697}
698
699#ifndef CONFIG_CLKSRC_64_BIT
/*
 * Configure GPT3 as the free-running 32-bit clocksource and register
 * it as the sched_clock source.  NOTE(review): mt_read_sched_clock()
 * returns long, and the (void *) cast hides the signature mismatch
 * with sched_clock_register() - confirm the intended read-function
 * type for this kernel version.
 */
static inline void setup_clksrc(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	cs->mult = clocksource_hz2mult(SYS_CLK_RATE, cs->shift);

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0, NULL, 0);
	sched_clock_register((void *)mt_read_sched_clock, 32, SYS_CLK_RATE);
}
711#else
712
/* Fallback sched_clock source until the GPT clocksource is ready. */
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}
717
718static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
719{
720 return (cyc * mult) >> shift;
721}
722
static u32 g_clksrc_init =0;	/* set once setup_clksrc() has completed */

/*
 * 64-bit sched_clock based on the GPT clocksource.  Falls back to
 * jiffies until setup_clksrc() has run.  Tick-to-ns conversion: at
 * 13 MHz one tick is 1000/13 ns, computed as cycles * 2000 / 26
 * (6 MHz FPGA: cycles * 2000 / 12).
 */
unsigned long long notrace sched_clock(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	cycle_t cycles;

	if(0 == g_clksrc_init)
	{
		return jiffy_sched_clock_read();
	}
	else
	{
		cycles = mt_gpt_read(cs);
		cycles *= 1000 << 1;
#ifndef CONFIG_MT8127_FPGA
		do_div(cycles, 13 << 1);
#else
		do_div(cycles, 6 << 1);
#endif
		return cycles;
		//return cyc_to_ns(cycles,cs->mult, cs->shift);
	}

}
/*
 * Configure the free-running 64-bit clocksource (GPT6) and flag
 * sched_clock() to start using it.  NOTE(review): the FPGA and
 * non-FPGA branches are identical; the divider was probably meant to
 * differ on FPGA - confirm before collapsing the #ifdef.
 */
static inline void setup_clksrc(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	cs->mult = clocksource_hz2mult(SYS_CLK_RATE, cs->shift);
#ifndef CONFIG_MT8127_FPGA

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0, NULL, 0);
#else

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0, NULL, 0);

#endif
	g_clksrc_init = 1;
}
#endif
767
#ifdef CONFIG_HAVE_SYSCNT
/*
 * Start GPT6 as the free-running "system counter".
 * NOTE(review): the FPGA branch's comment asks for div2 at 6 MHz, but
 * GPT_CLK_DIV_1 is passed in both branches - confirm which divider is
 * intended on FPGA.
 */
static inline void setup_syscnt(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ID);

#ifndef CONFIG_MT8127_FPGA

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0, NULL, 0);
#else
	//use div2 for 6Mhz
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0, NULL, 0);
#endif

	printk("fwq sysc count \n");
}
#else
static inline void setup_syscnt(void) {}
#endif
788
789#if defined(CONFIG_HAVE_SYSCNT) && defined(CONFIG_SYSCNT_ASSIST)
790
/* Read the 64-bit ARM generic-timer physical count (CNTPCT) via CP15. */
#define read_cntpct(cntpct_lo, cntpct_hi) \
do { \
	__asm__ __volatile__( \
	"MRRC p15, 0, %0, %1, c14\n" \
	:"=r"(cntpct_lo), "=r"(cntpct_hi) \
	: \
	:"memory"); \
} while (0)


/* Emit a warning every this-many failed sync polls. */
#define CHECK_WARNING_TIMERS 10

/* Expected syscnt high word; incremented once per GPT7 wrap irq. */
static unsigned int loop = 0;
804
/*
 * GPT7 wrap interrupt: each wrap of the 32-bit assist timer should
 * coincide with a carry into the syscnt (GPT6) high word.  Poll until
 * GPT_CNTH matches the wrap count (warning periodically while it does
 * not), then sanity-check that the ARM CNTPCT high word agrees.
 */
static void syscnt_assist_handler(unsigned long data)
{
	unsigned int assist_cnt;
	unsigned int syscnt_cnt[2] = {0};

	unsigned int cnth;
	unsigned int pct_lo, pct_hi;

	int cnt = 0;

	struct gpt_device *assist_dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);
	struct gpt_device *syscnt_dev = id_to_dev(GPT_SYSCNT_ID);

	__gpt_get_cnt(assist_dev, &assist_cnt);
	__gpt_get_cnt(syscnt_dev, syscnt_cnt);

	loop++;

	do {
		cnt++;
		cnth = DRV_Reg32(syscnt_dev->base_addr + GPT_CNTH);
		/* warn once every CHECK_WARNING_TIMERS polls after the first batch */
		if ((cnt / CHECK_WARNING_TIMERS) && !(cnt % CHECK_WARNING_TIMERS)) {
			printk("[%s]WARNING: fail to sync GPT_CNTH!! assist(0x%08x),"
				"syscnt(0x%08x,0x%08x),cnth(0x%08x),loop(0x%08x),cnt(%d)\n",
				__func__, assist_cnt, syscnt_cnt[0], syscnt_cnt[1],
				cnth, loop, cnt);
		}
	} while (cnth != loop);

	read_cntpct(pct_lo, pct_hi);
	WARN_ON(pct_hi != loop);

	printk("[%s]syscnt assist IRQ!! assist(0x%08x),syscnt(0x%08x,0x%08x),"
		"cnth:pct_hi:loop(0x%08x,0x%08x,0x%08x),cnt(%d)\n", __func__,
		assist_cnt, syscnt_cnt[0], syscnt_cnt[1], cnth, pct_hi, loop, cnt);
}
841
/*
 * After system resume, re-sync the software wrap counter from the
 * syscnt high word.  The retry loop guards against GPT7 wrapping
 * between the two assist reads (second read smaller than the first).
 */
static void syscnt_assist_resume(void)
{
	unsigned int old_loop;
	unsigned int assist_cnt1, assist_cnt2;
	unsigned int syscnt_cnt[2] = {0};

	struct gpt_device *assist_dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);
	struct gpt_device *syscnt_dev = id_to_dev(GPT_SYSCNT_ID);

	do {
		__gpt_get_cnt(assist_dev, &assist_cnt1);
		__gpt_get_cnt(syscnt_dev, syscnt_cnt);
		__gpt_ack_irq(assist_dev);
		__gpt_get_cnt(assist_dev, &assist_cnt2);
	} while (assist_cnt1 > assist_cnt2);

	old_loop = loop;
	loop = syscnt_cnt[1];

	printk("[%s]assist(0x%08x, 0x%08x),syscnt(0x%08x,0x%08x),loop(%u->%u)\n",
		__func__, assist_cnt1, assist_cnt2, syscnt_cnt[0], syscnt_cnt[1],
		old_loop, loop);
}
865
static struct syscore_ops syscnt_assist_syscore_ops = {
	.resume = syscnt_assist_resume,
};

/* Register the resume hook; __init, called once from setup below. */
static int __init syscnt_assist_init_ops(void)
{
	register_syscore_ops(&syscnt_assist_syscore_ops);
	return 0;
}

/*
 * GPT7 repeats over the full 32-bit range so its irq marks every wrap
 * of the syscnt low word.  Not auto-started here (GPT_NOAUTOEN);
 * start_syscnt_assist() kicks it off once the syscnt itself runs.
 */
static inline void setup_syscnt_assist(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);

	setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
		0xFFFFFFFF, syscnt_assist_handler, GPT_ISR | GPT_NOAUTOEN);

	syscnt_assist_init_ops();
}

/* Start the assist timer configured by setup_syscnt_assist(). */
static inline void start_syscnt_assist(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ASSIST_ID);

	__gpt_start(dev);
}
892
893#else
/* Stubs when CONFIG_SYSCNT_ASSIST is disabled. */
static inline void setup_syscnt_assist(void) {}
static inline void start_syscnt_assist(void) {}
896#endif
897
/*
 * System timer init (invoked through mt6582_gpt.init_func): snapshot
 * the bootloader-running GPT4 for boot-time accounting, reset every
 * timer, then bring up the clockevent, clocksource and system counter.
 */
static void mt_gpt_init(void)
{
	int i;
	unsigned long save_flags;
	boot_time_value = xgpt_boot_up_time(); /*record the time when init GPT*/

	gpt_update_lock(save_flags);

	gpt_devs_init();

	for (i = 0; i < NR_GPTS; i++) {
		__gpt_reset(&gpt_devs[i]);
	}

	setup_clkevt();

	setup_clksrc();
#if 1 //fix me after bring up

	// if (CHIP_SW_VER_01 <= mt_get_chip_sw_ver()) {
	// setup_syscnt_assist();
	//}
#endif
	setup_syscnt();

#if 1 //fix me after bring up

	// if (CHIP_SW_VER_01 <= mt_get_chip_sw_ver()) {
	// start_syscnt_assist();
	//}
#endif
	gpt_update_unlock(save_flags);
}
931
932#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
/*
 * Legacy (< 3.10) procfs read callback for /proc/gpt_stat: one line
 * per timer showing whether it is requested and currently counting.
 */
static int gpt_stat_read(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	char *p = page;
	int len = 0;
	int i = 0;
	int in_use;
	int is_counting;

	p += sprintf(p, "\n(HW Timer) GPT Status :\n");
	p += sprintf(p, "=========================================\n");

	for (i = 0; i < NR_GPTS; i++) {
		in_use = gpt_devs[i].flags & GPT_IN_USE;
		is_counting = gpt_is_counting(i);
		p += sprintf(p, "[GPT%d]in_use:%s, is_counting:%s\n", i+1,
			in_use ? "Y" : "N", is_counting ? "Y" : "N");
	}

	/* standard proc_read bookkeeping: window the buffer by off/count */
	*start = page + off;

	len = p - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	*eof = 1;

	return len < count ? len : count;
}
964#else
965static int gpt_stat_read_show(struct seq_file *m, void *v)
966{
967 int i = 0;
968 int in_use;
969 int is_counting;
970
971 seq_printf(m, "\n(HW Timer) GPT Status :\n");
972 seq_printf(m, "=========================================\n");
973 for (i = 0; i < NR_GPTS; i++) {
974 in_use = gpt_devs[i].flags & GPT_IN_USE;
975 is_counting = gpt_is_counting(i);
976 seq_printf(m, "[GPT%d]in_use:%s, is_counting:%s\n", i+1,
977 in_use ? "Y" : "N", is_counting ? "Y" : "N");
978 }
979
980 return 0;
981}
982
983static int gpt_stat_read_open(struct inode *inode, struct file *file)
984{
985 return single_open(file, gpt_stat_read_show, NULL);
986}
987
988static const struct file_operations gpt_stat_read_fops = {
989 .open = gpt_stat_read_open,
990 .read = seq_read,
991 .llseek = seq_lseek,
992 .release= seq_release,
993};
994#endif
995static int __init gpt_mod_init(void)
996{
997#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
998 create_proc_read_entry("gpt_stat", S_IRUGO, NULL, gpt_stat_read, NULL);
999#else
1000 proc_create("gpt_stat", S_IRUGO, NULL, &gpt_stat_read_fops);
1001#endif
1002
1003#if 0
1004#ifndef CONFIG_MT8127_FPGA
1005
1006 printk("GPT: chipver=%d\n", mt_get_chip_sw_ver());
1007#else
1008 printk("GPT: FPGA2\n" );
1009#endif
1010
1011#endif
1012 printk("GPT: iniit\n" );
1013
1014 return 0;
1015}
1016module_init(gpt_mod_init);
1017
1018MODULE_DESCRIPTION("MT6582 GPT Driver v0.1");
1019MODULE_LICENSE("GPL");