1 #include <linux/init.h>
2 #include <linux/module.h>
3 #include <linux/kernel.h>
4 #include <linux/spinlock.h>
5 #include <linux/interrupt.h>
7 #include <linux/proc_fs.h>
8 #include <linux/syscore_ops.h>
9 #include <linux/sched_clock.h>
10 #include <linux/version.h>
11 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
12 #include <linux/seq_file.h>
14 #include <mach/mt_reg_base.h>
15 #include <mach/mt_gpt.h>
16 #include <mach/mt_timer.h>
17 #include <mach/irqs.h>
18 //#include <mach/mt_boot.h>
20 //#define CONFIG_CLKSRC_64_BIT
23 #ifdef CONFIG_MT8127_FPGA
24 #define SYS_CLK_RATE (6000000) /* FPGA clock source is 6M */
26 #define SYS_CLK_RATE (13000000)
29 /* MT6582 GPT Usage Allocation */
30 #ifndef CONFIG_MT8127_FPGA
31 #define CONFIG_HAVE_SYSCNT
34 #ifdef CONFIG_HAVE_SYSCNT
35 //#define CONFIG_SYSCNT_ASSIST
37 #define GPT_SYSCNT_ID (GPT6)
39 #ifdef CONFIG_SYSCNT_ASSIST
40 #define GPT_SYSCNT_ASSIST_ID (GPT7)
44 #define GPT_CLKEVT_ID (GPT1)
46 #ifdef CONFIG_CLKSRC_64_BIT
47 #define GPT_CLKSRC_ID (GPT6)
49 #define GPT_CLKSRC_ID (GPT3)
52 #define GPT4_1MS_TICK ((U32)13000) // 1000000 / 76.92ns = 13000.520
53 #define GPT_IRQEN (APMCU_GPTIMER_BASE + 0x0000)
54 #define GPT_IRQSTA (APMCU_GPTIMER_BASE + 0x0004)
55 #define GPT_IRQACK (APMCU_GPTIMER_BASE + 0x0008)
56 #define GPT1_BASE (APMCU_GPTIMER_BASE + 0x0010)
57 #define GPT4_BASE (APMCU_GPTIMER_BASE + 0x0040)
58 #define GPT7_BASE (APMCU_GPTIMER_BASE + 0x008C)
61 #define GPT_CON (0x00)
62 #define GPT_CLK (0x04)
63 #define GPT_CNT (0x08)
64 #define GPT_CMP (0x0C)
65 #define GPT_CNTH (0x18)
66 #define GPT_CMPH (0x1C)
68 #define GPT_CON_ENABLE (0x1 << 0)
69 #define GPT_CON_CLRCNT (0x1 << 1)
70 #define GPT_CON_OPMODE (0x3 << 4)
72 #define GPT_CLK_CLKDIV (0xf << 0)
73 #define GPT_CLK_CLKSRC (0x1 << 4)
75 #define GPT_OPMODE_MASK (0x3)
76 #define GPT_CLKDIV_MASK (0xf)
77 #define GPT_CLKSRC_MASK (0x1)
79 #define GPT_OPMODE_OFFSET (4)
80 #define GPT_CLKSRC_OFFSET (4)
83 #define GPT_ISR (0x0010)
84 #define GPT_IN_USE (0x0100)
86 #define GPT_FEAT_64_BIT (0x0001)
95 void (*func
)(unsigned long);
98 unsigned int base_addr
;
100 static struct gpt_device gpt_devs
[NR_GPTS
];
/*
 * GPT4 count captured before init clears it — records the kernel start
 * time between LK (bootloader) and the kernel, in milliseconds.
 */
static unsigned int boot_time_value = 0;
105 static unsigned int xgpt_boot_up_time(void)
108 tick
= DRV_Reg32(GPT4_BASE
+ GPT_CNT
);
109 return ((tick
+ (GPT4_1MS_TICK
- 1)) / GPT4_1MS_TICK
);
111 /**************************************************************************************************************************/
113 static struct gpt_device
*id_to_dev(unsigned int id
)
115 return id
< NR_GPTS
? gpt_devs
+ id
: NULL
;
/* Serializes all GPT register/descriptor updates (IRQ-safe). */
static DEFINE_SPINLOCK(gpt_lock);

/* do/while wrappers not visible in this chunk — reconstructed. */
#define gpt_update_lock(flags) \
	do { \
		spin_lock_irqsave(&gpt_lock, flags); \
	} while (0)

#define gpt_update_unlock(flags) \
	do { \
		spin_unlock_irqrestore(&gpt_lock, flags); \
	} while (0)
132 static inline void noop(unsigned long data
) { }
133 static void(*handlers
[])(unsigned long) = {
144 static struct tasklet_struct task
[NR_GPTS
];
145 static void task_sched(unsigned long data
)
147 unsigned int id
= (unsigned int)data
;
148 tasklet_schedule(&task
[id
]);
151 static void __gpt_set_handler(struct gpt_device
*dev
, void (*func
)(unsigned long))
154 if (dev
->flags
& GPT_ISR
)
155 handlers
[dev
->id
] = func
;
157 tasklet_init(&task
[dev
->id
], func
, 0);
158 handlers
[dev
->id
] = task_sched
;
164 static inline unsigned int gpt_get_and_ack_irq(void)
168 unsigned int status
= DRV_Reg32(GPT_IRQSTA
);
170 for (id
= GPT1
; id
< NR_GPTS
; id
++) {
173 DRV_WriteReg32(GPT_IRQACK
, mask
);
181 static irqreturn_t
gpt_handler(int irq
, void *dev_id
)
183 unsigned int id
= gpt_get_and_ack_irq();
184 struct gpt_device
*dev
= id_to_dev(id
);
187 if (!(dev
->flags
& GPT_ISR
)) {
190 handlers
[id
]((unsigned long)dev_id
);
193 printk(KERN_WARNING
"GPT id is %d\n", id
);
200 static void __gpt_enable_irq(struct gpt_device
*dev
)
202 DRV_SetReg32(GPT_IRQEN
, 0x1 << (dev
->id
));
205 static void __gpt_disable_irq(struct gpt_device
*dev
)
207 DRV_ClrReg32(GPT_IRQEN
, 0x1 << (dev
->id
));
210 static void __gpt_ack_irq(struct gpt_device
*dev
)
212 DRV_WriteReg32(GPT_IRQACK
, 0x1 << (dev
->id
));
215 static void __gpt_reset(struct gpt_device
*dev
)
217 DRV_WriteReg32(dev
->base_addr
+ GPT_CON
, 0x0);
218 __gpt_disable_irq(dev
);
220 DRV_WriteReg32(dev
->base_addr
+ GPT_CLK
, 0x0);
221 DRV_WriteReg32(dev
->base_addr
+ GPT_CON
, 0x2);
222 DRV_WriteReg32(dev
->base_addr
+ GPT_CMP
, 0x0);
223 if (dev
->features
& GPT_FEAT_64_BIT
) {
224 DRV_WriteReg32(dev
->base_addr
+ GPT_CMPH
, 0);
228 static void __gpt_clrcnt(struct gpt_device
*dev
)
230 DRV_SetReg32(dev
->base_addr
+ GPT_CON
, GPT_CON_CLRCNT
);
231 while (DRV_Reg32(dev
->base_addr
+ GPT_CNT
)) {
235 static void __gpt_start(struct gpt_device
*dev
)
237 DRV_SetReg32(dev
->base_addr
+ GPT_CON
, GPT_CON_ENABLE
);
/*
 * Clear the counter, then start. The original single-write form
 * (ENABLE | CLRCNT) is commented out; the replacement body lines are not
 * visible in this chunk — reconstructed as clrcnt + start. TODO confirm.
 */
static void __gpt_start_from_zero(struct gpt_device *dev)
{
	/* DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE | GPT_CON_CLRCNT); */
	__gpt_clrcnt(dev);
	__gpt_start(dev);
}
247 /* gpt is counting or not */
248 static int __gpt_get_status(struct gpt_device
*dev
)
250 return !!(DRV_Reg32(dev
->base_addr
+ GPT_CON
) & GPT_CON_ENABLE
);
253 static void __gpt_stop(struct gpt_device
*dev
)
255 DRV_ClrReg32(dev
->base_addr
+ GPT_CON
, GPT_CON_ENABLE
);
258 static void __gpt_set_mode(struct gpt_device
*dev
, unsigned int mode
)
260 unsigned int ctl
= DRV_Reg32(dev
->base_addr
+ GPT_CON
);
261 mode
<<= GPT_OPMODE_OFFSET
;
263 ctl
&= ~GPT_CON_OPMODE
;
266 DRV_WriteReg32(dev
->base_addr
+ GPT_CON
, ctl
);
271 static void __gpt_set_clk(struct gpt_device
*dev
, unsigned int clksrc
, unsigned int clkdiv
)
273 unsigned int clk
= (clksrc
<< GPT_CLKSRC_OFFSET
) | clkdiv
;
274 DRV_WriteReg32(dev
->base_addr
+ GPT_CLK
, clk
);
276 dev
->clksrc
= clksrc
;
277 dev
->clkdiv
= clkdiv
;
280 static void __gpt_set_cmp(struct gpt_device
*dev
, unsigned int cmpl
,
283 DRV_WriteReg32(dev
->base_addr
+ GPT_CMP
, cmpl
);
286 if (dev
->features
& GPT_FEAT_64_BIT
) {
287 DRV_WriteReg32(dev
->base_addr
+ GPT_CMPH
, cmph
);
292 static void __gpt_get_cmp(struct gpt_device
*dev
, unsigned int *ptr
)
294 *ptr
= DRV_Reg32(dev
->base_addr
+ GPT_CMP
);
295 if (dev
->features
& GPT_FEAT_64_BIT
) {
296 *(++ptr
) = DRV_Reg32(dev
->base_addr
+ GPT_CMPH
);
300 static void __gpt_get_cnt(struct gpt_device
*dev
, unsigned int *ptr
)
302 *ptr
= DRV_Reg32(dev
->base_addr
+ GPT_CNT
);
303 if (dev
->features
& GPT_FEAT_64_BIT
) {
304 *(++ptr
) = DRV_Reg32(dev
->base_addr
+ GPT_CNTH
);
308 static void __gpt_set_flags(struct gpt_device
*dev
, unsigned int flags
)
313 static void gpt_devs_init(void)
316 for (i
= 0; i
< NR_GPTS
; i
++) {
320 gpt_devs
[i
].base_addr
= GPT7_BASE
;
324 gpt_devs
[i
].base_addr
= GPT1_BASE
+ 0x10 * i
;
328 gpt_devs
[GPT6
].features
|= GPT_FEAT_64_BIT
;
331 static void setup_gpt_dev_locked(struct gpt_device
*dev
, unsigned int mode
,
332 unsigned int clksrc
, unsigned int clkdiv
, unsigned int cmp
,
333 void (*func
)(unsigned long), unsigned int flags
)
335 __gpt_set_flags(dev
, flags
| GPT_IN_USE
);
337 __gpt_set_mode(dev
, mode
& GPT_OPMODE_MASK
);
338 __gpt_set_clk(dev
, clksrc
& GPT_CLKSRC_MASK
, clkdiv
& GPT_CLKDIV_MASK
);
341 __gpt_set_handler(dev
, func
);
343 if (dev
->mode
!= GPT_FREE_RUN
) {
344 __gpt_set_cmp(dev
, cmp
, 0);
345 if (!(dev
->flags
& GPT_NOIRQEN
)) {
346 __gpt_enable_irq(dev
);
350 if (!(dev
->flags
& GPT_NOAUTOEN
))
355 int request_gpt(unsigned int id
, unsigned int mode
, unsigned int clksrc
,
356 unsigned int clkdiv
, unsigned int cmp
,
357 void (*func
)(unsigned long), unsigned int flags
)
359 unsigned long save_flags
;
360 struct gpt_device
*dev
= id_to_dev(id
);
364 if (dev
->flags
& GPT_IN_USE
) {
365 printk(KERN_ERR
"%s: GPT%d is in use!\n", __func__
, (id
+ 1));
369 gpt_update_lock(save_flags
);
370 setup_gpt_dev_locked(dev
, mode
, clksrc
, clkdiv
, cmp
, func
, flags
);
371 gpt_update_unlock(save_flags
);
375 EXPORT_SYMBOL(request_gpt
);
377 static void release_gpt_dev_locked(struct gpt_device
*dev
)
381 handlers
[dev
->id
] = noop
;
387 int free_gpt(unsigned int id
)
389 unsigned long save_flags
;
390 struct gpt_device
*dev
= id_to_dev(id
);
394 if (!(dev
->flags
& GPT_IN_USE
))
397 gpt_update_lock(save_flags
);
398 release_gpt_dev_locked(dev
);
399 gpt_update_unlock(save_flags
);
403 EXPORT_SYMBOL(free_gpt
);
405 int start_gpt(unsigned int id
)
407 unsigned long save_flags
;
408 struct gpt_device
*dev
= id_to_dev(id
);
413 if (!(dev
->flags
& GPT_IN_USE
)) {
414 printk(KERN_ERR
"%s: GPT%d is not in use!\n", __func__
, id
);
418 gpt_update_lock(save_flags
);
421 gpt_update_unlock(save_flags
);
425 EXPORT_SYMBOL(start_gpt
);
427 int stop_gpt(unsigned int id
)
429 unsigned long save_flags
;
430 struct gpt_device
*dev
= id_to_dev(id
);
434 if (!(dev
->flags
& GPT_IN_USE
)) {
435 printk(KERN_ERR
"%s: GPT%d is not in use!\n", __func__
, id
);
439 gpt_update_lock(save_flags
);
441 gpt_update_unlock(save_flags
);
445 EXPORT_SYMBOL(stop_gpt
);
447 int restart_gpt(unsigned int id
)
449 unsigned long save_flags
;
450 struct gpt_device
*dev
= id_to_dev(id
);
455 if (!(dev
->flags
& GPT_IN_USE
)) {
456 printk(KERN_ERR
"%s: GPT%d is not in use!\n", __func__
, id
);
460 gpt_update_lock(save_flags
);
462 gpt_update_unlock(save_flags
);
466 EXPORT_SYMBOL(restart_gpt
);
469 int gpt_is_counting(unsigned int id
)
471 unsigned long save_flags
;
473 struct gpt_device
*dev
= id_to_dev(id
);
478 if (!(dev
->flags
& GPT_IN_USE
)) {
479 printk(KERN_ERR
"%s: GPT%d is not in use!\n", __func__
, id
);
483 gpt_update_lock(save_flags
);
484 is_counting
= __gpt_get_status(dev
);
485 gpt_update_unlock(save_flags
);
489 EXPORT_SYMBOL(gpt_is_counting
);
492 int gpt_set_cmp(unsigned int id
, unsigned int val
)
494 unsigned long save_flags
;
495 struct gpt_device
*dev
= id_to_dev(id
);
500 if (dev
->mode
== GPT_FREE_RUN
)
503 gpt_update_lock(save_flags
);
504 __gpt_set_cmp(dev
, val
, 0);
505 gpt_update_unlock(save_flags
);
509 EXPORT_SYMBOL(gpt_set_cmp
);
511 int gpt_get_cmp(unsigned int id
, unsigned int *ptr
)
513 unsigned long save_flags
;
514 struct gpt_device
*dev
= id_to_dev(id
);
518 gpt_update_lock(save_flags
);
519 __gpt_get_cmp(dev
, ptr
);
520 gpt_update_unlock(save_flags
);
524 EXPORT_SYMBOL(gpt_get_cmp
);
526 int gpt_get_cnt(unsigned int id
, unsigned int *ptr
)
528 unsigned long save_flags
;
529 struct gpt_device
*dev
= id_to_dev(id
);
533 if (!(dev
->features
& GPT_FEAT_64_BIT
)) {
534 __gpt_get_cnt(dev
, ptr
);
536 gpt_update_lock(save_flags
);
537 __gpt_get_cnt(dev
, ptr
);
538 gpt_update_unlock(save_flags
);
543 EXPORT_SYMBOL(gpt_get_cnt
);
546 int gpt_check_irq(unsigned int id
)
548 unsigned int mask
= 0x1 << id
;
549 unsigned int status
= DRV_Reg32(GPT_IRQSTA
);
551 return (status
& mask
) ? 1 : 0;
553 EXPORT_SYMBOL(gpt_check_irq
);
556 int gpt_check_and_ack_irq(unsigned int id
)
558 unsigned int mask
= 0x1 << id
;
559 unsigned int status
= DRV_Reg32(GPT_IRQSTA
);
562 DRV_WriteReg32(GPT_IRQACK
, mask
);
568 EXPORT_SYMBOL(gpt_check_and_ack_irq
);
570 unsigned int gpt_boot_time(void)
572 return boot_time_value
;
574 EXPORT_SYMBOL(gpt_boot_time
);
576 static int mt_gpt_set_next_event(unsigned long cycles
,
577 struct clock_event_device
*evt
)
579 struct gpt_device
*dev
= id_to_dev(GPT_CLKEVT_ID
);
581 //printk("[%s]entry, evt=%lu\n", __func__, cycles);
584 __gpt_set_cmp(dev
, cycles
, 0);
585 __gpt_start_from_zero(dev
);
590 static void mt_gpt_set_mode(enum clock_event_mode mode
,
591 struct clock_event_device
*evt
)
593 struct gpt_device
*dev
= id_to_dev(GPT_CLKEVT_ID
);
595 //printk("[%s]entry, mode=%d\n", __func__, mode);
597 case CLOCK_EVT_MODE_PERIODIC
:
599 __gpt_set_mode(dev
, GPT_REPEAT
);
600 __gpt_enable_irq(dev
);
601 __gpt_start_from_zero(dev
);
604 case CLOCK_EVT_MODE_ONESHOT
:
606 __gpt_set_mode(dev
, GPT_ONE_SHOT
);
607 __gpt_enable_irq(dev
);
608 __gpt_start_from_zero(dev
);
611 case CLOCK_EVT_MODE_UNUSED
:
612 case CLOCK_EVT_MODE_SHUTDOWN
:
614 __gpt_disable_irq(dev
);
616 case CLOCK_EVT_MODE_RESUME
:
621 static cycle_t
mt_gpt_read(struct clocksource
*cs
)
624 unsigned int cnt
[2] = {0, 0};
625 struct gpt_device
*dev
= id_to_dev(GPT_CLKSRC_ID
);
626 __gpt_get_cnt(dev
, cnt
);
628 cycles
= ((cycle_t
)(cnt
[1])) << 32 | (cycle_t
)(cnt
[0]);
633 static long notrace
mt_read_sched_clock(void)
635 return mt_gpt_read(NULL
);
638 static void mt_gpt_init(void);
639 struct mt_clock mt6582_gpt
=
643 .name
= "mt6582-gpt",
644 .features
= CLOCK_EVT_FEAT_ONESHOT
,
647 .set_next_event
= mt_gpt_set_next_event
,
648 .set_mode
= mt_gpt_set_mode
,
652 .name
= "mt6582-gpt",
655 .mask
= CLOCKSOURCE_MASK(32),
657 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
661 .name
= "mt6582-gpt",
662 .flags
= IRQF_DISABLED
| IRQF_TIMER
| IRQF_IRQPOLL
|IRQF_TRIGGER_LOW
,
663 .handler
= gpt_handler
,
664 .dev_id
= &mt6582_gpt
.clockevent
,
665 .irq
= MT6582_APARM_GPTTIMER_IRQ_LINE
,
667 .init_func
= mt_gpt_init
,
670 static void clkevt_handler(unsigned long data
)
672 struct clock_event_device
*evt
= (struct clock_event_device
*)data
;
673 evt
->event_handler(evt
);
676 static inline void setup_clkevt(void)
679 struct clock_event_device
*evt
= &mt6582_gpt
.clockevent
;
680 struct gpt_device
*dev
= id_to_dev(GPT_CLKEVT_ID
);
682 evt
->mult
= div_sc(SYS_CLK_RATE
, NSEC_PER_SEC
, evt
->shift
);
683 evt
->max_delta_ns
= clockevent_delta2ns(0xffffffff, evt
);
684 evt
->min_delta_ns
= clockevent_delta2ns(3, evt
);
685 evt
->cpumask
= cpumask_of(0);
686 #ifndef CONFIG_MT8127_FPGA
688 setup_gpt_dev_locked(dev
, GPT_REPEAT
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
689 SYS_CLK_RATE
/ HZ
, clkevt_handler
, GPT_ISR
);
691 setup_gpt_dev_locked(dev
, GPT_REPEAT
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
692 SYS_CLK_RATE
/ HZ
, clkevt_handler
, GPT_ISR
);
695 __gpt_get_cmp(dev
, &cmp
);
696 printk("GPT1_CMP = %d, HZ = %d\n", cmp
, HZ
);
699 #ifndef CONFIG_CLKSRC_64_BIT
700 static inline void setup_clksrc(void)
702 struct clocksource
*cs
= &mt6582_gpt
.clocksource
;
703 struct gpt_device
*dev
= id_to_dev(GPT_CLKSRC_ID
);
705 cs
->mult
= clocksource_hz2mult(SYS_CLK_RATE
, cs
->shift
);
707 setup_gpt_dev_locked(dev
, GPT_FREE_RUN
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
709 sched_clock_register((void *)mt_read_sched_clock
, 32, SYS_CLK_RATE
);
713 static u32 notrace
jiffy_sched_clock_read(void)
715 return (u32
)(jiffies
- INITIAL_JIFFIES
);
718 static inline u64
cyc_to_ns(u64 cyc
, u32 mult
, u32 shift
)
720 return (cyc
* mult
) >> shift
;
723 static u32 g_clksrc_init
=0;
725 unsigned long long notrace
sched_clock(void)
727 struct clocksource
*cs
= &mt6582_gpt
.clocksource
;
730 if(0 == g_clksrc_init
)
732 return jiffy_sched_clock_read();
736 cycles
= mt_gpt_read(cs
);
738 #ifndef CONFIG_MT8127_FPGA
739 do_div(cycles
, 13 << 1);
741 do_div(cycles
, 6 << 1);
744 //return cyc_to_ns(cycles,cs->mult, cs->shift);
748 static inline void setup_clksrc(void)
750 struct clocksource
*cs
= &mt6582_gpt
.clocksource
;
751 struct gpt_device
*dev
= id_to_dev(GPT_CLKSRC_ID
);
753 cs
->mult
= clocksource_hz2mult(SYS_CLK_RATE
, cs
->shift
);
754 #ifndef CONFIG_MT8127_FPGA
756 setup_gpt_dev_locked(dev
, GPT_FREE_RUN
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
760 setup_gpt_dev_locked(dev
, GPT_FREE_RUN
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
768 #ifdef CONFIG_HAVE_SYSCNT
769 static inline void setup_syscnt(void)
771 struct gpt_device
*dev
= id_to_dev(GPT_SYSCNT_ID
);
773 #ifndef CONFIG_MT8127_FPGA
775 setup_gpt_dev_locked(dev
, GPT_FREE_RUN
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
779 setup_gpt_dev_locked(dev
, GPT_FREE_RUN
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
783 printk("fwq sysc count \n");
/* No system counter configured: stub. */
static inline void setup_syscnt(void) {}
789 #if defined(CONFIG_HAVE_SYSCNT) && defined(CONFIG_SYSCNT_ASSIST)
/*
 * Read the ARM physical counter CNTPCT (CP15 64-bit MRRC) into two u32s.
 * The macro's do/while wrapper and any clobber list are not visible —
 * reconstructed. TODO confirm.
 */
#define read_cntpct(cntpct_lo, cntpct_hi) \
	do { \
		__asm__ __volatile__( \
			"MRRC p15, 0, %0, %1, c14\n" \
			: "=r"(cntpct_lo), "=r"(cntpct_hi)); \
	} while (0)

/* Warn every Nth retry while waiting for GPT_CNTH to sync. */
#define CHECK_WARNING_TIMERS 10

/* Cached high word ("wrap count") of the system counter; maintained by the
 * assist IRQ handler and resynced on resume. */
static unsigned int loop = 0;
805 static void syscnt_assist_handler(unsigned long data
)
807 unsigned int assist_cnt
;
808 unsigned int syscnt_cnt
[2] = {0};
811 unsigned int pct_lo
, pct_hi
;
815 struct gpt_device
*assist_dev
= id_to_dev(GPT_SYSCNT_ASSIST_ID
);
816 struct gpt_device
*syscnt_dev
= id_to_dev(GPT_SYSCNT_ID
);
818 __gpt_get_cnt(assist_dev
, &assist_cnt
);
819 __gpt_get_cnt(syscnt_dev
, syscnt_cnt
);
825 cnth
= DRV_Reg32(syscnt_dev
->base_addr
+ GPT_CNTH
);
826 if ((cnt
/ CHECK_WARNING_TIMERS
) && !(cnt
% CHECK_WARNING_TIMERS
)) {
827 printk("[%s]WARNING: fail to sync GPT_CNTH!! assist(0x%08x),"
828 "syscnt(0x%08x,0x%08x),cnth(0x%08x),loop(0x%08x),cnt(%d)\n",
829 __func__
, assist_cnt
, syscnt_cnt
[0], syscnt_cnt
[1],
832 } while (cnth
!= loop
);
834 read_cntpct(pct_lo
, pct_hi
);
835 WARN_ON(pct_hi
!= loop
);
837 printk("[%s]syscnt assist IRQ!! assist(0x%08x),syscnt(0x%08x,0x%08x),"
838 "cnth:pct_hi:loop(0x%08x,0x%08x,0x%08x),cnt(%d)\n", __func__
,
839 assist_cnt
, syscnt_cnt
[0], syscnt_cnt
[1], cnth
, pct_hi
, loop
, cnt
);
842 static void syscnt_assist_resume(void)
844 unsigned int old_loop
;
845 unsigned int assist_cnt1
, assist_cnt2
;
846 unsigned int syscnt_cnt
[2] = {0};
848 struct gpt_device
*assist_dev
= id_to_dev(GPT_SYSCNT_ASSIST_ID
);
849 struct gpt_device
*syscnt_dev
= id_to_dev(GPT_SYSCNT_ID
);
852 __gpt_get_cnt(assist_dev
, &assist_cnt1
);
853 __gpt_get_cnt(syscnt_dev
, syscnt_cnt
);
854 __gpt_ack_irq(assist_dev
);
855 __gpt_get_cnt(assist_dev
, &assist_cnt2
);
856 } while (assist_cnt1
> assist_cnt2
);
859 loop
= syscnt_cnt
[1];
861 printk("[%s]assist(0x%08x, 0x%08x),syscnt(0x%08x,0x%08x),loop(%u->%u)\n",
862 __func__
, assist_cnt1
, assist_cnt2
, syscnt_cnt
[0], syscnt_cnt
[1],
866 static struct syscore_ops syscnt_assist_syscore_ops
= {
867 .resume
= syscnt_assist_resume
,
870 static int __init
syscnt_assist_init_ops(void)
872 register_syscore_ops(&syscnt_assist_syscore_ops
);
876 static inline void setup_syscnt_assist(void)
878 struct gpt_device
*dev
= id_to_dev(GPT_SYSCNT_ASSIST_ID
);
880 setup_gpt_dev_locked(dev
, GPT_REPEAT
, GPT_CLK_SRC_SYS
, GPT_CLK_DIV_1
,
881 0xFFFFFFFF, syscnt_assist_handler
, GPT_ISR
| GPT_NOAUTOEN
);
883 syscnt_assist_init_ops();
886 static inline void start_syscnt_assist(void)
888 struct gpt_device
*dev
= id_to_dev(GPT_SYSCNT_ASSIST_ID
);
/* No syscnt assist configured: stubs. */
static inline void setup_syscnt_assist(void) {}
static inline void start_syscnt_assist(void) {}
898 static void mt_gpt_init(void)
901 unsigned long save_flags
;
902 boot_time_value
= xgpt_boot_up_time(); /*record the time when init GPT*/
904 gpt_update_lock(save_flags
);
908 for (i
= 0; i
< NR_GPTS
; i
++) {
909 __gpt_reset(&gpt_devs
[i
]);
915 #if 1 //fix me after bring up
917 // if (CHIP_SW_VER_01 <= mt_get_chip_sw_ver()) {
918 // setup_syscnt_assist();
923 #if 1 //fix me after bring up
925 // if (CHIP_SW_VER_01 <= mt_get_chip_sw_ver()) {
926 // start_syscnt_assist();
929 gpt_update_unlock(save_flags
);
932 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
933 static int gpt_stat_read(char *page
, char **start
, off_t off
,
934 int count
, int *eof
, void *data
)
942 p
+= sprintf(p
, "\n(HW Timer) GPT Status :\n");
943 p
+= sprintf(p
, "=========================================\n");
945 for (i
= 0; i
< NR_GPTS
; i
++) {
946 in_use
= gpt_devs
[i
].flags
& GPT_IN_USE
;
947 is_counting
= gpt_is_counting(i
);
948 p
+= sprintf(p
, "[GPT%d]in_use:%s, is_counting:%s\n", i
+1,
949 in_use
? "Y" : "N", is_counting
? "Y" : "N");
962 return len
< count
? len
: count
;
965 static int gpt_stat_read_show(struct seq_file
*m
, void *v
)
971 seq_printf(m
, "\n(HW Timer) GPT Status :\n");
972 seq_printf(m
, "=========================================\n");
973 for (i
= 0; i
< NR_GPTS
; i
++) {
974 in_use
= gpt_devs
[i
].flags
& GPT_IN_USE
;
975 is_counting
= gpt_is_counting(i
);
976 seq_printf(m
, "[GPT%d]in_use:%s, is_counting:%s\n", i
+1,
977 in_use
? "Y" : "N", is_counting
? "Y" : "N");
983 static int gpt_stat_read_open(struct inode
*inode
, struct file
*file
)
985 return single_open(file
, gpt_stat_read_show
, NULL
);
988 static const struct file_operations gpt_stat_read_fops
= {
989 .open
= gpt_stat_read_open
,
992 .release
= seq_release
,
995 static int __init
gpt_mod_init(void)
997 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
998 create_proc_read_entry("gpt_stat", S_IRUGO
, NULL
, gpt_stat_read
, NULL
);
1000 proc_create("gpt_stat", S_IRUGO
, NULL
, &gpt_stat_read_fops
);
1004 #ifndef CONFIG_MT8127_FPGA
1006 printk("GPT: chipver=%d\n", mt_get_chip_sw_ver());
1008 printk("GPT: FPGA2\n" );
1012 printk("GPT: iniit\n" );
1016 module_init(gpt_mod_init
);
1018 MODULE_DESCRIPTION("MT6582 GPT Driver v0.1");
1019 MODULE_LICENSE("GPL");