/*
 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
 * Boojin Kim <boojin.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
17 #include <linux/of_irq.h>
18 #include <linux/of_address.h>
19 #include <linux/of_reserved_mem.h>
20 #include <linux/platform_device.h>
21 #include <linux/debugfs.h>
22 #include <linux/interrupt.h>
23 #include <linux/workqueue.h>
24 #include <linux/iio/iio.h>
25 #include <linux/wakelock.h>
26 #include <linux/delay.h>
27 #include <linux/firmware.h>
28 #include <linux/random.h>
29 #include <linux/rtc.h>
30 #include <linux/clk.h>
31 #include <linux/clk-provider.h>
32 #include <linux/timekeeping.h>
33 #include <linux/of_gpio.h>
34 #include <linux/fcntl.h>
35 #include <uapi/linux/sched/types.h>
37 #ifdef CONFIG_EXYNOS_ITMON
38 #include <soc/samsung/exynos-itmon.h>
41 #ifdef CONFIG_CHRE_SENSORHUB_HAL
49 #include "../../soc/samsung/cal-if/pmucal_shub.h"
51 #define WAIT_TIMEOUT_MS (1000)
52 enum { CHUB_ON
, CHUB_OFF
};
53 enum { C2A_ON
, C2A_OFF
};
55 const char *os_image
[SENSOR_VARIATION
] = {
67 #define USE_NO_PANIC_ON_POWERON /* hack for panic */
68 static DEFINE_MUTEX(reset_mutex
);
69 static DEFINE_MUTEX(pmu_shutdown_mutex
);
70 static DEFINE_MUTEX(log_mutex
);
71 static DEFINE_MUTEX(wt_mutex
);
73 void chub_wake_event(struct chub_alive
*event
)
75 atomic_set(&event
->flag
, 1);
76 wake_up_interruptible_sync(&event
->event
);
79 static int chub_wait_event(struct chub_alive
*event
)
81 atomic_set(&event
->flag
, 0);
82 return wait_event_interruptible_timeout(event
->event
,
83 atomic_read(&event
->flag
),
84 msecs_to_jiffies(WAIT_TIMEOUT_MS
* 2));
87 static int contexthub_get_token(struct contexthub_ipc_info
*ipc
)
89 if (atomic_read(&ipc
->in_reset
))
92 atomic_inc(&ipc
->in_use_ipc
);
96 static void contexthub_put_token(struct contexthub_ipc_info
*ipc
)
98 atomic_dec(&ipc
->in_use_ipc
);
101 /* host interface functions */
102 int contexthub_is_run(struct contexthub_ipc_info
*ipc
)
107 #ifdef CONFIG_CHRE_SENSORHUB_HAL
108 return nanohub_irq1_fired(ipc
->data
);
114 /* request contexthub to host driver */
115 int contexthub_request(struct contexthub_ipc_info
*ipc
)
120 #ifdef CONFIG_CHRE_SENSORHUB_HAL
121 return request_wakeup_timeout(ipc
->data
, WAIT_TIMEOUT_MS
);
127 /* rlease contexthub to host driver */
128 void contexthub_release(struct contexthub_ipc_info
*ipc
)
133 #ifdef CONFIG_CHRE_SENSORHUB_HAL
134 release_wakeup(ipc
->data
);
138 static inline void contexthub_notify_host(struct contexthub_ipc_info
*ipc
)
140 #ifdef CONFIG_CHRE_SENSORHUB_HAL
141 nanohub_handle_irq1(ipc
->data
);
147 #ifdef CONFIG_CHRE_SENSORHUB_HAL
148 /* by nanohub kernel RxBufStruct. packet header is 10 + 2 bytes to align */
152 u8 buf
[PACKET_SIZE_MAX
];
156 static int nanohub_mailbox_open(void *data
)
161 static void nanohub_mailbox_close(void *data
)
166 static int nanohub_mailbox_write(void *data
, uint8_t *tx
, int length
,
169 struct nanohub_data
*ipc
= data
;
171 return contexthub_ipc_write(ipc
->pdata
->mailbox_client
, tx
, length
, timeout
);
174 static int nanohub_mailbox_read(void *data
, uint8_t *rx
, int max_length
,
177 struct nanohub_data
*ipc
= data
;
179 return contexthub_ipc_read(ipc
->pdata
->mailbox_client
, rx
, max_length
, timeout
);
182 void nanohub_mailbox_comms_init(struct nanohub_comms
*comms
)
185 comms
->timeout_write
= 544;
186 comms
->timeout_ack
= 272;
187 comms
->timeout_reply
= 512;
188 comms
->open
= nanohub_mailbox_open
;
189 comms
->close
= nanohub_mailbox_close
;
190 comms
->write
= nanohub_mailbox_write
;
191 comms
->read
= nanohub_mailbox_read
;
195 static int contexthub_read_process(uint8_t *rx
, u8
*raw_rx
, u32 size
)
197 #ifdef CONFIG_CHRE_SENSORHUB_HAL
198 struct rxbuf
*rxstruct
;
199 struct nanohub_packet
*packet
;
201 rxstruct
= (struct rxbuf
*)raw_rx
;
202 packet
= (struct nanohub_packet
*)&rxstruct
->pre_preamble
;
203 memcpy_fromio(rx
, (void *)packet
, size
);
205 return NANOHUB_PACKET_SIZE(packet
->len
);
207 memcpy_fromio(rx
, (void *)raw_rx
, size
);
212 static int contexthub_ipc_drv_init(struct contexthub_ipc_info
*chub
)
214 struct device
*chub_dev
= chub
->dev
;
217 chub
->ipc_map
= ipc_get_chub_map();
223 chub
->ipc_map
->logbuf
.logbuf
.eq
= 0;
224 chub
->ipc_map
->logbuf
.logbuf
.dq
= 0;
225 chub
->fw_log
= log_register_buffer(chub_dev
, 0,
226 (void *)&chub
->ipc_map
->logbuf
.logbuf
,
231 if (chub
->irq_pin_len
) {
234 for (i
= 0; i
< chub
->irq_pin_len
; i
++) {
235 u32 irq
= gpio_to_irq(chub
->irq_pins
[i
]);
237 disable_irq_nosync(irq
);
238 dev_info(chub_dev
, "%s: %d irq (pin:%d) is for chub. disable it\n",
239 __func__
, irq
, chub
->irq_pins
[i
]);
243 #ifdef LOWLEVEL_DEBUG
244 chub
->dd_log_buffer
= vmalloc(SZ_256K
+ sizeof(struct LOG_BUFFER
*));
245 chub
->dd_log_buffer
->index_reader
= 0;
246 chub
->dd_log_buffer
->index_writer
= 0;
247 chub
->dd_log_buffer
->size
= SZ_256K
;
249 log_register_buffer(chub_dev
, 1, chub
->dd_log_buffer
, "dd", 0);
251 ret
= chub_dbg_init(chub
, chub
->chub_rt_log
.buffer
, chub
->chub_rt_log
.buffer_size
);
253 dev_err(chub_dev
, "%s: fails. ret:%d\n", __func__
, ret
);
258 #ifdef PACKET_LOW_DEBUG
259 static void debug_dumpbuf(unsigned char *buf
, int len
)
261 print_hex_dump(KERN_CONT
, "", DUMP_PREFIX_OFFSET
, 16, 1, buf
, len
,
266 static inline int get_recv_channel(struct recv_ctrl
*recv
)
269 unsigned long min_order
= 0;
270 int min_order_evt
= INVAL_CHANNEL
;
272 for (i
= 0; i
< IPC_BUF_NUM
; i
++) {
273 if (recv
->container
[i
]) {
275 min_order
= recv
->container
[i
];
277 } else if (recv
->container
[i
] < min_order
) {
278 min_order
= recv
->container
[i
];
284 if (min_order_evt
!= INVAL_CHANNEL
)
285 recv
->container
[min_order_evt
] = 0;
287 return min_order_evt
;
290 /* simple alive check function : don't use ipc map */
291 static bool contexthub_lowlevel_alive(struct contexthub_ipc_info
*ipc
)
295 atomic_set(&ipc
->chub_alive_lock
.flag
, 0);
296 ipc_hw_gen_interrupt(AP
, IRQ_EVT_CHUB_ALIVE
);
297 ret
= chub_wait_event(&ipc
->chub_alive_lock
);
298 dev_info(ipc
->dev
, "%s done: ret:%d\n", __func__
, ret
);
299 return atomic_read(&ipc
->chub_alive_lock
.flag
);
302 #define CHUB_RESET_THOLD (10)
303 /* handle errors of chub driver and fw */
304 static void handle_debug_work(struct contexthub_ipc_info
*ipc
, enum chub_err_type err
)
307 int alive
= contexthub_lowlevel_alive(ipc
);
309 dev_info(ipc
->dev
, "%s: err:%d, alive:%d, status:%d, in-reset:%d\n",
310 __func__
, err
, alive
, __raw_readl(&ipc
->chub_status
),
311 __raw_readl(&ipc
->in_reset
));
312 if ((atomic_read(&ipc
->chub_status
) == CHUB_ST_ERR
) || !alive
)
317 #if defined(CHUB_RESET_ENABLE)
320 dev_info(ipc
->dev
, "%s: request silent reset. err:%d, alive:%d, status:%d, in-reset:%d\n",
321 __func__
, err
, alive
, __raw_readl(&ipc
->chub_status
),
322 __raw_readl(&ipc
->in_reset
));
323 ret
= contexthub_reset(ipc
, 1, err
);
325 dev_warn(ipc
->dev
, "%s: fails to reset:%d. status:%d\n",
326 __func__
, ret
, __raw_readl(&ipc
->chub_status
));
328 dev_info(ipc
->dev
, "%s: chub reset! should be recovery\n",
331 dev_info(ipc
->dev
, "%s: chub hang. wait for sensor driver reset\n",
332 __func__
, err
, alive
, __raw_readl(&ipc
->chub_status
),
333 __raw_readl(&ipc
->in_reset
));
335 atomic_set(&ipc
->chub_status
, CHUB_ST_HANG
);
340 static void contexthub_handle_debug(struct contexthub_ipc_info
*ipc
,
341 enum chub_err_type err
, bool enable_wq
)
343 dev_info(ipc
->dev
, "%s: err:%d(cnt:%d), enable_wq:%d\n",
344 __func__
, err
, ipc
->err_cnt
[err
], enable_wq
);
346 /* set status in CHUB_ST_ERR */
347 if ((err
== CHUB_ERR_ITMON
) || (err
== CHUB_ERR_FW_WDT
) || (err
== CHUB_ERR_FW_FAULT
) || (err
== CHUB_ERR_CHUB_NO_RESPONSE
)) {
348 atomic_set(&ipc
->chub_status
, CHUB_ST_ERR
);
352 /* get chub-fw err */
353 if (err
== CHUB_ERR_NANOHUB
) {
354 enum ipc_debug_event fw_evt
;
356 if (contexthub_get_token(ipc
)) {
357 dev_warn(ipc
->dev
, "%s: get token\n", __func__
);
360 fw_evt
= ipc_read_debug_event(AP
);
361 if (fw_evt
== IPC_DEBUG_CHUB_FAULT
)
362 err
= CHUB_ERR_FW_FAULT
;
363 else if ((fw_evt
== IPC_DEBUG_CHUB_ASSERT
) || (fw_evt
== IPC_DEBUG_CHUB_ERROR
))
364 err
= CHUB_ERR_FW_ERROR
;
366 dev_warn(ipc
->dev
, "%s: unsupported fw_evt: %d\n", fw_evt
);
368 ipc_write_debug_event(AP
, 0);
369 contexthub_put_token(ipc
);
371 if (ipc
->err_cnt
[err
] > CHUB_RESET_THOLD
) {
372 atomic_set(&ipc
->chub_status
, CHUB_ST_ERR
);
373 ipc
->err_cnt
[err
] = 0;
374 dev_info(ipc
->dev
, "%s: err:%d(cnt:%d), enter error status\n",
375 __func__
, err
, ipc
->err_cnt
[err
]);
385 ipc
->cur_err
|= (1 << err
);
386 schedule_work(&ipc
->debug_work
);
388 handle_debug_work(ipc
, err
);
392 static void contexthub_select_os(struct contexthub_ipc_info
*ipc
)
395 u8 val
= (u8
) ipc_read_val(AP
);
397 dev_warn(ipc
->dev
, "%s os number is invalid\n");
402 strcpy(ipc
->os_name
, os_image
[val
]);
403 dev_info(ipc
->dev
, "%s selected os_name = %s\n", __func__
, ipc
->os_name
);
406 contexthub_download_image(ipc
, IPC_REG_OS
);
407 ipc_hw_write_shared_reg(AP
, ipc
->os_load
, SR_BOOT_MODE
);
408 ipc_write_val(AP
, 99);
410 msleep(WAIT_CHUB_MS
);
411 contexthub_ipc_write_event(ipc
, MAILBOX_EVT_CHUB_ALIVE
);
412 if (++trycnt
> WAIT_TRY_CNT
)
414 } while ((atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
));
416 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_RUN
)
417 dev_info(ipc
->dev
, "%s done. contexthub status is %d\n",
418 __func__
, atomic_read(&ipc
->chub_status
));
420 dev_warn(ipc
->dev
, "%s failed. contexthub status is %d\n",
421 __func__
, atomic_read(&ipc
->chub_status
));
423 dev_info(ipc
->dev
, "%s done: wakeup interrupt\n", __func__
);
424 chub_wake_event(&ipc
->poweron_lock
);
427 static DEFINE_MUTEX(dbg_mutex
);
428 static void handle_debug_work_func(struct work_struct
*work
)
430 struct contexthub_ipc_info
*ipc
=
431 container_of(work
, struct contexthub_ipc_info
, debug_work
);
434 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_POWER_ON
&& ipc
->sel_os
== false) {
435 contexthub_select_os(ipc
);
439 dev_info(ipc
->dev
, "%s: cur_err:0x%x, chub_stats:%d\n", __func__
, ipc
->cur_err
, atomic_read(&ipc
->chub_status
));
440 for (i
= 0; i
< CHUB_ERR_MAX
; i
++) {
441 if (ipc
->cur_err
& (1 << i
)) {
442 dev_info(ipc
->dev
, "%s: loop: err:%d, cur_err:0x%x\n", __func__
, i
, ipc
->cur_err
);
443 ipc
->cur_err
&= ~(1 << i
);
444 handle_debug_work(ipc
, i
);
449 void contexthub_print_rtlog(struct contexthub_ipc_info
*ipc
, bool loop
)
451 if (!atomic_read(&ipc
->log_work_active
)) {
452 if (contexthub_get_token(ipc
)) {
453 dev_warn(ipc
->dev
, "%s: get token\n", __func__
);
456 if (ipc_logbuf_outprint(&ipc
->chub_rt_log
, loop
))
457 chub_dbg_dump_hw(ipc
, CHUB_ERR_NANOHUB
);
458 contexthub_put_token(ipc
);
462 static void handle_log_work_func(struct work_struct
*work
)
464 struct contexthub_ipc_info
*ipc
=
465 container_of(work
, struct contexthub_ipc_info
, log_work
);
469 if (contexthub_get_token(ipc
)) {
470 chub_wait_event(&ipc
->reset_lock
);
475 atomic_set(&ipc
->log_work_active
, 0);
478 ipc_logbuf_flush_on(1);
479 mutex_lock(&log_mutex
);
480 if (ipc_logbuf_outprint(&ipc
->chub_rt_log
, 100))
481 chub_dbg_dump_hw(ipc
, CHUB_ERR_NANOHUB
);
482 mutex_unlock(&log_mutex
);
483 ipc_logbuf_flush_on(0);
484 contexthub_put_token(ipc
);
485 atomic_set(&ipc
->log_work_active
, 0);
488 static inline void clear_err_cnt(struct contexthub_ipc_info
*ipc
, enum chub_err_type err
)
490 if (ipc
->err_cnt
[err
])
491 ipc
->err_cnt
[err
] = 0;
494 int contexthub_ipc_read(struct contexthub_ipc_info
*ipc
, uint8_t *rx
, int max_length
,
501 u64 time
= 0; /* for debug */
503 if (!atomic_read(&ipc
->read_lock
.cnt
)) {
504 time
= sched_clock();
506 spin_lock_irqsave(&ipc
->read_lock
.event
.lock
, flag
);
507 atomic_inc(&ipc
->read_lock
.flag
);
509 wait_event_interruptible_timeout_locked(ipc
->read_lock
.event
,
510 atomic_read(&ipc
->read_lock
.cnt
),
511 msecs_to_jiffies(timeout
));
512 atomic_dec(&ipc
->read_lock
.flag
);
513 spin_unlock_irqrestore(&ipc
->read_lock
.event
.lock
, flag
);
516 "fails to get read ret:%d timeout:%d\n", ret
, timeout
);
519 if (__raw_readl(&ipc
->chub_status
) != CHUB_ST_RUN
) {
520 dev_warn(ipc
->dev
, "%s: chub isn't run:%d\n", __raw_readl(&ipc
->chub_status
));
524 if (contexthub_get_token(ipc
)) {
525 dev_warn(ipc
->dev
, "no-active: read fails\n");
529 if (atomic_read(&ipc
->read_lock
.cnt
)) {
530 rxbuf
= ipc_read_data(IPC_DATA_C2A
, &size
);
532 ret
= contexthub_read_process(rx
, rxbuf
, size
);
533 atomic_dec(&ipc
->read_lock
.cnt
);
536 dev_dbg(ipc
->dev
, "%s: read timeout(%d): c2aq_cnt:%d, recv_cnt:%d during %lld ns\n",
537 __func__
, ipc
->err_cnt
[CHUB_ERR_READ_FAIL
],
538 ipc_get_data_cnt(IPC_DATA_C2A
), atomic_read(&ipc
->read_lock
.cnt
),
539 sched_clock() - time
);
540 if (ipc_get_data_cnt(IPC_DATA_C2A
)) {
541 ipc
->err_cnt
[CHUB_ERR_READ_FAIL
]++;
546 contexthub_put_token(ipc
);
550 int contexthub_ipc_write(struct contexthub_ipc_info
*ipc
,
551 uint8_t *tx
, int length
, int timeout
)
555 if (__raw_readl(&ipc
->chub_status
) != CHUB_ST_RUN
) {
556 dev_warn(ipc
->dev
, "%s: chub isn't run:%d\n", __raw_readl(&ipc
->chub_status
));
560 if (contexthub_get_token(ipc
)) {
561 dev_warn(ipc
->dev
, "no-active: write fails\n");
565 mutex_lock(&wt_mutex
);
566 ret
= ipc_write_data(IPC_DATA_A2C
, tx
, (u16
)length
);
567 mutex_unlock(&wt_mutex
);
568 contexthub_put_token(ipc
);
570 pr_err("%s: fails to write data: ret:%d, len:%d errcnt:%d\n",
571 __func__
, ret
, length
, ipc
->err_cnt
[CHUB_ERR_WRITE_FAIL
]);
572 contexthub_handle_debug(ipc
, CHUB_ERR_WRITE_FAIL
, 0);
575 clear_err_cnt(ipc
, CHUB_ERR_WRITE_FAIL
);
580 static void check_rtc_time(void)
582 struct rtc_device
*chub_rtc
= rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE
);
583 struct rtc_device
*ap_rtc
= rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE
);
584 struct rtc_time chub_tm
, ap_tm
;
585 time64_t chub_t
, ap_t
;
587 rtc_read_time(ap_rtc
, &chub_tm
);
588 rtc_read_time(chub_rtc
, &ap_tm
);
590 chub_t
= rtc_tm_sub(&chub_tm
, &ap_tm
);
593 pr_info("nanohub %s: diff_time: %llu\n", __func__
, chub_t
);
594 rtc_set_time(chub_rtc
, &ap_tm
);
597 chub_t
= rtc_tm_to_time64(&chub_tm
);
598 ap_t
= rtc_tm_to_time64(&ap_tm
);
601 static int contexthub_hw_reset(struct contexthub_ipc_info
*ipc
,
602 enum mailbox_event event
)
609 dev_info(ipc
->dev
, "%s. status:%d\n",
610 __func__
, __raw_readl(&ipc
->chub_status
));
612 /* clear ipc value */
613 atomic_set(&ipc
->wakeup_chub
, CHUB_OFF
);
614 atomic_set(&ipc
->irq1_apInt
, C2A_OFF
);
615 atomic_set(&ipc
->read_lock
.cnt
, 0);
616 atomic_set(&ipc
->read_lock
.flag
, 0);
617 atomic_set(&ipc
->log_work_active
, 0);
620 for (i
= 0; i
< CHUB_ERR_MAX
; i
++) {
621 if (i
== CHUB_ERR_RESET_CNT
)
626 ipc_hw_write_shared_reg(AP
, ipc
->os_load
, SR_BOOT_MODE
);
627 ipc_set_chub_clk((u32
)ipc
->clkrate
);
628 ipc
->chub_rt_log
.loglevel
= CHUB_RT_LOG_DUMP_PRT
;
629 ipc_set_chub_bootmode(BOOTMODE_COLD
, ipc
->chub_rt_log
.loglevel
);
631 case MAILBOX_EVT_POWER_ON
:
632 #ifdef NEED_TO_RTC_SYNC
635 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_NO_POWER
) {
636 atomic_set(&ipc
->chub_status
, CHUB_ST_POWER_ON
);
638 /* enable Dump gpr */
639 IPC_HW_WRITE_DUMPGPR_CTRL(ipc
->chub_dumpgpr
, 0x1);
641 #if defined(CONFIG_SOC_EXYNOS9610)
642 /* cmu cm4 clock - gating */
643 val
= __raw_readl(ipc
->cmu_chub_qch
+
644 REG_QCH_CON_CM4_SHUB_QCH
);
645 val
&= ~(IGNORE_FORCE_PM_EN
| CLOCK_REQ
| ENABLE
);
646 __raw_writel((val
| IGNORE_FORCE_PM_EN
),
648 REG_QCH_CON_CM4_SHUB_QCH
);
650 /* pmu reset-release on CHUB */
651 val
= __raw_readl(ipc
->pmu_chub_reset
+
652 REG_CHUB_RESET_CHUB_OPTION
);
653 __raw_writel((val
| CHUB_RESET_RELEASE_VALUE
),
654 ipc
->pmu_chub_reset
+
655 REG_CHUB_RESET_CHUB_OPTION
);
657 #if defined(CONFIG_SOC_EXYNOS9610)
658 /* check chub cpu status */
660 val
= __raw_readl(ipc
->pmu_chub_reset
+
661 REG_CHUB_RESET_CHUB_CONFIGURATION
);
662 msleep(WAIT_TIMEOUT_MS
);
663 if (++trycnt
> WAIT_TRY_CNT
) {
665 "chub cpu status is not set correctly\n");
668 } while ((val
& 0x1) == 0x0);
670 /* cmu cm4 clock - release */
671 val
= __raw_readl(ipc
->cmu_chub_qch
+
672 REG_QCH_CON_CM4_SHUB_QCH
);
673 val
&= ~(IGNORE_FORCE_PM_EN
| CLOCK_REQ
| ENABLE
);
674 __raw_writel((val
| IGNORE_FORCE_PM_EN
| CLOCK_REQ
),
676 REG_QCH_CON_CM4_SHUB_QCH
);
678 val
= __raw_readl(ipc
->cmu_chub_qch
+
679 REG_QCH_CON_CM4_SHUB_QCH
);
680 val
&= ~(IGNORE_FORCE_PM_EN
| CLOCK_REQ
| ENABLE
);
681 __raw_writel((val
| CLOCK_REQ
),
683 REG_QCH_CON_CM4_SHUB_QCH
);
688 "fails to contexthub power on. Status is %d\n",
689 atomic_read(&ipc
->chub_status
));
692 case MAILBOX_EVT_RESET
:
693 ret
= pmucal_shub_reset_release();
699 if (ipc
->sel_os
== false) {
700 dev_info(ipc
->dev
, "%s -> os select\n", __func__
);
708 dev_info(ipc
->dev
, "%s: alive check\n", __func__
);
711 msleep(WAIT_CHUB_MS
);
712 contexthub_ipc_write_event(ipc
, MAILBOX_EVT_CHUB_ALIVE
);
713 if (++trycnt
> WAIT_TRY_CNT
)
715 } while ((atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
));
717 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_RUN
) {
718 dev_info(ipc
->dev
, "%s done. contexthub status is %d\n",
719 __func__
, atomic_read(&ipc
->chub_status
));
722 dev_warn(ipc
->dev
, "%s fails. contexthub status is %d\n",
723 __func__
, atomic_read(&ipc
->chub_status
));
729 static void contexthub_config_init(struct contexthub_ipc_info
*chub
)
731 /* BAAW-P-APM-CHUB for CHUB to access APM_CMGP. 1 window is used */
732 if (chub
->chub_baaw
) {
733 IPC_HW_WRITE_BAAW_CHUB0(chub
->chub_baaw
,
734 chub
->baaw_info
.baaw_p_apm_chub_start
);
735 IPC_HW_WRITE_BAAW_CHUB1(chub
->chub_baaw
,
736 chub
->baaw_info
.baaw_p_apm_chub_end
);
737 IPC_HW_WRITE_BAAW_CHUB2(chub
->chub_baaw
,
738 chub
->baaw_info
.baaw_p_apm_chub_remap
);
739 IPC_HW_WRITE_BAAW_CHUB3(chub
->chub_baaw
, BAAW_RW_ACCESS_ENABLE
);
742 /* enable mailbox ipc */
743 ipc_set_base(chub
->sram
);
744 ipc_set_owner(AP
, chub
->mailbox
, IPC_SRC
);
746 #define os_name_idx (11)
748 int contexthub_get_sensortype(struct contexthub_ipc_info
*ipc
, char *buf
)
750 struct sensor_map
*sensor_map
;
751 struct saved_setting
*pack
= (struct saved_setting
*) buf
;
755 unsigned int *tmp
= (unsigned int *)pack
;
758 if (atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
) {
759 dev_warn(ipc
->dev
, "%s :fails chub isn't active, status:%d, inreset:%d\n",
760 __func__
, atomic_read(&ipc
->chub_status
), atomic_read(&ipc
->in_reset
));
764 ret
= contexthub_get_token(ipc
);
767 msleep(WAIT_CHUB_MS
);
768 if (++trycnt
> WAIT_TRY_CNT
)
770 ret
= contexthub_get_token(ipc
);
774 dev_warn(ipc
->dev
, "%s fails to get token\n", __func__
);
778 sensor_map
= ipc_get_base(IPC_REG_IPC_SENSORINFO
);
779 if (ipc_have_sensor_info(sensor_map
)) {
781 pack
->num_os
= ipc
->os_name
[os_name_idx
] - '0';
782 len
= ipc_get_offset(IPC_REG_IPC_SENSORINFO
);
783 dev_info(ipc
->dev
, "%s: get sensorinfo: %p (os:%d, size:%d, %d / %d %d %d)\n", __func__
, sensor_map
, pack
->num_os
, len
, sizeof(struct saved_setting
),
784 sizeof(pack
->magic
), sizeof(pack
->num_os
), sizeof(pack
->readbuf
));
785 memcpy(&pack
->readbuf
, ipc_get_sensor_base(), len
);
786 for (i
= 0; i
< SENSOR_TYPE_MAX
; i
++)
787 if (sensor_map
->active_sensor_list
[i
])
788 dev_info(ipc
->dev
, "%s: get sensorinfo: type:%d, id:%d - %d\n", __func__
, i
, sensor_map
->active_sensor_list
[i
], pack
->readbuf
[i
]);
790 dev_err(ipc
->dev
, "%s: fails to get sensorinfo: %p\n", __func__
, sensor_map
);
792 contexthub_put_token(ipc
);
794 for (i
= 0; i
< sizeof(struct saved_setting
) / sizeof(int); i
++, tmp
++)
795 pr_info("%s: %d: 0x%x\n", __func__
, i
, *tmp
);
796 return sizeof(struct saved_setting
);
799 void contexthub_ipc_status_reset(struct contexthub_ipc_info
*ipc
)
801 /* clear ipc value */
802 atomic_set(&ipc
->wakeup_chub
, CHUB_OFF
);
803 atomic_set(&ipc
->irq1_apInt
, C2A_OFF
);
804 atomic_set(&ipc
->read_lock
.cnt
, 0x0);
805 atomic_set(&ipc
->log_work_active
, 0);
806 memset_io(ipc_get_base(IPC_REG_IPC_A2C
), 0, ipc_get_offset(IPC_REG_IPC_A2C
));
807 memset_io(ipc_get_base(IPC_REG_IPC_C2A
), 0, ipc_get_offset(IPC_REG_IPC_C2A
));
808 memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C
), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C
));
809 memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A
), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A
));
810 memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C_CTRL
), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C_CTRL
));
811 memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A_CTRL
), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A_CTRL
));
812 ipc_hw_clear_all_int_pend_reg(AP
);
813 ipc_hw_set_mcuctrl(AP
, 0x1);
816 int contexthub_ipc_write_event(struct contexthub_ipc_info
*ipc
,
817 enum mailbox_event event
)
824 case MAILBOX_EVT_INIT_IPC
:
825 ret
= contexthub_ipc_drv_init(ipc
);
827 case MAILBOX_EVT_POWER_ON
:
828 ret
= contexthub_hw_reset(ipc
, event
);
832 case MAILBOX_EVT_RESET
:
833 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_SHUTDOWN
) {
834 ret
= contexthub_hw_reset(ipc
, event
);
837 "contexthub status isn't shutdown. fails to reset\n");
841 case MAILBOX_EVT_SHUTDOWN
:
843 if (ipc
->block_reset
) {
844 /* pmu call assert */
845 ret
= pmucal_shub_reset_assert();
847 pr_err("%s: reset assert fail\n", __func__
);
851 /* pmu call reset-release_config */
852 ret
= pmucal_shub_reset_release_config();
854 pr_err("%s: reset release cfg fail\n", __func__
);
859 ret
= exynos_smc(SMC_CMD_CONN_IF
,
860 (EXYNOS_SHUB
<< 32) |
861 EXYNOS_SET_CONN_TZPC
, 0, 0);
863 pr_err("%s: TZPC setting fail\n",
867 dev_info(ipc
->dev
, "%s: tzpc setted\n", __func__
);
869 contexthub_config_init(ipc
);
871 val
= __raw_readl(ipc
->pmu_chub_reset
+
872 REG_CHUB_CPU_STATUS
);
873 if (val
& (1 << REG_CHUB_CPU_STATUS_BIT_STANDBYWFI
)) {
874 val
= __raw_readl(ipc
->pmu_chub_reset
+
875 REG_CHUB_RESET_CHUB_CONFIGURATION
);
876 __raw_writel(val
& ~(1 << 0),
877 ipc
->pmu_chub_reset
+
878 REG_CHUB_RESET_CHUB_CONFIGURATION
);
881 "fails to shutdown contexthub. cpu_status: 0x%x\n",
886 atomic_set(&ipc
->chub_status
, CHUB_ST_SHUTDOWN
);
888 case MAILBOX_EVT_CHUB_ALIVE
:
889 ipc_hw_write_shared_reg(AP
, AP_WAKE
, SR_3
);
890 val
= contexthub_lowlevel_alive(ipc
);
892 atomic_set(&ipc
->chub_status
, CHUB_ST_RUN
);
893 dev_info(ipc
->dev
, "%s : chub is alive", __func__
);
894 } else if (ipc
->sel_os
== true) {
896 "%s : chub isn't alive, should be reset. status:%d, inreset:%d\n",
897 __func__
, atomic_read(&ipc
->chub_status
), atomic_read(&ipc
->in_reset
));
898 if (!atomic_read(&ipc
->in_reset
)) {
899 #ifdef USE_NO_PANIC_ON_POWERON
900 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_POWER_ON
) {
901 atomic_set(&ipc
->chub_status
, CHUB_ST_NO_RESPONSE
);
902 /* hack don't make panic with chub poweron */
903 contexthub_reset(ipc
, 1, CHUB_ERR_NONE
);
905 atomic_set(&ipc
->chub_status
, CHUB_ST_NO_RESPONSE
);
906 contexthub_handle_debug(ipc
, CHUB_ERR_CHUB_NO_RESPONSE
, 0);
909 atomic_set(&ipc
->chub_status
, CHUB_ST_NO_RESPONSE
);
910 contexthub_handle_debug(ipc
, CHUB_ERR_CHUB_NO_RESPONSE
, 0);
913 dev_info(ipc
->dev
, "%s: skip to handle debug in reset\n", __func__
);
918 case MAILBOX_EVT_ENABLE_IRQ
:
919 /* if enable, mask from CHUB IRQ, else, unmask from CHUB IRQ */
920 ipc_hw_unmask_irq(AP
, IRQ_EVT_C2A_INT
);
921 ipc_hw_unmask_irq(AP
, IRQ_EVT_C2A_INTCLR
);
923 case MAILBOX_EVT_DISABLE_IRQ
:
924 ipc_hw_mask_irq(AP
, IRQ_EVT_C2A_INT
);
925 ipc_hw_mask_irq(AP
, IRQ_EVT_C2A_INTCLR
);
933 if (atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
) {
934 dev_warn(ipc
->dev
, "%s event:%d/%d fails chub isn't active, status:%d, inreset:%d\n",
935 __func__
, event
, MAILBOX_EVT_MAX
, atomic_read(&ipc
->chub_status
), atomic_read(&ipc
->in_reset
));
938 if (contexthub_get_token(ipc
))
943 case MAILBOX_EVT_RT_LOGLEVEL
:
944 ipc_logbuf_loglevel(ipc
->chub_rt_log
.loglevel
, 1);
946 case MAILBOX_EVT_ERASE_SHARED
:
947 memset(ipc_get_base(IPC_REG_SHARED
), 0, ipc_get_offset(IPC_REG_SHARED
));
949 case MAILBOX_EVT_DUMP_STATUS
:
950 /* dump nanohub kernel status */
951 dev_info(ipc
->dev
, "Request to dump chub fw status\n");
952 ipc_write_debug_event(AP
, (u32
)MAILBOX_EVT_DUMP_STATUS
);
953 ret
= ipc_add_evt(IPC_EVT_A2C
, IRQ_EVT_A2C_DEBUG
);
955 case MAILBOX_EVT_WAKEUP_CLR
:
956 if (atomic_read(&ipc
->wakeup_chub
) == CHUB_ON
) {
957 ret
= ipc_add_evt(IPC_EVT_A2C
, IRQ_EVT_A2C_WAKEUP_CLR
);
959 atomic_set(&ipc
->wakeup_chub
, CHUB_OFF
);
961 dev_warn(ipc
->dev
, "%s: fails to set wakeup. ret:%d", __func__
, ret
);
964 case MAILBOX_EVT_WAKEUP
:
965 if (atomic_read(&ipc
->wakeup_chub
) == CHUB_OFF
) {
966 ret
= ipc_add_evt(IPC_EVT_A2C
, IRQ_EVT_A2C_WAKEUP
);
968 atomic_set(&ipc
->wakeup_chub
, CHUB_ON
);
970 dev_warn(ipc
->dev
, "%s: fails to set wakeupclr. ret:%d", __func__
, ret
);
975 if ((int)event
< IPC_DEBUG_UTC_MAX
) {
976 ipc
->utc_run
= event
;
977 if ((int)event
== IPC_DEBUG_UTC_TIME_SYNC
)
979 ipc_write_debug_event(AP
, (u32
)event
);
980 ret
= ipc_add_evt(IPC_EVT_A2C
, IRQ_EVT_A2C_DEBUG
);
984 contexthub_put_token(ipc
);
986 /* add slient reset with write event error */
987 if ((event
== MAILBOX_EVT_WAKEUP_CLR
) || (event
== MAILBOX_EVT_WAKEUP
)) {
989 contexthub_handle_debug(ipc
, CHUB_ERR_EVTQ_ADD
, 0);
991 clear_err_cnt(ipc
, CHUB_ERR_EVTQ_ADD
);
997 int contexthub_poweron(struct contexthub_ipc_info
*ipc
)
1000 struct device
*dev
= ipc
->dev
;
1001 struct chub_bootargs
*map
;
1003 if (!atomic_read(&ipc
->chub_status
)) {
1004 memset_io(ipc
->sram
, 0, ipc
->sram_size
);
1005 ret
= contexthub_download_image(ipc
, IPC_REG_BL
);
1007 dev_warn(dev
, "fails to download bootloader\n");
1011 if (ipc_get_offset(IPC_REG_DUMP
) != ipc
->sram_size
)
1012 dev_warn(dev
, "sram size doen't match kernel:%d, fw:%d\n", ipc
->sram_size
, ipc_get_offset(IPC_REG_DUMP
));
1014 ret
= contexthub_ipc_write_event(ipc
, MAILBOX_EVT_INIT_IPC
);
1016 dev_warn(dev
, "fails to init ipc\n");
1020 if(!strcmp(ipc
->os_name
, "os.checked_0.bin") || ipc
->os_name
[0] != 'o') {
1021 map
= ipc_get_base(IPC_REG_BL_MAP
);
1022 ipc
->sel_os
= !(map
->bootmode
);
1024 dev_info(dev
, "saved os_name: %s", ipc
->os_name
);
1026 ret
= contexthub_download_image(ipc
, IPC_REG_OS
);
1028 dev_warn(dev
, "fails to download kernel\n");
1032 ret
= contexthub_ipc_write_event(ipc
, MAILBOX_EVT_POWER_ON
);
1034 dev_warn(dev
, "fails to poweron\n");
1037 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_RUN
)
1038 dev_info(dev
, "%s: contexthub power-on", __func__
);
1041 dev_warn(dev
, "contexthub failed to power-on");
1043 dev_info(dev
, "%s: wait for multi-os poweron\n", __func__
);
1044 ret
= chub_wait_event(&ipc
->poweron_lock
);
1045 dev_info(dev
, "%s: multi-os poweron %s, status:%d, ret:%d, flag:%d\n", __func__
,
1046 atomic_read(&ipc
->chub_status
) == CHUB_ST_RUN
? "success" : "fails",
1047 atomic_read(&ipc
->chub_status
), ret
, ipc
->poweron_lock
.flag
);
1055 static int contexthub_download_and_check_image(struct contexthub_ipc_info
*ipc
, enum ipc_region reg
)
1057 u32
*fw
= vmalloc(ipc_get_offset(reg
));
1061 return contexthub_download_image(ipc
, reg
);
1063 memcpy_fromio(fw
, ipc_get_base(reg
), ipc_get_offset(reg
));
1064 ret
= contexthub_download_image(ipc
, reg
);
1066 dev_err(ipc
->dev
, "%s: download bl(%d) fails\n", __func__
, reg
== IPC_REG_BL
);
1070 ret
= memcmp(fw
, ipc_get_base(reg
), ipc_get_offset(reg
));
1073 u32
*fw_image
= (u32
*)ipc_get_base(reg
);
1075 dev_err(ipc
->dev
, "%s: fw(%lx) doens't match with size %d\n",
1076 __func__
, (unsigned long)ipc_get_base(reg
), ipc_get_offset(reg
));
1077 for (i
= 0; i
< ipc_get_offset(reg
) / 4; i
++)
1078 if (fw
[i
] != fw_image
[i
]) {
1079 dev_err(ipc
->dev
, "fw[%d] %x -> wrong %x\n", i
, fw_image
[i
], fw
[i
]);
1080 print_hex_dump(KERN_CONT
, "before:", DUMP_PREFIX_OFFSET
, 16, 1, &fw
[i
], 64, false);
1081 print_hex_dump(KERN_CONT
, "after:", DUMP_PREFIX_OFFSET
, 16, 1, &fw_image
[i
], 64, false);
1087 dev_info(ipc
->dev
, "%s: download and checked bl(%d) ret:%d \n", __func__
, reg
== IPC_REG_BL
, ret
);
1092 int contexthub_reset(struct contexthub_ipc_info
*ipc
, bool force_load
, enum chub_err_type err
)
1097 dev_info(ipc
->dev
, "%s: force:%d, status:%d, in-reset:%d, err:%d, user:%d\n",
1098 __func__
, force_load
, atomic_read(&ipc
->chub_status
),
1099 atomic_read(&ipc
->in_reset
), err
, atomic_read(&ipc
->in_use_ipc
));
1100 mutex_lock(&reset_mutex
);
1101 if (!force_load
&& (atomic_read(&ipc
->chub_status
) == CHUB_ST_RUN
)) {
1102 mutex_unlock(&reset_mutex
);
1103 dev_info(ipc
->dev
, "%s: out status:%d\n", __func__
, atomic_read(&ipc
->chub_status
));
1106 atomic_inc(&ipc
->in_reset
);
1108 /* wait for ipc free */
1110 msleep(WAIT_CHUB_MS
);
1111 if (++trycnt
> RESET_WAIT_TRY_CNT
) {
1112 dev_info(ipc
->dev
, "%s: can't get lock. in_use_ipc: %d\n",
1113 __func__
, atomic_read(&ipc
->in_use_ipc
));
1117 dev_info(ipc
->dev
, "%s: wait for ipc user free: %d\n",
1118 __func__
, atomic_read(&ipc
->in_use_ipc
));
1119 } while (atomic_read(&ipc
->in_use_ipc
));
1122 chub_dbg_dump_hw(ipc
, err
);
1124 dev_info(ipc
->dev
, "%s: start reset status:%d\n", __func__
, atomic_read(&ipc
->chub_status
));
1126 if (!ipc
->block_reset
) {
1128 ipc_add_evt(IPC_EVT_A2C
, IRQ_EVT_A2C_SHUTDOWN
);
1129 msleep(100); /* wait for shut down time */
1133 mutex_lock(&pmu_shutdown_mutex
);
1134 dev_info(ipc
->dev
, "%s: enter shutdown\n", __func__
);
1135 ret
= contexthub_ipc_write_event(ipc
, MAILBOX_EVT_SHUTDOWN
);
1137 dev_err(ipc
->dev
, "%s: shutdown fails, ret:%d\n", __func__
, ret
);
1138 mutex_unlock(&pmu_shutdown_mutex
);
1141 dev_info(ipc
->dev
, "%s: out shutdown\n", __func__
);
1142 mutex_unlock(&pmu_shutdown_mutex
);
1144 /* image download */
1145 dev_info(ipc
->dev
, "%s: clear ipc:%p, %d\n", __func__
, ipc_get_base(IPC_REG_IPC
), ipc_get_offset(IPC_REG_IPC
));
1146 memset_io(ipc_get_base(IPC_REG_IPC_A2C
), 0, ipc_get_offset(IPC_REG_IPC_A2C
));
1147 memset_io(ipc_get_base(IPC_REG_IPC_C2A
), 0, ipc_get_offset(IPC_REG_IPC_C2A
));
1148 memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C
), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C
));
1149 memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A
), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A
));
1150 memset_io(ipc_get_base(IPC_REG_LOG
), 0, ipc_get_offset(IPC_REG_LOG
));
1151 if (ipc
->block_reset
|| force_load
) {
1152 ret
= contexthub_download_image(ipc
, IPC_REG_BL
);
1154 if (force_load
) /* can use new binary */
1155 ret
= contexthub_download_image(ipc
, IPC_REG_OS
);
1156 else /* use previous binary */
1157 ret
= contexthub_download_and_check_image(ipc
, IPC_REG_OS
);
1160 dev_err(ipc
->dev
, "%s: download os fails\n", __func__
);
1165 dev_err(ipc
->dev
, "%s: download bl fails\n", __func__
);
1171 ret
= contexthub_ipc_write_event(ipc
, MAILBOX_EVT_RESET
);
1173 dev_err(ipc
->dev
, "%s: reset fails, ret:%d\n", __func__
, ret
);
1175 dev_info(ipc
->dev
, "%s: chub reseted! (cnt:%d)\n",
1176 __func__
, ipc
->err_cnt
[CHUB_ERR_RESET_CNT
]);
1177 ipc
->err_cnt
[CHUB_ERR_RESET_CNT
]++;
1178 atomic_set(&ipc
->in_use_ipc
, 0);
1181 msleep(100); /* wakeup delay */
1182 chub_wake_event(&ipc
->reset_lock
);
1183 atomic_dec(&ipc
->in_reset
);
1184 mutex_unlock(&reset_mutex
);
1189 int contexthub_download_image(struct contexthub_ipc_info
*ipc
, enum ipc_region reg
)
1191 const struct firmware
*entry
;
1194 if (reg
== IPC_REG_BL
) {
1195 dev_info(ipc
->dev
, "%s: download bl\n", __func__
);
1196 ret
= request_firmware(&entry
, "bl.unchecked.bin", ipc
->dev
);
1198 else if (reg
== IPC_REG_OS
) {
1199 dev_info(ipc
->dev
, "%s: download %s\n", __func__
, ipc
->os_name
);
1200 ret
= request_firmware(&entry
, ipc
->os_name
, ipc
->dev
);
1206 dev_err(ipc
->dev
, "%s, bl(%d) request_firmware failed\n",
1207 reg
== IPC_REG_BL
, __func__
);
1210 memcpy_toio(ipc_get_base(reg
), entry
->data
, entry
->size
);
1211 dev_info(ipc
->dev
, "%s: bl:%d, bin(size:%d) on %lx\n",
1212 __func__
, reg
== IPC_REG_BL
, (int)entry
->size
, (unsigned long)ipc_get_base(reg
));
1213 release_firmware(entry
);
1218 static void handle_irq(struct contexthub_ipc_info
*ipc
, enum irq_evt_chub evt
)
/*
 * Dispatch one chub-to-AP mailbox event to its handler.
 * NOTE(review): the extraction dropped the enclosing switch (evt) header,
 * the 'break' statements and several closing braces below -- confirm the
 * control flow against the original source before relying on it.
 */
1223 case IRQ_EVT_C2A_DEBUG
:
/* Firmware debug event: a reported CM4 fault maps to CHUB_ERR_FW_FAULT,
 * anything else to CHUB_ERR_NANOHUB; both feed the debug handler. */
1224 err
= (ipc_read_debug_event(AP
) == IPC_DEBUG_CHUB_FAULT
) ? CHUB_ERR_FW_FAULT
: CHUB_ERR_NANOHUB
;
1225 dev_err(ipc
->dev
, "%s: c2a_debug: debug:%d, err:%d\n", __func__
, ipc_read_debug_event(AP
), err
);
1226 contexthub_handle_debug(ipc
, err
, 1);
/* Chub raised its interrupt-to-AP line: latch it once, notify the host. */
1228 case IRQ_EVT_C2A_INT
:
1229 if (atomic_read(&ipc
->irq1_apInt
) == C2A_OFF
) {
1230 atomic_set(&ipc
->irq1_apInt
, C2A_ON
);
1231 contexthub_notify_host(ipc
);
/* Chub cleared its interrupt: drop the latched flag. */
1234 case IRQ_EVT_C2A_INTCLR
:
1235 atomic_set(&ipc
->irq1_apInt
, C2A_OFF
);
1237 case IRQ_EVT_C2A_LOG
:
/* In-range channel event: count it, sample the reader flag under the
 * waitqueue lock, then wake the read_lock waiter.
 * NOTE(review): a guarding 'if (lock)' before the wake-up appears to have
 * been dropped by extraction -- verify. */
1240 if (evt
< IRQ_EVT_CH_MAX
) {
1243 atomic_inc(&ipc
->read_lock
.cnt
);
1244 /* TODO: requered.. ? */
1245 spin_lock(&ipc
->read_lock
.event
.lock
);
1246 lock
= atomic_read(&ipc
->read_lock
.flag
);
1247 spin_unlock(&ipc
->read_lock
.event
.lock
);
1249 wake_up_interruptible_sync(&ipc
->read_lock
.event
);
/* Out-of-range event id: warn only. */
1251 dev_warn(ipc
->dev
, "%s: invalid %d event",
/* If the fw log buffer has content and no log work is in flight, queue one
 * (log_work_active acts as a single-shot gate; reqcnt is a debug counter). */
1257 if (ipc_logbuf_filled() && !atomic_read(&ipc
->log_work_active
)) {
1258 ipc
->log_work_reqcnt
++; /* debug */
1259 atomic_set(&ipc
->log_work_active
, 1);
1260 schedule_work(&ipc
->log_work
);
1264 static irqreturn_t
contexthub_irq_handler(int irq
, void *data
)
/*
 * Mailbox IRQ handler (AP side): read the HW interrupt status register,
 * ack and handle the ALIVE and LOG lines first, then drain chub->AP events.
 * NOTE(review): the extraction dropped braces, 'goto'/label lines and parts
 * of the event loop -- confirm structure against the original source.
 */
1266 struct contexthub_ipc_info
*ipc
= data
;
1267 int start_index
= ipc_hw_read_int_start_index(AP
);
1268 unsigned int status
= ipc_hw_read_int_status_reg(AP
);
1269 struct ipc_evt_buf
*cur_evt
;
1270 enum chub_err_type err
= 0;
1271 enum irq_chub evt
= 0;
/* irq_num initially indexes the CHUB_ALIVE slot within this AP's bank */
1272 int irq_num
= IRQ_EVT_CHUB_ALIVE
+ start_index
;
1273 u32 status_org
= status
; /* for debug */
1274 struct ipc_evt
*ipc_evt
= ipc_get_base(IPC_REG_IPC_EVT_C2A
);
1276 /* chub alive interrupt handle */
1277 if (status
& (1 << irq_num
)) {
1278 status
&= ~(1 << irq_num
);
1279 ipc_hw_clear_int_pend_reg(AP
, irq_num
);
/* First alive after power-on with multi-OS (sel_os == false): defer OS
 * selection/handling to the debug work instead of handling inline. */
1280 if (atomic_read(&ipc
->chub_status
) == CHUB_ST_POWER_ON
&& ipc
->sel_os
== false) {
1281 schedule_work(&ipc
->debug_work
);
1285 /* set wakeup flag for chub_alive_lock */
1286 chub_wake_event(&ipc
->chub_alive_lock
);
/* LOG line: ack and forward to handle_irq() as IRQ_EVT_C2A_LOG */
1288 irq_num
= IRQ_EVT_C2A_LOG
+ start_index
;
1289 if (status
& (1 << irq_num
)) {
1290 status
&= ~(1 << irq_num
);
1291 ipc_hw_clear_int_pend_reg(AP
, irq_num
);
1292 handle_irq(ipc
, IRQ_EVT_C2A_LOG
);
/* CHECK_HW_TRIGGER build: pop events from the C2A queue and cross-check the
 * SW 'pending' bookkeeping and the HW pending bits for each event's irq. */
1295 #ifdef CHECK_HW_TRIGGER
1296 /* chub ipc interrupt handle */
1298 cur_evt
= ipc_get_evt(IPC_EVT_C2A
);
1302 irq_num
= cur_evt
->irq
+ start_index
;
/* event queued without its SW pending bit set: log the inconsistency */
1304 if (!ipc_evt
->ctrl
.pending
[cur_evt
->irq
])
1306 ("%s: no-sw-trigger irq:%d(%d+%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__
,
1307 irq_num
, cur_evt
->irq
, start_index
, evt
, status_org
, status
, ipc_hw_read_int_status_reg(AP
));
1309 /* check match evtq and hw interrupt pending */
1310 if (!(status
& (1 << irq_num
))) {
1311 err
= CHUB_ERR_EVTQ_NO_HW_TRIGGER
;
1313 ("%s: no-hw-trigger irq:%d(%d+%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__
,
1314 irq_num
, cur_evt
->irq
, start_index
, evt
, status_org
, status
, ipc_hw_read_int_status_reg(AP
));
/* queue empty while HW still shows work: flag CHUB_ERR_EVTQ_EMTPY */
1317 err
= CHUB_ERR_EVTQ_EMTPY
;
1319 ("%s: evt-empty irq:%d(%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__
,
1320 irq_num
, start_index
, evt
, status_org
, status
, ipc_hw_read_int_status_reg(AP
));
/* consistent event: ack HW, clear SW pending, dispatch, drop status bit */
1323 ipc_hw_clear_int_pend_reg(AP
, irq_num
);
1324 ipc_evt
->ctrl
.pending
[cur_evt
->irq
] = 0;
1325 handle_irq(ipc
, (u32
)evt
);
1326 status
&= ~(1 << irq_num
);
/* Non-CHECK build: walk every still-pending status bit, popping one queued
 * event per bit; an empty queue with a pending bit is an error. */
1332 for (i
= start_index
; i
< irq_num
; i
++) {
1334 if (status
& (1 << i
)) {
1335 cur_evt
= ipc_get_evt(IPC_EVT_C2A
);
1338 handle_irq(ipc
, (u32
)evt
);
1339 ipc_hw_clear_int_pend_reg(AP
, i
);
1341 err
= CHUB_ERR_EVTQ_EMTPY
;
/* Error path: dump the full irq/event state, clear every pending bit and
 * hand the error to the debug machinery. */
1350 dev_err(ipc
->dev
, "nanohub: inval irq err(%d):start_irqnum:%d,evt(%p):%d,irq_hw:%d,status_reg:0x%x->0x%x(0x%x,0x%x)\n",
1351 err
, start_index
, cur_evt
, evt
, irq_num
,
1352 status_org
, status
, ipc_hw_read_int_status_reg(AP
),
1353 ipc_hw_read_int_gen_reg(AP
));
1354 ipc_hw_clear_all_int_pend_reg(AP
);
1355 contexthub_handle_debug(ipc
, err
, 1);
/* Clean pass: decay the transient event-queue error counters. */
1357 clear_err_cnt(ipc
, CHUB_ERR_EVTQ_EMTPY
);
1358 clear_err_cnt(ipc
, CHUB_ERR_EVTQ_NO_HW_TRIGGER
);
#if defined(CHUB_RESET_ENABLE)
/*
 * Watchdog interrupt from the contexthub CM4: mask the line so it cannot
 * fire again, remember that it is masked, and hand the event to the
 * debug/reset machinery.
 */
static irqreturn_t contexthub_irq_wdt_handler(int irq, void *data)
{
	struct contexthub_ipc_info *ipc = data;

	dev_info(ipc->dev, "%s called\n", __func__);
	disable_irq_nosync(ipc->irq_wdt);
	ipc->irq_wdt_disabled = 1;
	contexthub_handle_debug(ipc, CHUB_ERR_FW_WDT, 1);

	return IRQ_HANDLED;
}
#endif
1377 static struct clk
*devm_clk_get_and_prepare(struct device
*dev
,
/*
 * Look up a named clock with devm_clk_get(), then prepare and enable it.
 * NOTE(review): the second parameter line (presumably 'const char *name'),
 * the error branches and the final return were dropped by the extraction;
 * each failing step only logs here -- confirm the error-path return value
 * against the original source.
 */
1380 struct clk
*clk
= NULL
;
1383 clk
= devm_clk_get(dev
, name
);
/* clock lookup failed */
1385 dev_err(dev
, "Failed to get clock %s\n", name
);
/* prepare (may sleep) before enable */
1389 ret
= clk_prepare(clk
);
1391 dev_err(dev
, "Failed to prepare clock %s\n", name
);
1395 ret
= clk_enable(clk
);
1397 dev_err(dev
, "Failed to enable clock %s\n", name
);
1405 #if defined(CONFIG_SOC_EXYNOS9610)
1406 extern int cal_dll_apm_enable(void);
1409 static void __iomem
*get_iomem(struct platform_device
*pdev
,
1410 const char *name
, u32
*size
)
1412 struct resource
*res
;
1415 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, name
);
1416 if (IS_ERR_OR_NULL(res
)) {
1417 dev_err(&pdev
->dev
, "Failed to get %s\n", name
);
1418 return ERR_PTR(-EINVAL
);
1421 ret
= devm_ioremap_resource(&pdev
->dev
, res
);
1423 dev_err(&pdev
->dev
, "fails to get %s\n", name
);
1424 return ERR_PTR(-EINVAL
);
1428 *size
= resource_size(res
);
1429 dev_info(&pdev
->dev
, "%s: %s(%p) is mapped on %p with size of %zu",
1430 __func__
, name
, (void *)res
->start
, ret
, (size_t)resource_size(res
));
1435 static __init
int contexthub_ipc_hw_init(struct platform_device
*pdev
,
1436 struct contexthub_ipc_info
*chub
)
/*
 * Parse the device-tree node and acquire every HW resource the driver
 * needs: OS image name, reset mode, os-select flag, mailbox/wdt IRQs,
 * SFR regions (mailbox, sram, dumpgpr, chub_reset, chub_baaw), BAAW
 * window addresses, optional sensor irq pins and the chub clocks.
 * NOTE(review): extraction dropped braces and returns throughout; the
 * review notes below flag suspect spots to verify against the original.
 */
1440 struct resource
*res
;
1442 const char *resetmode
;
1443 const char *selectos
;
1444 struct device
*dev
= &pdev
->dev
;
1445 struct device_node
*node
= dev
->of_node
;
1446 const char *string_array
[10];
/* DT node is mandatory for this driver */
1452 dev_err(dev
, "driver doesn't support non-dt\n");
1456 /* get os type from dt */
1457 os
= of_get_property(node
, "os-type", NULL
);
1458 if (!os
|| !strcmp(os
, "none") || !strcmp(os
, "pass")) {
1459 dev_err(dev
, "no use contexthub\n");
1464 strcpy(chub
->os_name
, os
);
1467 /* get resetmode from dt */
1468 resetmode
= of_get_property(node
, "reset-mode", NULL
);
/* default (missing property) is block reset */
1469 if (!resetmode
|| !strcmp(resetmode
, "block"))
1470 chub
->block_reset
= 1;
1472 chub
->block_reset
= 0;
1474 /* get os select from dt */
1475 selectos
= of_get_property(node
, "os-select", NULL
);
/* sel_os == true means the OS image is already decided (no multi-os) */
1476 if (!selectos
|| strcmp(selectos
, "true")) {
1477 dev_info(dev
,"multi os disabled : %s\n", selectos
);
1478 chub
->sel_os
= true;
1480 dev_info(dev
,"multi os enabled : %s\n", selectos
);
1481 chub
->sel_os
= false;
1484 /* get mailbox interrupt */
1485 chub
->irq_mailbox
= irq_of_parse_and_map(node
, 0);
/* NOTE(review): irq_of_parse_and_map() returns unsigned int (0 means
 * failure), so this '< 0' test can never fire; and 'irq' in the dev_err
 * below has no visible declaration -- chub->irq_mailbox is likely meant. */
1486 if (chub
->irq_mailbox
< 0) {
1487 dev_err(dev
, "failed to get irq:%d\n", irq
);
1491 /* request irq handler */
1492 ret
= devm_request_irq(dev
, chub
->irq_mailbox
, contexthub_irq_handler
,
1493 0, dev_name(dev
), chub
);
1495 dev_err(dev
, "failed to request irq:%d, ret:%d\n",
1496 chub
->irq_mailbox
, ret
);
1500 #if defined(CHUB_RESET_ENABLE)
1501 /* get wdt interrupt optionally */
1502 chub
->irq_wdt
= irq_of_parse_and_map(node
, 1);
1503 if (chub
->irq_wdt
> 0) {
1504 /* request irq handler */
1505 ret
= devm_request_irq(dev
, chub
->irq_wdt
,
1506 contexthub_irq_wdt_handler
, 0,
1507 dev_name(dev
), chub
);
1509 dev_err(dev
, "failed to request wdt irq:%d, ret:%d\n",
1510 chub
->irq_wdt
, ret
);
1513 chub
->irq_wdt_disabled
= 0;
/* NOTE(review): 'irq' is undeclared here too -- likely chub->irq_wdt. */
1515 dev_info(dev
, "don't use wdt irq:%d\n", irq
);
1519 /* get MAILBOX SFR */
1520 chub
->mailbox
= get_iomem(pdev
, "mailbox", NULL
);
1521 if (IS_ERR(chub
->mailbox
))
1522 return PTR_ERR(chub
->mailbox
);
/* chub SRAM; its size is recorded for image download/dump */
1525 chub
->sram
= get_iomem(pdev
, "sram", &chub
->sram_size
);
1526 if (IS_ERR(chub
->sram
))
1527 return PTR_ERR(chub
->sram
);
1529 /* get chub gpr base */
1530 chub
->chub_dumpgpr
= get_iomem(pdev
, "dumpgpr", NULL
);
1531 if (IS_ERR(chub
->chub_dumpgpr
))
1532 return PTR_ERR(chub
->chub_dumpgpr
);
1534 chub
->pmu_chub_reset
= get_iomem(pdev
, "chub_reset", NULL
);
1535 if (IS_ERR(chub
->pmu_chub_reset
))
1536 return PTR_ERR(chub
->pmu_chub_reset
);
1538 chub
->chub_baaw
= get_iomem(pdev
, "chub_baaw", NULL
);
1539 if (IS_ERR(chub
->chub_baaw
))
1540 return PTR_ERR(chub
->chub_baaw
);
1542 #if defined(CONFIG_SOC_EXYNOS9610)
1543 /* get cmu qch base */
1544 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "cmu_chub_qch");
1545 chub
->cmu_chub_qch
= devm_ioremap_resource(dev
, res
);
1546 if (IS_ERR(chub
->cmu_chub_qch
)) {
1547 pr_err("driver failed to get cmu_chub_qch\n");
1548 return PTR_ERR(chub
->cmu_chub_qch
);
1552 /* get addresses information to set BAAW */
1553 if (of_property_read_u32_index
1554 (node
, "baaw,baaw-p-apm-chub", 0,
1555 &chub
->baaw_info
.baaw_p_apm_chub_start
)) {
1557 "driver failed to get baaw-p-apm-chub, start\n");
1561 if (of_property_read_u32_index
1562 (node
, "baaw,baaw-p-apm-chub", 1,
1563 &chub
->baaw_info
.baaw_p_apm_chub_end
)) {
1565 "driver failed to get baaw-p-apm-chub, end\n");
1569 if (of_property_read_u32_index
1570 (node
, "baaw,baaw-p-apm-chub", 2,
1571 &chub
->baaw_info
.baaw_p_apm_chub_remap
)) {
1573 "driver failed to get baaw-p-apm-chub, remap\n");
1577 /* disable chub irq list (for sensor irq) */
1578 of_property_read_u32(node
, "chub-irq-pin-len", &chub
->irq_pin_len
);
1579 if (chub
->irq_pin_len
) {
/* NOTE(review): this compares a pin COUNT against sizeof() in BYTES;
 * ARRAY_SIZE(chub->irq_pins) looks intended, and the "%d" below is
 * mismatched with the size_t produced by sizeof (should be %zu). */
1580 if (chub
->irq_pin_len
> sizeof(chub
->irq_pins
)) {
1582 "failed to get irq pin length %d, %d\n",
1583 chub
->irq_pin_len
, sizeof(chub
->irq_pins
));
1584 chub
->irq_pin_len
= 0;
1588 dev_info(&pdev
->dev
, "get chub irq_pin len:%d\n", chub
->irq_pin_len
);
1589 for (i
= 0; i
< chub
->irq_pin_len
; i
++) {
1590 chub
->irq_pins
[i
] = of_get_named_gpio(node
, "chub-irq-pin", i
);
1591 if (!gpio_is_valid(chub
->irq_pins
[i
])) {
1592 dev_err(&pdev
->dev
, "get invalid chub irq_pin:%d\n", chub
->irq_pins
[i
]);
1595 dev_info(&pdev
->dev
, "get chub irq_pin:%d\n", chub
->irq_pins
[i
]);
1598 #if defined(CONFIG_SOC_EXYNOS9610)
1599 cal_dll_apm_enable();
/* main bus clock: rate is cached for logging/timing */
1602 clk
= devm_clk_get_and_prepare(dev
, "chub_bus");
1605 chub
->clkrate
= clk_get_rate(clk
);
/* enable every clock listed in the DT "clock-names" property */
1607 chub_clk_len
= of_property_count_strings(node
, "clock-names");
1608 of_property_read_string_array(node
, "clock-names", string_array
, chub_clk_len
);
1609 for (i
= 0; i
< chub_clk_len
; i
++) {
1610 clk
= devm_clk_get_and_prepare(dev
, string_array
[i
]);
1613 dev_info(&pdev
->dev
, "clk_name: %s enable\n", __clk_get_name(clk
));
1619 static ssize_t
chub_poweron(struct device
*dev
,
1620 struct device_attribute
*attr
,
1621 const char *buf
, size_t count
)
1623 struct contexthub_ipc_info
*ipc
= dev_get_drvdata(dev
);
1624 int ret
= contexthub_poweron(ipc
);
1626 return ret
< 0 ? ret
: count
;
1629 static ssize_t
chub_reset(struct device
*dev
,
1630 struct device_attribute
*attr
,
1631 const char *buf
, size_t count
)
1633 struct contexthub_ipc_info
*ipc
= dev_get_drvdata(dev
);
1634 int ret
= contexthub_reset(ipc
, 1, CHUB_ERR_NONE
);
1636 return ret
< 0 ? ret
: count
;
/* Write-only (0220) sysfs attributes: 'poweron' powers the hub on via
 * chub_poweron(), 'reset' forces a reset via chub_reset(). */
1639 static struct device_attribute attributes
[] = {
1640 __ATTR(poweron
, 0220, NULL
, chub_poweron
),
1641 __ATTR(reset
, 0220, NULL
, chub_reset
),
#ifdef CONFIG_EXYNOS_ITMON
/*
 * ITMON (bus monitor) notifier: when the reported bus master is one of the
 * SHUB-owned masters, treat the event as a contexthub error and hand it to
 * the debug machinery.
 * NOTE(review): the return statements were not visible in the extracted
 * source and are reconstructed (NOTIFY_OK on match, NOTIFY_DONE otherwise)
 * -- confirm against the original.
 */
static int chub_itmon_notifier(struct notifier_block *nb,
			       unsigned long action, void *nb_data)
{
	struct contexthub_ipc_info *data =
	    container_of(nb, struct contexthub_ipc_info, itmon_nb);
	struct itmon_notifier *itmon_data = nb_data;

	if (itmon_data && itmon_data->master &&
	    ((!strncmp("CM4_SHUB_CD", itmon_data->master, sizeof("CM4_SHUB_CD") - 1)) ||
	     (!strncmp("CM4_SHUB_P", itmon_data->master, sizeof("CM4_SHUB_P") - 1)) ||
	     (!strncmp("PDMA_SHUB", itmon_data->master, sizeof("PDMA_SHUB") - 1)))) {
		/*
		 * BUGFIX: 'action' is unsigned long but was printed with
		 * "%d" -- a printf format/argument mismatch; use %lu.
		 */
		dev_info(data->dev, "%s: chub(%s) itmon detected: action:%lu!!\n",
			 __func__, itmon_data->master, action);
		contexthub_handle_debug(data, CHUB_ERR_ITMON, 1);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
#endif
1665 static int contexthub_ipc_probe(struct platform_device
*pdev
)
/*
 * Platform probe: obtain the driver context (from the chub debug memory
 * area, with devm_kzalloc as fallback), run DT/HW init, optionally attach
 * the nanohub CHRE HAL, initialize locks/work items/runtime log, register
 * the ITMON notifier and create the sysfs attributes.
 * NOTE(review): extraction dropped the error-path labels and several
 * braces -- confirm cleanup ordering against the original source.
 */
1667 struct contexthub_ipc_info
*chub
;
/* set when the context came from devm_kzalloc rather than the debug area */
1668 int need_to_free
= 0;
1671 #ifdef CONFIG_CHRE_SENSORHUB_HAL
1672 struct iio_dev
*iio_dev
;
/* context preferably lives in the persistent debug dump area */
1674 chub
= chub_dbg_get_memory(DBG_NANOHUB_DD_AREA
);
1677 devm_kzalloc(&pdev
->dev
, sizeof(struct contexthub_ipc_info
),
1682 dev_err(&pdev
->dev
, "%s failed to get ipc memory\n", __func__
);
1687 /* parse dt and hw init */
1688 ret
= contexthub_ipc_hw_init(pdev
, chub
);
1690 dev_err(&pdev
->dev
, "%s failed to get init hw with ret %d\n",
1695 #ifdef CONFIG_CHRE_SENSORHUB_HAL
/* attach the nanohub HAL and cross-link it with this mailbox client */
1697 iio_dev
= nanohub_probe(&pdev
->dev
, NULL
);
1698 if (IS_ERR(iio_dev
))
1701 /* set wakeup irq number on nanohub driver */
1702 chub
->data
= iio_priv(iio_dev
);
1703 nanohub_mailbox_comms_init(&chub
->data
->comms
);
1704 chub
->pdata
= chub
->data
->pdata
;
1705 chub
->pdata
->mailbox_client
= chub
;
1706 chub
->data
->irq1
= IRQ_EVT_A2C_WAKEUP
;
1707 chub
->data
->irq2
= 0;
/* default runtime state: no fw log, no ipc users, hub unpowered */
1709 chub
->chub_rt_log
.loglevel
= 0;
1710 spin_lock_init(&chub
->logout_lock
);
1711 atomic_set(&chub
->in_use_ipc
, 0);
1712 atomic_set(&chub
->chub_status
, CHUB_ST_NO_POWER
);
1713 atomic_set(&chub
->in_reset
, 0);
1714 chub
->powermode
= 0; /* updated by fw bl */
1716 for (i
= 0; i
< CHUB_ERR_MAX
; i
++)
1717 chub
->err_cnt
[i
] = 0;
1718 chub
->dev
= &pdev
->dev
;
1719 platform_set_drvdata(pdev
, chub
);
1720 contexthub_config_init(chub
);
/* sysfs attribute creation failure is non-fatal (warn only) */
1722 for (i
= 0, ret
= 0; i
< ARRAY_SIZE(attributes
); i
++) {
1723 ret
= device_create_file(chub
->dev
, &attributes
[i
]);
1725 dev_warn(chub
->dev
, "Failed to create file: %s\n",
1726 attributes
[i
].attr
.name
);
/* event waitqueues used by chub_wake_event()/chub_wait_event() */
1728 init_waitqueue_head(&chub
->poweron_lock
.event
);
1729 init_waitqueue_head(&chub
->reset_lock
.event
);
1730 init_waitqueue_head(&chub
->read_lock
.event
);
1731 init_waitqueue_head(&chub
->chub_alive_lock
.event
);
1732 atomic_set(&chub
->poweron_lock
.flag
, 0);
1733 atomic_set(&chub
->chub_alive_lock
.flag
, 0);
1734 INIT_WORK(&chub
->debug_work
, handle_debug_work_func
);
1735 INIT_WORK(&chub
->log_work
, handle_log_work_func
);
1736 chub
->log_work_reqcnt
= 0;
1737 #ifdef CONFIG_EXYNOS_ITMON
1738 chub
->itmon_nb
.notifier_call
= chub_itmon_notifier
;
1739 itmon_notifier_chain_register(&chub
->itmon_nb
);
1742 /* init fw runtime log */
1743 chub
->chub_rt_log
.buffer
= vzalloc(SZ_512K
* 2);
1744 if (!chub
->chub_rt_log
.buffer
) {
1748 chub
->chub_rt_log
.buffer_size
= SZ_512K
* 2;
1749 chub
->chub_rt_log
.write_index
= 0;
1751 dev_info(chub
->dev
, "%s with %s FW and %lu clk is done\n",
1752 __func__
, chub
->os_name
, chub
->clkrate
);
/* error path: release the fallback allocation (dropped label above) */
1757 devm_kfree(&pdev
->dev
, chub
);
1759 dev_err(&pdev
->dev
, "%s is fail with ret %d\n", __func__
, ret
);
/*
 * Platform-driver remove callback.
 * NOTE(review): no teardown statements were visible in the extracted
 * source; confirm against the original that nothing (e.g. the vzalloc'd
 * runtime log buffer) needs freeing here.
 */
static int contexthub_ipc_remove(struct platform_device *pdev)
{
	return 0;
}
1768 static int contexthub_suspend(struct device
*dev
)
1770 struct contexthub_ipc_info
*ipc
= dev_get_drvdata(dev
);
1771 #ifdef CONFIG_CHRE_SENSORHUB_HAL
1772 struct nanohub_data
*data
= ipc
->data
;
1775 if (atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
)
1778 dev_info(dev
, "nanohub log to kernel off\n");
1779 ipc_hw_write_shared_reg(AP
, AP_SLEEP
, SR_3
);
1780 ipc_hw_gen_interrupt(AP
, IRQ_EVT_CHUB_ALIVE
);
1782 #ifdef CONFIG_CHRE_SENSORHUB_HAL
1783 return nanohub_suspend(data
->iio_dev
);
1789 static int contexthub_resume(struct device
*dev
)
1791 struct contexthub_ipc_info
*ipc
= dev_get_drvdata(dev
);
1792 #ifdef CONFIG_CHRE_SENSORHUB_HAL
1793 struct nanohub_data
*data
= ipc
->data
;
1797 if (atomic_read(&ipc
->chub_status
) != CHUB_ST_RUN
)
1800 dev_info(dev
, "nanohub log to kernel on\n");
1801 ipc_hw_write_shared_reg(AP
, AP_WAKE
, SR_3
);
1802 ipc_hw_gen_interrupt(AP
, IRQ_EVT_CHUB_ALIVE
);
1804 #ifdef CONFIG_CHRE_SENSORHUB_HAL
1805 ret
= nanohub_resume(data
->iio_dev
);
1810 //static SIMPLE_DEV_PM_OPS(contexthub_pm_ops, contexthub_suspend, contexthub_resume);
/* System PM callbacks; the commented-out SIMPLE_DEV_PM_OPS above would
 * declare the same suspend/resume pair. */
1811 static const struct dev_pm_ops contexthub_pm_ops
= {
1812 .suspend
= contexthub_suspend
,
1813 .resume
= contexthub_resume
,
/* Device-tree match table for "samsung,exynos-nanohub".
 * NOTE(review): the terminating empty entry {} is not visible in this
 * extraction -- verify it exists in the original source. */
1816 static const struct of_device_id contexthub_ipc_match
[] = {
1817 {.compatible
= "samsung,exynos-nanohub"},
/* Platform-driver glue: probe/remove, DT matching ("nanohub-ipc") and the
 * suspend/resume PM callbacks defined above. */
1821 static struct platform_driver samsung_contexthub_ipc_driver
= {
1822 .probe
= contexthub_ipc_probe
,
1823 .remove
= contexthub_ipc_remove
,
1825 .name
= "nanohub-ipc",
1826 .owner
= THIS_MODULE
,
1827 .of_match_table
= contexthub_ipc_match
,
1828 .pm
= &contexthub_pm_ops
,
1832 int nanohub_mailbox_init(void)
1834 return platform_driver_register(&samsung_contexthub_ipc_driver
);
1837 static void __exit
nanohub_mailbox_cleanup(void)
1839 platform_driver_unregister(&samsung_contexthub_ipc_driver
);
/* Module registration plumbing and metadata. */
1842 module_init(nanohub_mailbox_init
);
1843 module_exit(nanohub_mailbox_cleanup
);
1845 MODULE_LICENSE("GPL v2");
1846 MODULE_DESCRIPTION("Exynos contexthub mailbox Driver");
1847 MODULE_AUTHOR("Boojin Kim <boojin.kim@samsung.com>");