[RAMEN9610-20876][MODAP-53888][COMMON] chub: change resume from complete to resume
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] drivers/staging/nanohub/chub.c
/*
 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
 *
 * Boojin Kim <boojin.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/iio/iio.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/random.h>
#include <linux/rtc.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/timekeeping.h>
#include <linux/of_gpio.h>
#include <linux/fcntl.h>
#include <uapi/linux/sched/types.h>

#ifdef CONFIG_EXYNOS_ITMON
#include <soc/samsung/exynos-itmon.h>
#endif

#ifdef CONFIG_CHRE_SENSORHUB_HAL
#include "main.h"
#endif
#include "bl.h"
#include "comms.h"
#include "chub.h"
#include "chub_ipc.h"
#include "chub_dbg.h"
#include "../../soc/samsung/cal-if/pmucal_shub.h"

#define WAIT_TIMEOUT_MS (1000)
enum { CHUB_ON, CHUB_OFF };
enum { C2A_ON, C2A_OFF };

const char *os_image[SENSOR_VARIATION] = {
	"os.checked_0.bin",
	"os.checked_1.bin",
	"os.checked_2.bin",
	"os.checked_3.bin",
	"os.checked_4.bin",
	"os.checked_5.bin",
	"os.checked_6.bin",
	"os.checked_7.bin",
	"os.checked_8.bin",
};

#define USE_NO_PANIC_ON_POWERON /* hack for panic */
static DEFINE_MUTEX(reset_mutex);
static DEFINE_MUTEX(pmu_shutdown_mutex);
static DEFINE_MUTEX(log_mutex);
static DEFINE_MUTEX(wt_mutex);

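/*
 * Completion-style helpers: chub_wake_event() sets the flag and wakes any
 * sleeper; chub_wait_event() clears the flag and sleeps until it is set or
 * 2 * WAIT_TIMEOUT_MS elapses. Used for the poweron/reset/alive handshakes.
 */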
void chub_wake_event(struct chub_alive *event)
{
	atomic_set(&event->flag, 1);
	wake_up_interruptible_sync(&event->event);
}

static int chub_wait_event(struct chub_alive *event)
{
	atomic_set(&event->flag, 0);
	return wait_event_interruptible_timeout(event->event,
						atomic_read(&event->flag),
						msecs_to_jiffies(WAIT_TIMEOUT_MS * 2));
}

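/*
 * contexthub_get_token()/contexthub_put_token() form a simple refcount
 * (in_use_ipc) that keeps IPC users out while a reset is in progress:
 * get fails once in_reset is set, and contexthub_reset() waits until
 * in_use_ipc drops to zero before shutting the core down.
 */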
static int contexthub_get_token(struct contexthub_ipc_info *ipc)
{
	if (atomic_read(&ipc->in_reset))
		return -EINVAL;

	atomic_inc(&ipc->in_use_ipc);
	return 0;
}

static void contexthub_put_token(struct contexthub_ipc_info *ipc)
{
	atomic_dec(&ipc->in_use_ipc);
}

/* host interface functions */
int contexthub_is_run(struct contexthub_ipc_info *ipc)
{
	if (!ipc->powermode)
		return 1;

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	return nanohub_irq1_fired(ipc->data);
#else
	return 1;
#endif
}

/* request contexthub to host driver */
int contexthub_request(struct contexthub_ipc_info *ipc)
{
	if (!ipc->powermode)
		return 0;

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	return request_wakeup_timeout(ipc->data, WAIT_TIMEOUT_MS);
#else
	return 0;
#endif
}

/* release contexthub to host driver */
void contexthub_release(struct contexthub_ipc_info *ipc)
{
	if (!ipc->powermode)
		return;

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	release_wakeup(ipc->data);
#endif
}

static inline void contexthub_notify_host(struct contexthub_ipc_info *ipc)
{
#ifdef CONFIG_CHRE_SENSORHUB_HAL
	nanohub_handle_irq1(ipc->data);
#else
	/* TODO */
#endif
}

#ifdef CONFIG_CHRE_SENSORHUB_HAL
/*
 * Matches the nanohub kernel RxBufStruct: the 10-byte packet header is
 * padded to 12 bytes so that the payload buffer stays aligned.
 */
struct rxbuf {
	u8 pad;
	u8 pre_preamble;
	u8 buf[PACKET_SIZE_MAX];
	u8 post_preamble;
};

static int nanohub_mailbox_open(void *data)
{
	return 0;
}

static void nanohub_mailbox_close(void *data)
{
	(void)data;
}

static int nanohub_mailbox_write(void *data, uint8_t *tx, int length,
				 int timeout)
{
	struct nanohub_data *ipc = data;

	return contexthub_ipc_write(ipc->pdata->mailbox_client, tx, length, timeout);
}

static int nanohub_mailbox_read(void *data, uint8_t *rx, int max_length,
				int timeout)
{
	struct nanohub_data *ipc = data;

	return contexthub_ipc_read(ipc->pdata->mailbox_client, rx, max_length, timeout);
}

void nanohub_mailbox_comms_init(struct nanohub_comms *comms)
{
	comms->seq = 1;
	comms->timeout_write = 544;
	comms->timeout_ack = 272;
	comms->timeout_reply = 512;
	comms->open = nanohub_mailbox_open;
	comms->close = nanohub_mailbox_close;
	comms->write = nanohub_mailbox_write;
	comms->read = nanohub_mailbox_read;
}
#endif

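/*
 * Copy a received packet out of SRAM. With the nanohub HAL enabled the
 * data sits inside a struct rxbuf, so skip the leading pad byte and
 * return the real packet length; otherwise copy the raw bytes through.
 */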
static int contexthub_read_process(uint8_t *rx, u8 *raw_rx, u32 size)
{
#ifdef CONFIG_CHRE_SENSORHUB_HAL
	struct rxbuf *rxstruct;
	struct nanohub_packet *packet;

	rxstruct = (struct rxbuf *)raw_rx;
	packet = (struct nanohub_packet *)&rxstruct->pre_preamble;
	memcpy_fromio(rx, (void *)packet, size);

	return NANOHUB_PACKET_SIZE(packet->len);
#else
	memcpy_fromio(rx, (void *)raw_rx, size);
	return size;
#endif
}

static int contexthub_ipc_drv_init(struct contexthub_ipc_info *chub)
{
	struct device *chub_dev = chub->dev;
	int ret = 0;

	chub->ipc_map = ipc_get_chub_map();
	if (!chub->ipc_map)
		return -EINVAL;

	/* init debug-log */
	/* HACK for clang */
	chub->ipc_map->logbuf.logbuf.eq = 0;
	chub->ipc_map->logbuf.logbuf.dq = 0;
	chub->fw_log = log_register_buffer(chub_dev, 0,
					   (void *)&chub->ipc_map->logbuf.logbuf,
					   "fw", 1);
	if (!chub->fw_log)
		return -EINVAL;

	if (chub->irq_pin_len) {
		int i;

		for (i = 0; i < chub->irq_pin_len; i++) {
			u32 irq = gpio_to_irq(chub->irq_pins[i]);

			disable_irq_nosync(irq);
			dev_info(chub_dev, "%s: %d irq (pin:%d) is for chub. disable it\n",
				 __func__, irq, chub->irq_pins[i]);
		}
	}

#ifdef LOWLEVEL_DEBUG
	chub->dd_log_buffer = vmalloc(SZ_256K + sizeof(struct LOG_BUFFER *));
	chub->dd_log_buffer->index_reader = 0;
	chub->dd_log_buffer->index_writer = 0;
	chub->dd_log_buffer->size = SZ_256K;
	chub->dd_log =
	    log_register_buffer(chub_dev, 1, chub->dd_log_buffer, "dd", 0);
#endif
	ret = chub_dbg_init(chub, chub->chub_rt_log.buffer, chub->chub_rt_log.buffer_size);
	if (ret)
		dev_err(chub_dev, "%s: fails. ret:%d\n", __func__, ret);

	return ret;
}

#ifdef PACKET_LOW_DEBUG
static void debug_dumpbuf(unsigned char *buf, int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, 16, 1, buf, len,
		       false);
}
#endif

static inline int get_recv_channel(struct recv_ctrl *recv)
{
	int i;
	unsigned long min_order = 0;
	int min_order_evt = INVAL_CHANNEL;

	for (i = 0; i < IPC_BUF_NUM; i++) {
		if (recv->container[i]) {
			if (!min_order) {
				min_order = recv->container[i];
				min_order_evt = i;
			} else if (recv->container[i] < min_order) {
				min_order = recv->container[i];
				min_order_evt = i;
			}
		}
	}

	if (min_order_evt != INVAL_CHANNEL)
		recv->container[min_order_evt] = 0;

	return min_order_evt;
}

/* simple alive check function : don't use ipc map */
static bool contexthub_lowlevel_alive(struct contexthub_ipc_info *ipc)
{
	int ret;

	atomic_set(&ipc->chub_alive_lock.flag, 0);
	ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);
	ret = chub_wait_event(&ipc->chub_alive_lock);
	dev_info(ipc->dev, "%s done: ret:%d\n", __func__, ret);
	return atomic_read(&ipc->chub_alive_lock.flag);
}

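/*
 * Threshold for repeated CHUB_ERR_NANOHUB events: once the per-error
 * counter passes CHUB_RESET_THOLD, the driver escalates to CHUB_ST_ERR
 * and lets the debug work trigger a (silent) reset.
 */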
#define CHUB_RESET_THOLD (10)
/* handle errors of chub driver and fw */
static void handle_debug_work(struct contexthub_ipc_info *ipc, enum chub_err_type err)
{
	int need_reset = 0;
	int alive = contexthub_lowlevel_alive(ipc);

	dev_info(ipc->dev, "%s: err:%d, alive:%d, status:%d, in-reset:%d\n",
		 __func__, err, alive, __raw_readl(&ipc->chub_status),
		 __raw_readl(&ipc->in_reset));
	if ((atomic_read(&ipc->chub_status) == CHUB_ST_ERR) || !alive)
		need_reset = 1;

	/* reset */
	if (need_reset) {
#if defined(CHUB_RESET_ENABLE)
		int ret;

		dev_info(ipc->dev, "%s: request silent reset. err:%d, alive:%d, status:%d, in-reset:%d\n",
			 __func__, err, alive, __raw_readl(&ipc->chub_status),
			 __raw_readl(&ipc->in_reset));
		ret = contexthub_reset(ipc, 1, err);
		if (ret)
			dev_warn(ipc->dev, "%s: fails to reset:%d. status:%d\n",
				 __func__, ret, __raw_readl(&ipc->chub_status));
		else
			dev_info(ipc->dev, "%s: chub reset! should be recovery\n",
				 __func__);
#else
		dev_info(ipc->dev, "%s: chub hang. err:%d, alive:%d, status:%d, in-reset:%d. wait for sensor driver reset\n",
			 __func__, err, alive, __raw_readl(&ipc->chub_status),
			 __raw_readl(&ipc->in_reset));

		atomic_set(&ipc->chub_status, CHUB_ST_HANG);
#endif
	}
}

static void contexthub_handle_debug(struct contexthub_ipc_info *ipc,
				    enum chub_err_type err, bool enable_wq)
{
	dev_info(ipc->dev, "%s: err:%d(cnt:%d), enable_wq:%d\n",
		 __func__, err, ipc->err_cnt[err], enable_wq);

	/* set status in CHUB_ST_ERR */
	if ((err == CHUB_ERR_ITMON) || (err == CHUB_ERR_FW_WDT) ||
	    (err == CHUB_ERR_FW_FAULT) || (err == CHUB_ERR_CHUB_NO_RESPONSE)) {
		atomic_set(&ipc->chub_status, CHUB_ST_ERR);
		goto error_handler;
	}

	/* get chub-fw err */
	if (err == CHUB_ERR_NANOHUB) {
		enum ipc_debug_event fw_evt;

		if (contexthub_get_token(ipc)) {
			dev_warn(ipc->dev, "%s: get token\n", __func__);
			return;
		}
		fw_evt = ipc_read_debug_event(AP);
		if (fw_evt == IPC_DEBUG_CHUB_FAULT)
			err = CHUB_ERR_FW_FAULT;
		else if ((fw_evt == IPC_DEBUG_CHUB_ASSERT) || (fw_evt == IPC_DEBUG_CHUB_ERROR))
			err = CHUB_ERR_FW_ERROR;
		else
			dev_warn(ipc->dev, "%s: unsupported fw_evt: %d\n",
				 __func__, fw_evt);

		ipc_write_debug_event(AP, 0);
		contexthub_put_token(ipc);

		if (ipc->err_cnt[err] > CHUB_RESET_THOLD) {
			atomic_set(&ipc->chub_status, CHUB_ST_ERR);
			ipc->err_cnt[err] = 0;
			dev_info(ipc->dev, "%s: err:%d(cnt:%d), enter error status\n",
				 __func__, err, ipc->err_cnt[err]);
		} else {
			ipc->err_cnt[err]++;
			return;
		}
	}

error_handler:
	/* handle err */
	if (enable_wq) {
		ipc->cur_err |= (1 << err);
		schedule_work(&ipc->debug_work);
	} else {
		handle_debug_work(ipc, err);
	}
}

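/*
 * Multi-OS boot: the bootloader reports the sensor-variant index through
 * the shared value register; pick the matching os.checked_<n>.bin, load
 * it, write a handshake value back, and poll until the OS reports alive.
 */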
static void contexthub_select_os(struct contexthub_ipc_info *ipc)
{
	int trycnt = 0;
	u8 val = (u8)ipc_read_val(AP);

	if (!val) {
		dev_warn(ipc->dev, "%s: os number is invalid\n", __func__);
		val = 1;
	}
	ipc->sel_os = true;

	strcpy(ipc->os_name, os_image[val]);
	dev_info(ipc->dev, "%s selected os_name = %s\n", __func__, ipc->os_name);

	log_flush_all();
	contexthub_download_image(ipc, IPC_REG_OS);
	ipc_hw_write_shared_reg(AP, ipc->os_load, SR_BOOT_MODE);
	ipc_write_val(AP, 99);
	do {
		msleep(WAIT_CHUB_MS);
		contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
		if (++trycnt > WAIT_TRY_CNT)
			break;
	} while (atomic_read(&ipc->chub_status) != CHUB_ST_RUN);

	if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN)
		dev_info(ipc->dev, "%s done. contexthub status is %d\n",
			 __func__, atomic_read(&ipc->chub_status));
	else
		dev_warn(ipc->dev, "%s failed. contexthub status is %d\n",
			 __func__, atomic_read(&ipc->chub_status));

	dev_info(ipc->dev, "%s done: wakeup interrupt\n", __func__);
	chub_wake_event(&ipc->poweron_lock);
}

static DEFINE_MUTEX(dbg_mutex);
static void handle_debug_work_func(struct work_struct *work)
{
	struct contexthub_ipc_info *ipc =
	    container_of(work, struct contexthub_ipc_info, debug_work);
	int i;

	if (atomic_read(&ipc->chub_status) == CHUB_ST_POWER_ON && ipc->sel_os == false) {
		contexthub_select_os(ipc);
		return;
	}

	dev_info(ipc->dev, "%s: cur_err:0x%x, chub_status:%d\n",
		 __func__, ipc->cur_err, atomic_read(&ipc->chub_status));
	for (i = 0; i < CHUB_ERR_MAX; i++) {
		if (ipc->cur_err & (1 << i)) {
			dev_info(ipc->dev, "%s: loop: err:%d, cur_err:0x%x\n",
				 __func__, i, ipc->cur_err);
			ipc->cur_err &= ~(1 << i);
			handle_debug_work(ipc, i);
		}
	}
}

void contexthub_print_rtlog(struct contexthub_ipc_info *ipc, bool loop)
{
	if (!atomic_read(&ipc->log_work_active)) {
		if (contexthub_get_token(ipc)) {
			dev_warn(ipc->dev, "%s: get token\n", __func__);
			return;
		}
		if (ipc_logbuf_outprint(&ipc->chub_rt_log, loop))
			chub_dbg_dump_hw(ipc, CHUB_ERR_NANOHUB);
		contexthub_put_token(ipc);
	}
}

static void handle_log_work_func(struct work_struct *work)
{
	struct contexthub_ipc_info *ipc =
	    container_of(work, struct contexthub_ipc_info, log_work);
	int retrycnt = 0;

retry:
	if (contexthub_get_token(ipc)) {
		chub_wait_event(&ipc->reset_lock);
		if (!retrycnt) {
			retrycnt++;
			goto retry;
		}
		atomic_set(&ipc->log_work_active, 0);
		return;
	}
	ipc_logbuf_flush_on(1);
	mutex_lock(&log_mutex);
	if (ipc_logbuf_outprint(&ipc->chub_rt_log, 100))
		chub_dbg_dump_hw(ipc, CHUB_ERR_NANOHUB);
	mutex_unlock(&log_mutex);
	ipc_logbuf_flush_on(0);
	contexthub_put_token(ipc);
	atomic_set(&ipc->log_work_active, 0);
}

static inline void clear_err_cnt(struct contexthub_ipc_info *ipc, enum chub_err_type err)
{
	if (ipc->err_cnt[err])
		ipc->err_cnt[err] = 0;
}

int contexthub_ipc_read(struct contexthub_ipc_info *ipc, uint8_t *rx, int max_length,
			int timeout)
{
	unsigned long flag;
	int size = 0;
	int ret = 0;
	void *rxbuf;
	u64 time = 0; /* for debug */

	if (!atomic_read(&ipc->read_lock.cnt)) {
		time = sched_clock();

		spin_lock_irqsave(&ipc->read_lock.event.lock, flag);
		atomic_inc(&ipc->read_lock.flag);
		ret = wait_event_interruptible_timeout_locked(ipc->read_lock.event,
							      atomic_read(&ipc->read_lock.cnt),
							      msecs_to_jiffies(timeout));
		atomic_dec(&ipc->read_lock.flag);
		spin_unlock_irqrestore(&ipc->read_lock.event.lock, flag);
		if (ret < 0)
			dev_warn(ipc->dev,
				 "fails to get read ret:%d timeout:%d\n", ret, timeout);
	}

	if (__raw_readl(&ipc->chub_status) != CHUB_ST_RUN) {
		dev_warn(ipc->dev, "%s: chub isn't run:%d\n",
			 __func__, __raw_readl(&ipc->chub_status));
		return 0;
	}

	if (contexthub_get_token(ipc)) {
		dev_warn(ipc->dev, "no-active: read fails\n");
		return 0;
	}

	if (atomic_read(&ipc->read_lock.cnt)) {
		rxbuf = ipc_read_data(IPC_DATA_C2A, &size);
		if (size > 0) {
			ret = contexthub_read_process(rx, rxbuf, size);
			atomic_dec(&ipc->read_lock.cnt);
		}
	} else {
		dev_dbg(ipc->dev, "%s: read timeout(%d): c2aq_cnt:%d, recv_cnt:%d during %lld ns\n",
			__func__, ipc->err_cnt[CHUB_ERR_READ_FAIL],
			ipc_get_data_cnt(IPC_DATA_C2A), atomic_read(&ipc->read_lock.cnt),
			sched_clock() - time);
		if (ipc_get_data_cnt(IPC_DATA_C2A)) {
			ipc->err_cnt[CHUB_ERR_READ_FAIL]++;
			ipc_dump();
		}
		ret = -EINVAL;
	}
	contexthub_put_token(ipc);
	return ret;
}

int contexthub_ipc_write(struct contexthub_ipc_info *ipc,
			 uint8_t *tx, int length, int timeout)
{
	int ret;

	if (__raw_readl(&ipc->chub_status) != CHUB_ST_RUN) {
		dev_warn(ipc->dev, "%s: chub isn't run:%d\n",
			 __func__, __raw_readl(&ipc->chub_status));
		return 0;
	}

	if (contexthub_get_token(ipc)) {
		dev_warn(ipc->dev, "no-active: write fails\n");
		return 0;
	}

	mutex_lock(&wt_mutex);
	ret = ipc_write_data(IPC_DATA_A2C, tx, (u16)length);
	mutex_unlock(&wt_mutex);
	contexthub_put_token(ipc);
	if (ret) {
		pr_err("%s: fails to write data: ret:%d, len:%d errcnt:%d\n",
		       __func__, ret, length, ipc->err_cnt[CHUB_ERR_WRITE_FAIL]);
		contexthub_handle_debug(ipc, CHUB_ERR_WRITE_FAIL, 0);
		length = 0;
	} else {
		clear_err_cnt(ipc, CHUB_ERR_WRITE_FAIL);
	}
	return length;
}

static void check_rtc_time(void)
{
	struct rtc_device *chub_rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
	struct rtc_device *ap_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
	struct rtc_time chub_tm, ap_tm;
	time64_t chub_t, ap_t;

	rtc_read_time(chub_rtc, &chub_tm);
	rtc_read_time(ap_rtc, &ap_tm);

	chub_t = rtc_tm_sub(&chub_tm, &ap_tm);

	if (chub_t) {
		pr_info("nanohub %s: diff_time: %lld\n", __func__, chub_t);
		rtc_set_time(chub_rtc, &ap_tm);
	}

	chub_t = rtc_tm_to_time64(&chub_tm);
	ap_t = rtc_tm_to_time64(&ap_tm);
}

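/*
 * Bring the CHUB core out of reset. MAILBOX_EVT_POWER_ON performs the
 * cold-boot sequence (QCH clock handling and CPU-status polling on 9610,
 * PMU reset release); MAILBOX_EVT_RESET re-releases reset via PMU cal.
 * Afterwards the OS is polled with MAILBOX_EVT_CHUB_ALIVE until it
 * reaches CHUB_ST_RUN, unless the OS image is still to be selected.
 */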
static int contexthub_hw_reset(struct contexthub_ipc_info *ipc,
			       enum mailbox_event event)
{
	u32 val;
	int trycnt = 0;
	int ret = 0;
	int i;

	dev_info(ipc->dev, "%s. status:%d\n",
		 __func__, __raw_readl(&ipc->chub_status));

	/* clear ipc value */
	atomic_set(&ipc->wakeup_chub, CHUB_OFF);
	atomic_set(&ipc->irq1_apInt, C2A_OFF);
	atomic_set(&ipc->read_lock.cnt, 0);
	atomic_set(&ipc->read_lock.flag, 0);
	atomic_set(&ipc->log_work_active, 0);

	/* chub err init */
	for (i = 0; i < CHUB_ERR_MAX; i++) {
		if (i == CHUB_ERR_RESET_CNT)
			continue;
		ipc->err_cnt[i] = 0;
	}

	ipc_hw_write_shared_reg(AP, ipc->os_load, SR_BOOT_MODE);
	ipc_set_chub_clk((u32)ipc->clkrate);
	ipc->chub_rt_log.loglevel = CHUB_RT_LOG_DUMP_PRT;
	ipc_set_chub_bootmode(BOOTMODE_COLD, ipc->chub_rt_log.loglevel);
	switch (event) {
	case MAILBOX_EVT_POWER_ON:
#ifdef NEED_TO_RTC_SYNC
		check_rtc_time();
#endif
		if (atomic_read(&ipc->chub_status) == CHUB_ST_NO_POWER) {
			atomic_set(&ipc->chub_status, CHUB_ST_POWER_ON);

			/* enable Dump gpr */
			IPC_HW_WRITE_DUMPGPR_CTRL(ipc->chub_dumpgpr, 0x1);

#if defined(CONFIG_SOC_EXYNOS9610)
			/* cmu cm4 clock - gating */
			val = __raw_readl(ipc->cmu_chub_qch +
					  REG_QCH_CON_CM4_SHUB_QCH);
			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
			__raw_writel((val | IGNORE_FORCE_PM_EN),
				     ipc->cmu_chub_qch +
				     REG_QCH_CON_CM4_SHUB_QCH);
#endif
			/* pmu reset-release on CHUB */
			val = __raw_readl(ipc->pmu_chub_reset +
					  REG_CHUB_RESET_CHUB_OPTION);
			__raw_writel((val | CHUB_RESET_RELEASE_VALUE),
				     ipc->pmu_chub_reset +
				     REG_CHUB_RESET_CHUB_OPTION);

#if defined(CONFIG_SOC_EXYNOS9610)
			/* check chub cpu status */
			do {
				val = __raw_readl(ipc->pmu_chub_reset +
						  REG_CHUB_RESET_CHUB_CONFIGURATION);
				msleep(WAIT_TIMEOUT_MS);
				if (++trycnt > WAIT_TRY_CNT) {
					dev_warn(ipc->dev,
						 "chub cpu status is not set correctly\n");
					break;
				}
			} while ((val & 0x1) == 0x0);

			/* cmu cm4 clock - release */
			val = __raw_readl(ipc->cmu_chub_qch +
					  REG_QCH_CON_CM4_SHUB_QCH);
			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
			__raw_writel((val | IGNORE_FORCE_PM_EN | CLOCK_REQ),
				     ipc->cmu_chub_qch +
				     REG_QCH_CON_CM4_SHUB_QCH);

			val = __raw_readl(ipc->cmu_chub_qch +
					  REG_QCH_CON_CM4_SHUB_QCH);
			val &= ~(IGNORE_FORCE_PM_EN | CLOCK_REQ | ENABLE);
			__raw_writel((val | CLOCK_REQ),
				     ipc->cmu_chub_qch +
				     REG_QCH_CON_CM4_SHUB_QCH);
#endif
		} else {
			ret = -EINVAL;
			dev_warn(ipc->dev,
				 "fails to power on contexthub. Status is %d\n",
				 atomic_read(&ipc->chub_status));
		}
		break;
	case MAILBOX_EVT_RESET:
		ret = pmucal_shub_reset_release();
		break;
	default:
		break;
	}

	if (ipc->sel_os == false) {
		dev_info(ipc->dev, "%s -> os select\n", __func__);
		return 0;
	}

	if (ret)
		return ret;

	/* wait active */
	dev_info(ipc->dev, "%s: alive check\n", __func__);
	trycnt = 0;
	do {
		msleep(WAIT_CHUB_MS);
		contexthub_ipc_write_event(ipc, MAILBOX_EVT_CHUB_ALIVE);
		if (++trycnt > WAIT_TRY_CNT)
			break;
	} while (atomic_read(&ipc->chub_status) != CHUB_ST_RUN);

	if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) {
		dev_info(ipc->dev, "%s done. contexthub status is %d\n",
			 __func__, atomic_read(&ipc->chub_status));
		return 0;
	}

	dev_warn(ipc->dev, "%s fails. contexthub status is %d\n",
		 __func__, atomic_read(&ipc->chub_status));
	return -ETIMEDOUT;
}

static void contexthub_config_init(struct contexthub_ipc_info *chub)
{
	/* BAAW-P-APM-CHUB for CHUB to access APM_CMGP. 1 window is used */
	if (chub->chub_baaw) {
		IPC_HW_WRITE_BAAW_CHUB0(chub->chub_baaw,
					chub->baaw_info.baaw_p_apm_chub_start);
		IPC_HW_WRITE_BAAW_CHUB1(chub->chub_baaw,
					chub->baaw_info.baaw_p_apm_chub_end);
		IPC_HW_WRITE_BAAW_CHUB2(chub->chub_baaw,
					chub->baaw_info.baaw_p_apm_chub_remap);
		IPC_HW_WRITE_BAAW_CHUB3(chub->chub_baaw, BAAW_RW_ACCESS_ENABLE);
	}

	/* enable mailbox ipc */
	ipc_set_base(chub->sram);
	ipc_set_owner(AP, chub->mailbox, IPC_SRC);
}
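/* index of the sensor-variant digit in "os.checked_N.bin" */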
#define os_name_idx (11)

int contexthub_get_sensortype(struct contexthub_ipc_info *ipc, char *buf)
{
	struct sensor_map *sensor_map;
	struct saved_setting *pack = (struct saved_setting *)buf;
	int len = 0;
	int trycnt = 0;
	int ret;
	unsigned int *tmp = (unsigned int *)pack;
	int i;

	if (atomic_read(&ipc->chub_status) != CHUB_ST_RUN) {
		dev_warn(ipc->dev, "%s: fails, chub isn't active, status:%d, inreset:%d\n",
			 __func__, atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
		return -EINVAL;
	}

	ret = contexthub_get_token(ipc);
	if (ret) {
		do {
			msleep(WAIT_CHUB_MS);
			if (++trycnt > WAIT_TRY_CNT)
				break;
			ret = contexthub_get_token(ipc);
		} while (ret);

		if (ret) {
			dev_warn(ipc->dev, "%s fails to get token\n", __func__);
			return -EINVAL;
		}
	}
	sensor_map = ipc_get_base(IPC_REG_IPC_SENSORINFO);
	if (ipc_have_sensor_info(sensor_map)) {
		pack->num_os = ipc->os_name[os_name_idx] - '0';
		len = ipc_get_offset(IPC_REG_IPC_SENSORINFO);
		dev_info(ipc->dev, "%s: get sensorinfo: %p (os:%d, size:%d, %zu / %zu %zu %zu)\n",
			 __func__, sensor_map, pack->num_os, len,
			 sizeof(struct saved_setting), sizeof(pack->magic),
			 sizeof(pack->num_os), sizeof(pack->readbuf));
		memcpy(&pack->readbuf, ipc_get_sensor_base(), len);
		for (i = 0; i < SENSOR_TYPE_MAX; i++)
			if (sensor_map->active_sensor_list[i])
				dev_info(ipc->dev, "%s: get sensorinfo: type:%d, id:%d - %d\n",
					 __func__, i, sensor_map->active_sensor_list[i],
					 pack->readbuf[i]);
	} else {
		dev_err(ipc->dev, "%s: fails to get sensorinfo: %p\n", __func__, sensor_map);
	}
	contexthub_put_token(ipc);

	for (i = 0; i < sizeof(struct saved_setting) / sizeof(int); i++, tmp++)
		pr_info("%s: %d: 0x%x\n", __func__, i, *tmp);
	return sizeof(struct saved_setting);
}

void contexthub_ipc_status_reset(struct contexthub_ipc_info *ipc)
{
	/* clear ipc value */
	atomic_set(&ipc->wakeup_chub, CHUB_OFF);
	atomic_set(&ipc->irq1_apInt, C2A_OFF);
	atomic_set(&ipc->read_lock.cnt, 0x0);
	atomic_set(&ipc->log_work_active, 0);
	memset_io(ipc_get_base(IPC_REG_IPC_A2C), 0, ipc_get_offset(IPC_REG_IPC_A2C));
	memset_io(ipc_get_base(IPC_REG_IPC_C2A), 0, ipc_get_offset(IPC_REG_IPC_C2A));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C_CTRL), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C_CTRL));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A_CTRL), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A_CTRL));
	ipc_hw_clear_all_int_pend_reg(AP);
	ipc_hw_set_mcuctrl(AP, 0x1);
}

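/*
 * Top-level event entry point. Power/reset/shutdown/alive and IRQ
 * masking are handled directly; every other event needs the IPC token
 * (and a running OS) and is forwarded to the firmware via the event
 * queue or the debug register.
 */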
int contexthub_ipc_write_event(struct contexthub_ipc_info *ipc,
			       enum mailbox_event event)
{
	u32 val;
	int ret = 0;
	int need_ipc = 0;

	switch (event) {
	case MAILBOX_EVT_INIT_IPC:
		ret = contexthub_ipc_drv_init(ipc);
		break;
	case MAILBOX_EVT_POWER_ON:
		ret = contexthub_hw_reset(ipc, event);
		if (!ret)
			log_flush_all();
		break;
	case MAILBOX_EVT_RESET:
		if (atomic_read(&ipc->chub_status) == CHUB_ST_SHUTDOWN) {
			ret = contexthub_hw_reset(ipc, event);
		} else {
			dev_err(ipc->dev,
				"contexthub status isn't shutdown. fails to reset\n");
			ret = -EINVAL;
		}
		break;
	case MAILBOX_EVT_SHUTDOWN:
		/* assert */
		if (ipc->block_reset) {
			/* pmu call assert */
			ret = pmucal_shub_reset_assert();
			if (ret) {
				pr_err("%s: reset assert fail\n", __func__);
				return ret;
			}

			/* pmu call reset-release_config */
			ret = pmucal_shub_reset_release_config();
			if (ret) {
				pr_err("%s: reset release cfg fail\n", __func__);
				return ret;
			}

			/* tzpc setting */
			ret = exynos_smc(SMC_CMD_CONN_IF,
					 (EXYNOS_SHUB << 32) |
					 EXYNOS_SET_CONN_TZPC, 0, 0);
			if (ret) {
				pr_err("%s: TZPC setting fail\n",
				       __func__);
				return -EINVAL;
			}
			dev_info(ipc->dev, "%s: tzpc set\n", __func__);
			/* baaw config */
			contexthub_config_init(ipc);
		} else {
			val = __raw_readl(ipc->pmu_chub_reset +
					  REG_CHUB_CPU_STATUS);
			if (val & (1 << REG_CHUB_CPU_STATUS_BIT_STANDBYWFI)) {
				val = __raw_readl(ipc->pmu_chub_reset +
						  REG_CHUB_RESET_CHUB_CONFIGURATION);
				__raw_writel(val & ~(1 << 0),
					     ipc->pmu_chub_reset +
					     REG_CHUB_RESET_CHUB_CONFIGURATION);
			} else {
				dev_err(ipc->dev,
					"fails to shutdown contexthub. cpu_status: 0x%x\n",
					val);
				return -EINVAL;
			}
		}
		atomic_set(&ipc->chub_status, CHUB_ST_SHUTDOWN);
		break;
	case MAILBOX_EVT_CHUB_ALIVE:
		ipc_hw_write_shared_reg(AP, AP_WAKE, SR_3);
		val = contexthub_lowlevel_alive(ipc);
		if (val) {
			atomic_set(&ipc->chub_status, CHUB_ST_RUN);
			dev_info(ipc->dev, "%s : chub is alive", __func__);
		} else if (ipc->sel_os == true) {
			dev_err(ipc->dev,
				"%s : chub isn't alive, should be reset. status:%d, inreset:%d\n",
				__func__, atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
			if (!atomic_read(&ipc->in_reset)) {
#ifdef USE_NO_PANIC_ON_POWERON
				if (atomic_read(&ipc->chub_status) == CHUB_ST_POWER_ON) {
					atomic_set(&ipc->chub_status, CHUB_ST_NO_RESPONSE);
					/* hack: don't panic on chub poweron */
					contexthub_reset(ipc, 1, CHUB_ERR_NONE);
				} else {
					atomic_set(&ipc->chub_status, CHUB_ST_NO_RESPONSE);
					contexthub_handle_debug(ipc, CHUB_ERR_CHUB_NO_RESPONSE, 0);
				}
#else
				atomic_set(&ipc->chub_status, CHUB_ST_NO_RESPONSE);
				contexthub_handle_debug(ipc, CHUB_ERR_CHUB_NO_RESPONSE, 0);
#endif
			} else {
				dev_info(ipc->dev, "%s: skip to handle debug in reset\n", __func__);
			}
			ret = -EINVAL;
		}
		break;
	case MAILBOX_EVT_ENABLE_IRQ:
		/* if enable, mask from CHUB IRQ, else, unmask from CHUB IRQ */
		ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INT);
		ipc_hw_unmask_irq(AP, IRQ_EVT_C2A_INTCLR);
		break;
	case MAILBOX_EVT_DISABLE_IRQ:
		ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INT);
		ipc_hw_mask_irq(AP, IRQ_EVT_C2A_INTCLR);
		break;
	default:
		need_ipc = 1;
		break;
	}

	if (need_ipc) {
		if (atomic_read(&ipc->chub_status) != CHUB_ST_RUN) {
			dev_warn(ipc->dev, "%s: event:%d/%d fails, chub isn't active, status:%d, inreset:%d\n",
				 __func__, event, MAILBOX_EVT_MAX,
				 atomic_read(&ipc->chub_status), atomic_read(&ipc->in_reset));
			return -EINVAL;
		}
		if (contexthub_get_token(ipc))
			return -EINVAL;

		/* handle ipc */
		switch (event) {
		case MAILBOX_EVT_RT_LOGLEVEL:
			ipc_logbuf_loglevel(ipc->chub_rt_log.loglevel, 1);
			break;
		case MAILBOX_EVT_ERASE_SHARED:
			memset(ipc_get_base(IPC_REG_SHARED), 0, ipc_get_offset(IPC_REG_SHARED));
			break;
		case MAILBOX_EVT_DUMP_STATUS:
			/* dump nanohub kernel status */
			dev_info(ipc->dev, "Request to dump chub fw status\n");
			ipc_write_debug_event(AP, (u32)MAILBOX_EVT_DUMP_STATUS);
			ret = ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
			break;
		case MAILBOX_EVT_WAKEUP_CLR:
			if (atomic_read(&ipc->wakeup_chub) == CHUB_ON) {
				ret = ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP_CLR);
				if (ret >= 0)
					atomic_set(&ipc->wakeup_chub, CHUB_OFF);
				else
					dev_warn(ipc->dev, "%s: fails to set wakeup-clr. ret:%d", __func__, ret);
			}
			break;
		case MAILBOX_EVT_WAKEUP:
			if (atomic_read(&ipc->wakeup_chub) == CHUB_OFF) {
				ret = ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_WAKEUP);
				if (ret >= 0)
					atomic_set(&ipc->wakeup_chub, CHUB_ON);
				else
					dev_warn(ipc->dev, "%s: fails to set wakeup. ret:%d", __func__, ret);
			}
			break;
		default:
			/* handle ipc utc */
			if ((int)event < IPC_DEBUG_UTC_MAX) {
				ipc->utc_run = event;
				if ((int)event == IPC_DEBUG_UTC_TIME_SYNC)
					check_rtc_time();
				ipc_write_debug_event(AP, (u32)event);
				ret = ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_DEBUG);
			}
			break;
		}
		contexthub_put_token(ipc);

		/* add silent reset on write event error */
		if ((event == MAILBOX_EVT_WAKEUP_CLR) || (event == MAILBOX_EVT_WAKEUP)) {
			if (ret < 0)
				contexthub_handle_debug(ipc, CHUB_ERR_EVTQ_ADD, 0);
			else
				clear_err_cnt(ipc, CHUB_ERR_EVTQ_ADD);
		}
	}
	return ret;
}

int contexthub_poweron(struct contexthub_ipc_info *ipc)
{
	int ret = 0;
	struct device *dev = ipc->dev;
	struct chub_bootargs *map;

	if (!atomic_read(&ipc->chub_status)) {
		memset_io(ipc->sram, 0, ipc->sram_size);
		ret = contexthub_download_image(ipc, IPC_REG_BL);
		if (ret) {
			dev_warn(dev, "fails to download bootloader\n");
			return ret;
		}

		if (ipc_get_offset(IPC_REG_DUMP) != ipc->sram_size)
			dev_warn(dev, "sram size doesn't match kernel:%d, fw:%d\n",
				 ipc->sram_size, ipc_get_offset(IPC_REG_DUMP));

		ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_INIT_IPC);
		if (ret) {
			dev_warn(dev, "fails to init ipc\n");
			return ret;
		}

		if (!strcmp(ipc->os_name, "os.checked_0.bin") || ipc->os_name[0] != 'o') {
			map = ipc_get_base(IPC_REG_BL_MAP);
			ipc->sel_os = !(map->bootmode);
		} else {
			dev_info(dev, "saved os_name: %s", ipc->os_name);
		}

		ret = contexthub_download_image(ipc, IPC_REG_OS);
		if (ret) {
			dev_warn(dev, "fails to download kernel\n");
			return ret;
		}

		ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_POWER_ON);
		if (ret) {
			dev_warn(dev, "fails to poweron\n");
			return ret;
		}
		if (atomic_read(&ipc->chub_status) == CHUB_ST_RUN) {
			dev_info(dev, "%s: contexthub power-on", __func__);
		} else {
			if (ipc->sel_os) {
				dev_warn(dev, "contexthub failed to power-on");
			} else {
				dev_info(dev, "%s: wait for multi-os poweron\n", __func__);
				ret = chub_wait_event(&ipc->poweron_lock);
				dev_info(dev, "%s: multi-os poweron %s, status:%d, ret:%d, flag:%d\n",
					 __func__,
					 atomic_read(&ipc->chub_status) == CHUB_ST_RUN ?
					 "success" : "fails",
					 atomic_read(&ipc->chub_status), ret,
					 atomic_read(&ipc->poweron_lock.flag));
			}
		}
	}

	return ret;
}

static int contexthub_download_and_check_image(struct contexthub_ipc_info *ipc, enum ipc_region reg)
{
	u32 *fw = vmalloc(ipc_get_offset(reg));
	int ret = 0;

	if (!fw)
		return contexthub_download_image(ipc, reg);

	memcpy_fromio(fw, ipc_get_base(reg), ipc_get_offset(reg));
	ret = contexthub_download_image(ipc, reg);
	if (ret) {
		dev_err(ipc->dev, "%s: download bl(%d) fails\n", __func__, reg == IPC_REG_BL);
		goto out;
	}

	ret = memcmp(fw, ipc_get_base(reg), ipc_get_offset(reg));
	if (ret) {
		int i;
		u32 *fw_image = (u32 *)ipc_get_base(reg);

		dev_err(ipc->dev, "%s: fw(%lx) doesn't match with size %d\n",
			__func__, (unsigned long)ipc_get_base(reg), ipc_get_offset(reg));
		for (i = 0; i < ipc_get_offset(reg) / 4; i++)
			if (fw[i] != fw_image[i]) {
				dev_err(ipc->dev, "fw[%d] %x -> wrong %x\n", i, fw_image[i], fw[i]);
				print_hex_dump(KERN_CONT, "before:", DUMP_PREFIX_OFFSET, 16, 1, &fw[i], 64, false);
				print_hex_dump(KERN_CONT, "after:", DUMP_PREFIX_OFFSET, 16, 1, &fw_image[i], 64, false);
				ret = -EINVAL;
				break;
			}
	}
out:
	dev_info(ipc->dev, "%s: download and checked bl(%d) ret:%d\n", __func__, reg == IPC_REG_BL, ret);
	vfree(fw);
	return ret;
}

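/*
 * Silent reset path: drain all IPC users, dump the current state, shut
 * the core down (under pmu_shutdown_mutex), scrub the IPC queues,
 * optionally reload the BL/OS images, then release reset and wait for
 * the OS to come back up.
 */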
int contexthub_reset(struct contexthub_ipc_info *ipc, bool force_load, enum chub_err_type err)
{
	int ret;
	int trycnt = 0;

	dev_info(ipc->dev, "%s: force:%d, status:%d, in-reset:%d, err:%d, user:%d\n",
		 __func__, force_load, atomic_read(&ipc->chub_status),
		 atomic_read(&ipc->in_reset), err, atomic_read(&ipc->in_use_ipc));
	mutex_lock(&reset_mutex);
	if (!force_load && (atomic_read(&ipc->chub_status) == CHUB_ST_RUN)) {
		mutex_unlock(&reset_mutex);
		dev_info(ipc->dev, "%s: out status:%d\n", __func__, atomic_read(&ipc->chub_status));
		return 0;
	}
	atomic_inc(&ipc->in_reset);

	/* wait for ipc free */
	do {
		msleep(WAIT_CHUB_MS);
		if (++trycnt > RESET_WAIT_TRY_CNT) {
			dev_info(ipc->dev, "%s: can't get lock. in_use_ipc: %d\n",
				 __func__, atomic_read(&ipc->in_use_ipc));
			ret = -EINVAL;
			goto out;
		}
		dev_info(ipc->dev, "%s: wait for ipc user free: %d\n",
			 __func__, atomic_read(&ipc->in_use_ipc));
	} while (atomic_read(&ipc->in_use_ipc));

	/* debug dump */
	chub_dbg_dump_hw(ipc, err);

	dev_info(ipc->dev, "%s: start reset status:%d\n", __func__, atomic_read(&ipc->chub_status));

	if (!ipc->block_reset) {
		/* core reset */
		ipc_add_evt(IPC_EVT_A2C, IRQ_EVT_A2C_SHUTDOWN);
		msleep(100); /* wait for shut down time */
	}

	/* shutdown */
	mutex_lock(&pmu_shutdown_mutex);
	dev_info(ipc->dev, "%s: enter shutdown\n", __func__);
	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_SHUTDOWN);
	if (ret) {
		dev_err(ipc->dev, "%s: shutdown fails, ret:%d\n", __func__, ret);
		mutex_unlock(&pmu_shutdown_mutex);
		goto out;
	}
	dev_info(ipc->dev, "%s: out shutdown\n", __func__);
	mutex_unlock(&pmu_shutdown_mutex);

	/* image download */
	dev_info(ipc->dev, "%s: clear ipc:%p, %d\n", __func__,
		 ipc_get_base(IPC_REG_IPC), ipc_get_offset(IPC_REG_IPC));
	memset_io(ipc_get_base(IPC_REG_IPC_A2C), 0, ipc_get_offset(IPC_REG_IPC_A2C));
	memset_io(ipc_get_base(IPC_REG_IPC_C2A), 0, ipc_get_offset(IPC_REG_IPC_C2A));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_A2C), 0, ipc_get_offset(IPC_REG_IPC_EVT_A2C));
	memset_io(ipc_get_base(IPC_REG_IPC_EVT_C2A), 0, ipc_get_offset(IPC_REG_IPC_EVT_C2A));
	memset_io(ipc_get_base(IPC_REG_LOG), 0, ipc_get_offset(IPC_REG_LOG));
	if (ipc->block_reset || force_load) {
		ret = contexthub_download_image(ipc, IPC_REG_BL);
		if (!ret) {
			if (force_load) /* can use new binary */
				ret = contexthub_download_image(ipc, IPC_REG_OS);
			else /* use previous binary */
				ret = contexthub_download_and_check_image(ipc, IPC_REG_OS);

			if (ret) {
				dev_err(ipc->dev, "%s: download os fails\n", __func__);
				ret = -EINVAL;
				goto out;
			}
		} else {
			dev_err(ipc->dev, "%s: download bl fails\n", __func__);
			ret = -EINVAL;
			goto out;
		}
	}
	/* reset */
	ret = contexthub_ipc_write_event(ipc, MAILBOX_EVT_RESET);
	if (ret) {
		dev_err(ipc->dev, "%s: reset fails, ret:%d\n", __func__, ret);
	} else {
		dev_info(ipc->dev, "%s: chub reset! (cnt:%d)\n",
			 __func__, ipc->err_cnt[CHUB_ERR_RESET_CNT]);
		ipc->err_cnt[CHUB_ERR_RESET_CNT]++;
		atomic_set(&ipc->in_use_ipc, 0);
	}
out:
	msleep(100); /* wakeup delay */
	chub_wake_event(&ipc->reset_lock);
	atomic_dec(&ipc->in_reset);
	mutex_unlock(&reset_mutex);

	return ret;
}

int contexthub_download_image(struct contexthub_ipc_info *ipc, enum ipc_region reg)
{
	const struct firmware *entry;
	int ret;

	if (reg == IPC_REG_BL) {
		dev_info(ipc->dev, "%s: download bl\n", __func__);
		ret = request_firmware(&entry, "bl.unchecked.bin", ipc->dev);
	} else if (reg == IPC_REG_OS) {
		dev_info(ipc->dev, "%s: download %s\n", __func__, ipc->os_name);
		ret = request_firmware(&entry, ipc->os_name, ipc->dev);
	} else {
		ret = -EINVAL;
	}

	if (ret) {
		dev_err(ipc->dev, "%s: bl(%d) request_firmware failed\n",
			__func__, reg == IPC_REG_BL);
		return ret;
	}
	memcpy_toio(ipc_get_base(reg), entry->data, entry->size);
	dev_info(ipc->dev, "%s: bl:%d, bin(size:%d) on %lx\n",
		 __func__, reg == IPC_REG_BL, (int)entry->size, (unsigned long)ipc_get_base(reg));
	release_firmware(entry);

	return 0;
}

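/*
 * Demux a single C2A event: debug events are escalated through
 * contexthub_handle_debug(), INT/INTCLR toggle the host notification,
 * and any channel event below IRQ_EVT_CH_MAX bumps the read counter and
 * wakes a blocked contexthub_ipc_read(). Finally, kick the log work if
 * the firmware log buffer has data.
 */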
static void handle_irq(struct contexthub_ipc_info *ipc, enum irq_evt_chub evt)
{
	int err;

	switch (evt) {
	case IRQ_EVT_C2A_DEBUG:
		err = (ipc_read_debug_event(AP) == IPC_DEBUG_CHUB_FAULT) ?
		    CHUB_ERR_FW_FAULT : CHUB_ERR_NANOHUB;
		dev_err(ipc->dev, "%s: c2a_debug: debug:%d, err:%d\n",
			__func__, ipc_read_debug_event(AP), err);
		contexthub_handle_debug(ipc, err, 1);
		break;
	case IRQ_EVT_C2A_INT:
		if (atomic_read(&ipc->irq1_apInt) == C2A_OFF) {
			atomic_set(&ipc->irq1_apInt, C2A_ON);
			contexthub_notify_host(ipc);
		}
		break;
	case IRQ_EVT_C2A_INTCLR:
		atomic_set(&ipc->irq1_apInt, C2A_OFF);
		break;
	case IRQ_EVT_C2A_LOG:
		break;
	default:
		if (evt < IRQ_EVT_CH_MAX) {
			int lock;

			atomic_inc(&ipc->read_lock.cnt);
			/* TODO: is the lock required here? */
			spin_lock(&ipc->read_lock.event.lock);
			lock = atomic_read(&ipc->read_lock.flag);
			spin_unlock(&ipc->read_lock.event.lock);
			if (lock)
				wake_up_interruptible_sync(&ipc->read_lock.event);
		} else {
			dev_warn(ipc->dev, "%s: invalid %d event",
				 __func__, evt);
			return;
		}
		break;
	}
	if (ipc_logbuf_filled() && !atomic_read(&ipc->log_work_active)) {
		ipc->log_work_reqcnt++; /* debug */
		atomic_set(&ipc->log_work_active, 1);
		schedule_work(&ipc->log_work);
	}
}

static irqreturn_t contexthub_irq_handler(int irq, void *data)
{
	struct contexthub_ipc_info *ipc = data;
	int start_index = ipc_hw_read_int_start_index(AP);
	unsigned int status = ipc_hw_read_int_status_reg(AP);
	struct ipc_evt_buf *cur_evt;
	enum chub_err_type err = 0;
	enum irq_chub evt = 0;
	int irq_num = IRQ_EVT_CHUB_ALIVE + start_index;
	u32 status_org = status; /* for debug */
	struct ipc_evt *ipc_evt = ipc_get_base(IPC_REG_IPC_EVT_C2A);

	/* chub alive interrupt handle */
	if (status & (1 << irq_num)) {
		status &= ~(1 << irq_num);
		ipc_hw_clear_int_pend_reg(AP, irq_num);
		if (atomic_read(&ipc->chub_status) == CHUB_ST_POWER_ON && ipc->sel_os == false) {
			schedule_work(&ipc->debug_work);
			return IRQ_HANDLED;
		}

		/* set wakeup flag for chub_alive_lock */
		chub_wake_event(&ipc->chub_alive_lock);
	}
	irq_num = IRQ_EVT_C2A_LOG + start_index;
	if (status & (1 << irq_num)) {
		status &= ~(1 << irq_num);
		ipc_hw_clear_int_pend_reg(AP, irq_num);
		handle_irq(ipc, IRQ_EVT_C2A_LOG);
	}

#ifdef CHECK_HW_TRIGGER
	/* chub ipc interrupt handle */
	while (status) {
		cur_evt = ipc_get_evt(IPC_EVT_C2A);

		if (cur_evt) {
			evt = cur_evt->evt;
			irq_num = cur_evt->irq + start_index;

			if (!ipc_evt->ctrl.pending[cur_evt->irq])
				CSP_PRINTF_ERROR
				    ("%s: no-sw-trigger irq:%d(%d+%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__,
				     irq_num, cur_evt->irq, start_index, evt, status_org, status, ipc_hw_read_int_status_reg(AP));

			/* check match evtq and hw interrupt pending */
			if (!(status & (1 << irq_num))) {
				err = CHUB_ERR_EVTQ_NO_HW_TRIGGER;
				CSP_PRINTF_ERROR
				    ("%s: no-hw-trigger irq:%d(%d+%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__,
				     irq_num, cur_evt->irq, start_index, evt, status_org, status, ipc_hw_read_int_status_reg(AP));
			}
		} else {
			err = CHUB_ERR_EVTQ_EMTPY;
			CSP_PRINTF_ERROR
			    ("%s: evt-empty irq:%d(%d), evt:%d, status:0x%x->0x%x(SR:0x%x)\n", __func__,
			     irq_num, start_index, evt, status_org, status, ipc_hw_read_int_status_reg(AP));
			break;
		}
		ipc_hw_clear_int_pend_reg(AP, irq_num);
		ipc_evt->ctrl.pending[cur_evt->irq] = 0;
		handle_irq(ipc, (u32)evt);
		status &= ~(1 << irq_num);
	}
#else
	if (status) {
		int i;

		for (i = start_index; i < irq_num; i++) {
			if (status & (1 << i)) {
				cur_evt = ipc_get_evt(IPC_EVT_C2A);
				if (cur_evt) {
					evt = cur_evt->evt;
					handle_irq(ipc, (u32)evt);
					ipc_hw_clear_int_pend_reg(AP, i);
				} else {
					err = CHUB_ERR_EVTQ_EMTPY;
					break;
				}
			}
		}
	}
#endif

	if (err) {
		dev_err(ipc->dev, "nanohub: inval irq err(%d):start_irqnum:%d,evt(%p):%d,irq_hw:%d,status_reg:0x%x->0x%x(0x%x,0x%x)\n",
			err, start_index, cur_evt, evt, irq_num,
			status_org, status, ipc_hw_read_int_status_reg(AP),
			ipc_hw_read_int_gen_reg(AP));
		ipc_hw_clear_all_int_pend_reg(AP);
		contexthub_handle_debug(ipc, err, 1);
	} else {
		clear_err_cnt(ipc, CHUB_ERR_EVTQ_EMTPY);
		clear_err_cnt(ipc, CHUB_ERR_EVTQ_NO_HW_TRIGGER);
	}
	return IRQ_HANDLED;
}

#if defined(CHUB_RESET_ENABLE)
static irqreturn_t contexthub_irq_wdt_handler(int irq, void *data)
{
	struct contexthub_ipc_info *ipc = data;

	dev_info(ipc->dev, "%s called\n", __func__);
	disable_irq_nosync(ipc->irq_wdt);
	ipc->irq_wdt_disabled = 1;
	contexthub_handle_debug(ipc, CHUB_ERR_FW_WDT, 1);

	return IRQ_HANDLED;
}
#endif

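/*
 * devm_clk_get() followed by clk_prepare()/clk_enable(); returns an
 * ERR_PTR on any failure so callers can test with IS_ERR().
 */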
static struct clk *devm_clk_get_and_prepare(struct device *dev,
					    const char *name)
{
	struct clk *clk = NULL;
	int ret;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		dev_err(dev, "Failed to get clock %s\n", name);
		goto error;
	}

	ret = clk_prepare(clk);
	if (ret < 0) {
		dev_err(dev, "Failed to prepare clock %s\n", name);
		clk = ERR_PTR(ret);
		goto error;
	}

	ret = clk_enable(clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable clock %s\n", name);
		clk = ERR_PTR(ret);
	}

error:
	return clk;
}

#if defined(CONFIG_SOC_EXYNOS9610)
extern int cal_dll_apm_enable(void);
#endif

static void __iomem *get_iomem(struct platform_device *pdev,
			       const char *name, u32 *size)
{
	struct resource *res;
	void __iomem *ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (IS_ERR_OR_NULL(res)) {
		dev_err(&pdev->dev, "Failed to get %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ret)) {
		dev_err(&pdev->dev, "fails to get %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	if (size)
		*size = resource_size(res);
	dev_info(&pdev->dev, "%s: %s(%p) is mapped on %p with size of %zu\n",
		 __func__, name, (void *)res->start, ret, (size_t)resource_size(res));

	return ret;
}

static __init int contexthub_ipc_hw_init(struct platform_device *pdev,
					 struct contexthub_ipc_info *chub)
{
	int ret;
	struct resource *res;
	const char *os;
	const char *resetmode;
	const char *selectos;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const char *string_array[10];
	int chub_clk_len;
	struct clk *clk;
	int i;

	if (!node) {
		dev_err(dev, "driver doesn't support non-dt\n");
		return -ENODEV;
	}

	/* get os type from dt */
	os = of_get_property(node, "os-type", NULL);
	if (!os || !strcmp(os, "none") || !strcmp(os, "pass")) {
		dev_err(dev, "not using contexthub\n");
		chub->os_load = 0;
		return -ENODEV;
	} else {
		chub->os_load = 1;
		strcpy(chub->os_name, os);
	}

	/* get resetmode from dt */
	resetmode = of_get_property(node, "reset-mode", NULL);
	if (!resetmode || !strcmp(resetmode, "block"))
		chub->block_reset = 1;
	else
		chub->block_reset = 0;

	/* get os select from dt */
	selectos = of_get_property(node, "os-select", NULL);
	if (!selectos || strcmp(selectos, "true")) {
		dev_info(dev, "multi os disabled : %s\n", selectos);
		chub->sel_os = true;
	} else {
		dev_info(dev, "multi os enabled : %s\n", selectos);
		chub->sel_os = false;
	}

	/* get mailbox interrupt */
	chub->irq_mailbox = irq_of_parse_and_map(node, 0);
	if (chub->irq_mailbox <= 0) {
		dev_err(dev, "failed to get irq:%d\n", chub->irq_mailbox);
		return -EINVAL;
	}

	/* request irq handler */
	ret = devm_request_irq(dev, chub->irq_mailbox, contexthub_irq_handler,
			       0, dev_name(dev), chub);
	if (ret) {
		dev_err(dev, "failed to request irq:%d, ret:%d\n",
			chub->irq_mailbox, ret);
		return ret;
	}

#if defined(CHUB_RESET_ENABLE)
	/* get wdt interrupt optionally */
	chub->irq_wdt = irq_of_parse_and_map(node, 1);
	if (chub->irq_wdt > 0) {
		/* request irq handler */
		ret = devm_request_irq(dev, chub->irq_wdt,
				       contexthub_irq_wdt_handler, 0,
				       dev_name(dev), chub);
		if (ret) {
			dev_err(dev, "failed to request wdt irq:%d, ret:%d\n",
				chub->irq_wdt, ret);
			return ret;
		}
		chub->irq_wdt_disabled = 0;
	} else {
		dev_info(dev, "don't use wdt irq:%d\n", chub->irq_wdt);
	}
#endif

	/* get MAILBOX SFR */
	chub->mailbox = get_iomem(pdev, "mailbox", NULL);
	if (IS_ERR(chub->mailbox))
		return PTR_ERR(chub->mailbox);

	/* get SRAM base */
	chub->sram = get_iomem(pdev, "sram", &chub->sram_size);
	if (IS_ERR(chub->sram))
		return PTR_ERR(chub->sram);

	/* get chub gpr base */
	chub->chub_dumpgpr = get_iomem(pdev, "dumpgpr", NULL);
	if (IS_ERR(chub->chub_dumpgpr))
		return PTR_ERR(chub->chub_dumpgpr);

	chub->pmu_chub_reset = get_iomem(pdev, "chub_reset", NULL);
	if (IS_ERR(chub->pmu_chub_reset))
		return PTR_ERR(chub->pmu_chub_reset);

	chub->chub_baaw = get_iomem(pdev, "chub_baaw", NULL);
	if (IS_ERR(chub->chub_baaw))
		return PTR_ERR(chub->chub_baaw);

#if defined(CONFIG_SOC_EXYNOS9610)
	/* get cmu qch base */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu_chub_qch");
	chub->cmu_chub_qch = devm_ioremap_resource(dev, res);
	if (IS_ERR(chub->cmu_chub_qch)) {
		pr_err("driver failed to get cmu_chub_qch\n");
		return PTR_ERR(chub->cmu_chub_qch);
	}
#endif

	/* get addresses information to set BAAW */
	if (of_property_read_u32_index
	    (node, "baaw,baaw-p-apm-chub", 0,
	     &chub->baaw_info.baaw_p_apm_chub_start)) {
		dev_err(&pdev->dev,
			"driver failed to get baaw-p-apm-chub, start\n");
		return -ENODEV;
	}

	if (of_property_read_u32_index
	    (node, "baaw,baaw-p-apm-chub", 1,
	     &chub->baaw_info.baaw_p_apm_chub_end)) {
		dev_err(&pdev->dev,
			"driver failed to get baaw-p-apm-chub, end\n");
		return -ENODEV;
	}

	if (of_property_read_u32_index
	    (node, "baaw,baaw-p-apm-chub", 2,
	     &chub->baaw_info.baaw_p_apm_chub_remap)) {
		dev_err(&pdev->dev,
			"driver failed to get baaw-p-apm-chub, remap\n");
		return -ENODEV;
	}

	/* disable chub irq list (for sensor irq) */
	of_property_read_u32(node, "chub-irq-pin-len", &chub->irq_pin_len);
	if (chub->irq_pin_len) {
		if (chub->irq_pin_len > ARRAY_SIZE(chub->irq_pins)) {
			dev_err(&pdev->dev,
				"failed to get irq pin length %d, %zu\n",
				chub->irq_pin_len, ARRAY_SIZE(chub->irq_pins));
			chub->irq_pin_len = 0;
			return -ENODEV;
		}

		dev_info(&pdev->dev, "get chub irq_pin len:%d\n", chub->irq_pin_len);
		for (i = 0; i < chub->irq_pin_len; i++) {
			chub->irq_pins[i] = of_get_named_gpio(node, "chub-irq-pin", i);
			if (!gpio_is_valid(chub->irq_pins[i])) {
				dev_err(&pdev->dev, "get invalid chub irq_pin:%d\n",
					chub->irq_pins[i]);
				return -EINVAL;
			}
			dev_info(&pdev->dev, "get chub irq_pin:%d\n", chub->irq_pins[i]);
		}
	}
#if defined(CONFIG_SOC_EXYNOS9610)
	cal_dll_apm_enable();
#endif

	clk = devm_clk_get_and_prepare(dev, "chub_bus");
	if (IS_ERR_OR_NULL(clk))
		return -ENODEV;
	chub->clkrate = clk_get_rate(clk);

	chub_clk_len = of_property_count_strings(node, "clock-names");
	of_property_read_string_array(node, "clock-names", string_array, chub_clk_len);
	for (i = 0; i < chub_clk_len; i++) {
		clk = devm_clk_get_and_prepare(dev, string_array[i]);
		if (IS_ERR_OR_NULL(clk))
			return -ENODEV;
		dev_info(&pdev->dev, "clk_name: %s enable\n", __clk_get_name(clk));
	}

	return 0;
}

static ssize_t chub_poweron(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
	int ret = contexthub_poweron(ipc);

	return ret < 0 ? ret : count;
}

static ssize_t chub_reset(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
	int ret = contexthub_reset(ipc, 1, CHUB_ERR_NONE);

	return ret < 0 ? ret : count;
}

static struct device_attribute attributes[] = {
	__ATTR(poweron, 0220, NULL, chub_poweron),
	__ATTR(reset, 0220, NULL, chub_reset),
};

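/*
 * Example (the device path is board-specific), e.g. to force a reset
 * from the shell:
 *   echo 1 > /sys/devices/platform/.../nanohub-ipc/reset
 * Any write triggers the action; the written value itself is ignored.
 */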
#ifdef CONFIG_EXYNOS_ITMON
static int chub_itmon_notifier(struct notifier_block *nb,
			       unsigned long action, void *nb_data)
{
	struct contexthub_ipc_info *data = container_of(nb, struct contexthub_ipc_info, itmon_nb);
	struct itmon_notifier *itmon_data = nb_data;

	if (itmon_data && itmon_data->master &&
	    ((!strncmp("CM4_SHUB_CD", itmon_data->master, sizeof("CM4_SHUB_CD") - 1)) ||
	     (!strncmp("CM4_SHUB_P", itmon_data->master, sizeof("CM4_SHUB_P") - 1)) ||
	     (!strncmp("PDMA_SHUB", itmon_data->master, sizeof("PDMA_SHUB") - 1)))) {
		dev_info(data->dev, "%s: chub(%s) itmon detected: action:%lu!!\n",
			 __func__, itmon_data->master, action);
		contexthub_handle_debug(data, CHUB_ERR_ITMON, 1);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
#endif

static int contexthub_ipc_probe(struct platform_device *pdev)
{
	struct contexthub_ipc_info *chub;
	int need_to_free = 0;
	int ret = 0;
	int i;
#ifdef CONFIG_CHRE_SENSORHUB_HAL
	struct iio_dev *iio_dev;
#endif
	chub = chub_dbg_get_memory(DBG_NANOHUB_DD_AREA);
	if (!chub) {
		chub = devm_kzalloc(&pdev->dev, sizeof(struct contexthub_ipc_info),
				    GFP_KERNEL);
		need_to_free = 1;
	}
	if (IS_ERR_OR_NULL(chub)) {
		dev_err(&pdev->dev, "%s failed to get ipc memory\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* parse dt and hw init */
	ret = contexthub_ipc_hw_init(pdev, chub);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to get init hw with ret %d\n",
			__func__, ret);
		goto err;
	}

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	/* nanohub probe */
	iio_dev = nanohub_probe(&pdev->dev, NULL);
	if (IS_ERR(iio_dev)) {
		ret = PTR_ERR(iio_dev);
		goto err;
	}

	/* set wakeup irq number on nanohub driver */
	chub->data = iio_priv(iio_dev);
	nanohub_mailbox_comms_init(&chub->data->comms);
	chub->pdata = chub->data->pdata;
	chub->pdata->mailbox_client = chub;
	chub->data->irq1 = IRQ_EVT_A2C_WAKEUP;
	chub->data->irq2 = 0;
#endif
	chub->chub_rt_log.loglevel = 0;
	spin_lock_init(&chub->logout_lock);
	atomic_set(&chub->in_use_ipc, 0);
	atomic_set(&chub->chub_status, CHUB_ST_NO_POWER);
	atomic_set(&chub->in_reset, 0);
	chub->powermode = 0; /* updated by fw bl */
	chub->cur_err = 0;
	for (i = 0; i < CHUB_ERR_MAX; i++)
		chub->err_cnt[i] = 0;
	chub->dev = &pdev->dev;
	platform_set_drvdata(pdev, chub);
	contexthub_config_init(chub);

	for (i = 0, ret = 0; i < ARRAY_SIZE(attributes); i++) {
		ret = device_create_file(chub->dev, &attributes[i]);
		if (ret)
			dev_warn(chub->dev, "Failed to create file: %s\n",
				 attributes[i].attr.name);
	}
	init_waitqueue_head(&chub->poweron_lock.event);
	init_waitqueue_head(&chub->reset_lock.event);
	init_waitqueue_head(&chub->read_lock.event);
	init_waitqueue_head(&chub->chub_alive_lock.event);
	atomic_set(&chub->poweron_lock.flag, 0);
	atomic_set(&chub->chub_alive_lock.flag, 0);
	INIT_WORK(&chub->debug_work, handle_debug_work_func);
	INIT_WORK(&chub->log_work, handle_log_work_func);
	chub->log_work_reqcnt = 0;
#ifdef CONFIG_EXYNOS_ITMON
	chub->itmon_nb.notifier_call = chub_itmon_notifier;
	itmon_notifier_chain_register(&chub->itmon_nb);
#endif

	/* init fw runtime log */
	chub->chub_rt_log.buffer = vzalloc(SZ_512K * 2);
	if (!chub->chub_rt_log.buffer) {
		ret = -ENOMEM;
		goto err;
	}
	chub->chub_rt_log.buffer_size = SZ_512K * 2;
	chub->chub_rt_log.write_index = 0;

	dev_info(chub->dev, "%s with %s FW and %lu clk is done\n",
		 __func__, chub->os_name, chub->clkrate);
	return 0;
err:
	if (chub && need_to_free)
		devm_kfree(&pdev->dev, chub);

	dev_err(&pdev->dev, "%s failed with ret %d\n", __func__, ret);
	return ret;
}

static int contexthub_ipc_remove(struct platform_device *pdev)
{
	return 0;
}

static int contexthub_suspend(struct device *dev)
{
	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
#ifdef CONFIG_CHRE_SENSORHUB_HAL
	struct nanohub_data *data = ipc->data;
#endif

	if (atomic_read(&ipc->chub_status) != CHUB_ST_RUN)
		return 0;

	dev_info(dev, "nanohub log to kernel off\n");
	ipc_hw_write_shared_reg(AP, AP_SLEEP, SR_3);
	ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	return nanohub_suspend(data->iio_dev);
#else
	return 0;
#endif
}

static int contexthub_resume(struct device *dev)
{
	struct contexthub_ipc_info *ipc = dev_get_drvdata(dev);
#ifdef CONFIG_CHRE_SENSORHUB_HAL
	struct nanohub_data *data = ipc->data;
#endif

	if (atomic_read(&ipc->chub_status) != CHUB_ST_RUN)
		return 0;

	dev_info(dev, "nanohub log to kernel on\n");
	ipc_hw_write_shared_reg(AP, AP_WAKE, SR_3);
	ipc_hw_gen_interrupt(AP, IRQ_EVT_CHUB_ALIVE);

#ifdef CONFIG_CHRE_SENSORHUB_HAL
	return nanohub_resume(data->iio_dev);
#else
	return 0;
#endif
}

//static SIMPLE_DEV_PM_OPS(contexthub_pm_ops, contexthub_suspend, contexthub_resume);
static const struct dev_pm_ops contexthub_pm_ops = {
	.suspend = contexthub_suspend,
	.resume = contexthub_resume,
};
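/*
 * Per the patch subject, the wake notification now runs in the .resume
 * callback (was .complete), so CHUB is told the AP is awake earlier in
 * the resume sequence.
 */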

static const struct of_device_id contexthub_ipc_match[] = {
	{.compatible = "samsung,exynos-nanohub"},
	{},
};

static struct platform_driver samsung_contexthub_ipc_driver = {
	.probe = contexthub_ipc_probe,
	.remove = contexthub_ipc_remove,
	.driver = {
		.name = "nanohub-ipc",
		.owner = THIS_MODULE,
		.of_match_table = contexthub_ipc_match,
		.pm = &contexthub_pm_ops,
	},
};

int nanohub_mailbox_init(void)
{
	return platform_driver_register(&samsung_contexthub_ipc_driver);
}

static void __exit nanohub_mailbox_cleanup(void)
{
	platform_driver_unregister(&samsung_contexthub_ipc_driver);
}

module_init(nanohub_mailbox_init);
module_exit(nanohub_mailbox_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Exynos contexthub mailbox Driver");
MODULE_AUTHOR("Boojin Kim <boojin.kim@samsung.com>");