3 * MediaTek <www.MediaTek.com>
5 * Android Exception Device
8 #include <linux/cdev.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
12 #include <linux/hardirq.h>
13 #include <linux/init.h>
14 #include <linux/kallsyms.h>
15 #include <linux/miscdevice.h>
16 #include <linux/module.h>
17 #include <linux/poll.h>
18 #include <linux/proc_fs.h>
19 #include <linux/wait.h>
20 #include <linux/sched.h>
21 #include <linux/vmalloc.h>
22 #include <linux/disp_assert_layer.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/semaphore.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/stacktrace.h>
29 #include <linux/compat.h>
30 #include <linux/aee.h>
31 #include <linux/seq_file.h>
34 struct aee_req_queue
{
35 struct list_head list
;
39 static struct aee_req_queue ke_queue
;
40 static struct work_struct ke_work
;
41 static DEFINE_SEMAPHORE(aed_ke_sem
);
43 static struct aee_req_queue ee_queue
;
44 static struct work_struct ee_work
;
45 static DEFINE_SEMAPHORE(aed_ee_sem
);
47 * may be accessed from irq
49 static spinlock_t aed_device_lock
;
50 int aee_mode
= AEE_MODE_CUSTOMER_USER
;
51 static int force_red_screen
= AEE_FORCE_NOT_SET
;
53 static struct proc_dir_entry
*aed_proc_dir
;
55 #define MaxStackSize 8100
56 #define MaxMapsSize 8100
58 /******************************************************************************
60 *****************************************************************************/
62 void msg_show(const char *prefix
, AE_Msg
*msg
)
64 const char *cmd_type
= NULL
;
65 const char *cmd_id
= NULL
;
68 LOGD("%s: EMPTY msg\n", prefix
);
72 switch (msg
->cmdType
) {
77 cmd_type
= "RESPONSE";
106 case AE_REQ_BACKTRACE
:
107 cmd_id
= "BACKTRACE";
109 case AE_REQ_COREDUMP
:
112 case AE_IND_EXP_RAISED
:
113 cmd_id
= "EXP_RAISED";
115 case AE_IND_WRN_RAISED
:
116 cmd_id
= "WARN_RAISED";
118 case AE_IND_REM_RAISED
:
119 cmd_id
= "REMIND_RAISED";
121 case AE_IND_FATAL_RAISED
:
122 cmd_id
= "FATAL_RAISED";
124 case AE_IND_LOG_CLOSE
:
127 case AE_REQ_USERSPACEBACKTRACE
:
128 cmd_id
= "USERBACKTRACE";
130 case AE_REQ_USER_REG
:
138 LOGD("%s: cmdType=%s[%d] cmdId=%s[%d] seq=%d arg=%x len=%d\n", prefix
, cmd_type
,
139 msg
->cmdType
, cmd_id
, msg
->cmdId
, msg
->seq
, msg
->arg
, msg
->len
);
143 /******************************************************************************
144 * CONSTANT DEFINITIONS
145 *****************************************************************************/
146 #define CURRENT_KE_CONSOLE "current-ke-console"
147 #define CURRENT_EE_COREDUMP "current-ee-coredump"
149 #define CURRENT_KE_ANDROID_MAIN "current-ke-android_main"
150 #define CURRENT_KE_ANDROID_RADIO "current-ke-android_radio"
151 #define CURRENT_KE_ANDROID_SYSTEM "current-ke-android_system"
152 #define CURRENT_KE_USERSPACE_INFO "current-ke-userspace_info"
154 #define CURRENT_KE_MMPROFILE "current-ke-mmprofile"
156 #define MAX_EE_COREDUMP 0x800000
158 /******************************************************************************
159 * STRUCTURE DEFINITIONS
160 *****************************************************************************/
162 struct aed_eerec
{ /* external exception record */
163 struct list_head list
;
164 char assert_type
[32];
165 char exp_filename
[512];
166 unsigned int exp_linenum
;
178 struct aed_kerec
{ /* TODO: kernel exception record */
180 struct aee_oops
*lastlog
;
184 struct aed_eerec
*eerec
;
185 wait_queue_head_t eewait
;
187 struct aed_kerec kerec
;
188 wait_queue_head_t kewait
;
192 /******************************************************************************
193 * FUNCTION PROTOTYPES
194 *****************************************************************************/
195 static long aed_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
);
198 /******************************************************************************
200 *****************************************************************************/
201 static struct aed_dev aed_dev
;
203 /******************************************************************************
205 *****************************************************************************/
207 inline void msg_destroy(char **ppmsg
)
209 if (*ppmsg
!= NULL
) {
215 inline AE_Msg
*msg_create(char **ppmsg
, int extra_size
)
220 size
= sizeof(AE_Msg
) + extra_size
;
222 *ppmsg
= vzalloc(size
);
223 if (*ppmsg
== NULL
) {
224 LOGE("%s : kzalloc() fail\n", __func__
);
228 ((AE_Msg
*) (*ppmsg
))->len
= extra_size
;
230 return (AE_Msg
*) *ppmsg
;
233 static ssize_t
msg_copy_to_user(const char *prefix
, const char *msg
, char __user
*buf
,
234 size_t count
, loff_t
*f_pos
)
239 msg_show(prefix
, (AE_Msg
*) msg
);
244 len
= ((AE_Msg
*) msg
)->len
+ sizeof(AE_Msg
);
250 /* TODO: semaphore */
251 if ((*f_pos
+ count
) > len
) {
252 LOGE("read size overflow, count=%zx, *f_pos=%llx\n", count
, *f_pos
);
253 count
= len
- *f_pos
;
258 if (copy_to_user(buf
, msg
+ *f_pos
, count
)) {
259 LOGE("copy_to_user failed\n");
269 /******************************************************************************
270 * Kernel message handlers
271 *****************************************************************************/
272 static void ke_gen_notavail_msg(void)
275 LOGD("%s\n", __func__
);
277 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, 0);
281 rep_msg
->cmdType
= AE_RSP
;
282 rep_msg
->arg
= AE_NOT_AVAILABLE
;
286 static void ke_gen_class_msg(void)
288 #define KE_CLASS_STR "Kernel (KE)"
289 #define KE_CLASS_SIZE 12
293 LOGD("%s\n", __func__
);
295 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, KE_CLASS_SIZE
);
299 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
300 rep_msg
->cmdType
= AE_RSP
;
301 rep_msg
->cmdId
= AE_REQ_CLASS
;
302 rep_msg
->len
= KE_CLASS_SIZE
;
303 strncpy(data
, KE_CLASS_STR
, KE_CLASS_SIZE
);
306 static void ke_gen_type_msg(void)
308 #define KE_TYPE_STR "PANIC"
309 #define KE_TYPE_SIZE 6
313 LOGD("%s\n", __func__
);
315 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, KE_TYPE_SIZE
);
319 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
320 rep_msg
->cmdType
= AE_RSP
;
321 rep_msg
->cmdId
= AE_REQ_TYPE
;
322 rep_msg
->len
= KE_TYPE_SIZE
;
323 strncpy(data
, KE_TYPE_STR
, KE_TYPE_SIZE
);
326 static void ke_gen_module_msg(void)
331 LOGD("%s\n", __func__
);
332 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, strlen(aed_dev
.kerec
.lastlog
->module
) + 1);
336 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
337 rep_msg
->cmdType
= AE_RSP
;
338 rep_msg
->cmdId
= AE_REQ_MODULE
;
339 rep_msg
->len
= strlen(aed_dev
.kerec
.lastlog
->module
) + 1;
340 strlcpy(data
, aed_dev
.kerec
.lastlog
->module
, sizeof(aed_dev
.kerec
.lastlog
->module
));
343 static void ke_gen_detail_msg(const AE_Msg
*req_msg
)
347 LOGD("ke_gen_detail_msg is called\n");
348 LOGD("%s req_msg arg:%d\n", __func__
, req_msg
->arg
);
350 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, aed_dev
.kerec
.lastlog
->detail_len
+ 1);
354 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
355 rep_msg
->cmdType
= AE_RSP
;
356 rep_msg
->cmdId
= AE_REQ_DETAIL
;
357 rep_msg
->len
= aed_dev
.kerec
.lastlog
->detail_len
+ 1;
358 if (aed_dev
.kerec
.lastlog
->detail
!= NULL
) {
359 strlcpy(data
, aed_dev
.kerec
.lastlog
->detail
, aed_dev
.kerec
.lastlog
->detail_len
);
361 data
[aed_dev
.kerec
.lastlog
->detail_len
] = 0;
363 LOGD("ke_gen_detail_msg is return: %s\n", data
);
366 static void ke_gen_process_msg(void)
371 LOGD("%s\n", __func__
);
372 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, AEE_PROCESS_NAME_LENGTH
);
376 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
377 rep_msg
->cmdType
= AE_RSP
;
378 rep_msg
->cmdId
= AE_REQ_PROCESS
;
380 strncpy(data
, aed_dev
.kerec
.lastlog
->process_path
, AEE_PROCESS_NAME_LENGTH
);
381 /* Count into the NUL byte at end of string */
382 rep_msg
->len
= strlen(data
) + 1;
385 static void ke_gen_backtrace_msg(void)
390 LOGD("%s\n", __func__
);
391 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, AEE_BACKTRACE_LENGTH
);
395 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
396 rep_msg
->cmdType
= AE_RSP
;
397 rep_msg
->cmdId
= AE_REQ_BACKTRACE
;
399 strcpy(data
, aed_dev
.kerec
.lastlog
->backtrace
);
400 /* Count into the NUL byte at end of string */
401 rep_msg
->len
= strlen(data
) + 1;
405 static void ke_gen_userbacktrace_msg(void)
410 userinfo_len
=aed_dev
.kerec
.lastlog
->userthread_stack
.StackLength
+ sizeof(pid_t
)+sizeof(int);
411 rep_msg
= msg_create(&aed_dev
.kerec
.msg
,MaxStackSize
); //8100==stack size
415 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
416 rep_msg
->cmdType
= AE_RSP
;
417 rep_msg
->cmdId
= AE_REQ_USERSPACEBACKTRACE
;
419 rep_msg
->len
= userinfo_len
;
420 LOGD("%s rep_msg->len:%lx, \n", __func__
,(long)rep_msg
->len
);
422 memcpy(data
,(char *) &(aed_dev
.kerec
.lastlog
->userthread_stack
), sizeof(pid_t
)+sizeof(int)); //copy pid & stackLength
423 LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t
)+sizeof(int)));
424 LOGD("des :%lx\n", (long)(data
+ sizeof(pid_t
)+sizeof(int)));
425 LOGD("src addr :%lx\n", (long)((char *)(aed_dev
.kerec
.lastlog
->userthread_stack
.Userthread_Stack
)));
427 memcpy( (data
+ sizeof(pid_t
)+sizeof(int)), (char *)(aed_dev
.kerec
.lastlog
->userthread_stack
.Userthread_Stack
),aed_dev
.kerec
.lastlog
->userthread_stack
.StackLength
);//copy userthread_stack :8k
433 LOGD("%x\n ", data
[i
]);
437 LOGD("%s +++ \n", __func__
);
440 static void ke_gen_usermaps_msg(void)
445 userinfo_len
=aed_dev
.kerec
.lastlog
->userthread_maps
.Userthread_mapsLength
+ sizeof(pid_t
)+sizeof(int);
446 rep_msg
= msg_create(&aed_dev
.kerec
.msg
,MaxMapsSize
); //8100==stack size
450 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
451 rep_msg
->cmdType
= AE_RSP
;
452 rep_msg
->cmdId
= AE_REQ_USER_MAPS
;
454 rep_msg
->len
= userinfo_len
;
455 LOGD("%s rep_msg->len:%lx, \n", __func__
,(long)rep_msg
->len
);
457 memcpy(data
,(char *) &(aed_dev
.kerec
.lastlog
->userthread_maps
), sizeof(pid_t
)+sizeof(int)); //copy pid & stackLength
458 LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t
)+sizeof(int)));
459 LOGD("des :%lx\n", (long)(data
+ sizeof(pid_t
)+sizeof(int)));
460 LOGD("src addr :%lx\n", (long)((char *)(aed_dev
.kerec
.lastlog
->userthread_maps
.Userthread_maps
)));
462 memcpy( (data
+ sizeof(pid_t
)+sizeof(int)), (char *)(aed_dev
.kerec
.lastlog
->userthread_maps
.Userthread_maps
),aed_dev
.kerec
.lastlog
->userthread_maps
.Userthread_mapsLength
);//copy userthread_stack :8k
464 LOGD("%s +++ \n", __func__
);
471 static void ke_gen_user_reg_msg(void)
476 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, sizeof(struct aee_thread_reg
));
480 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
481 rep_msg
->cmdType
= AE_RSP
;
482 rep_msg
->cmdId
= AE_REQ_USER_REG
;
484 /* Count into the NUL byte at end of string */
485 rep_msg
->len
=sizeof(struct aee_thread_reg
);
486 memcpy(data
, (char *) &(aed_dev
.kerec
.lastlog
->userthread_reg
), sizeof(struct aee_thread_reg
));
488 #ifdef __aarch64__ //64bit kernel+32 u
489 if (is_compat_task())//K64_U32
492 LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(aed_dev
.kerec
.lastlog
->userthread_reg
.regs
.user_regs
.pc
),
493 (long)(aed_dev
.kerec
.lastlog
->userthread_reg
.regs
.regs
[14]),
494 (long)(aed_dev
.kerec
.lastlog
->userthread_reg
.regs
.regs
[13]) );
498 LOGD("%s +++ \n", __func__
);
501 static void ke_gen_ind_msg(struct aee_oops
*oops
)
503 unsigned long flags
= 0;
505 LOGD("%s oops %p\n", __func__
, oops
);
510 spin_lock_irqsave(&aed_device_lock
, flags
);
511 if (aed_dev
.kerec
.lastlog
== NULL
) {
512 aed_dev
.kerec
.lastlog
= oops
;
515 * waaa.. Two ke api at the same time
516 * or ke api during aed process is still busy at ke
517 * discard the new oops!
518 * Code should NEVER come here now!!!
521 LOGW("%s: BUG!!! More than one kernel message queued, AEE does not support concurrent KE dump\n", __func__
);
523 spin_unlock_irqrestore(&aed_device_lock
, flags
);
527 spin_unlock_irqrestore(&aed_device_lock
, flags
);
529 if (aed_dev
.kerec
.lastlog
!= NULL
) {
531 rep_msg
= msg_create(&aed_dev
.kerec
.msg
, 0);
535 rep_msg
->cmdType
= AE_IND
;
536 switch (oops
->attr
) {
537 case AE_DEFECT_REMINDING
:
538 rep_msg
->cmdId
= AE_IND_REM_RAISED
;
540 case AE_DEFECT_WARNING
:
541 rep_msg
->cmdId
= AE_IND_WRN_RAISED
;
543 case AE_DEFECT_EXCEPTION
:
544 rep_msg
->cmdId
= AE_IND_EXP_RAISED
;
546 case AE_DEFECT_FATAL
:
547 rep_msg
->cmdId
= AE_IND_FATAL_RAISED
;
550 /* Huh... something wrong, just go to exception */
551 rep_msg
->cmdId
= AE_IND_EXP_RAISED
;
555 rep_msg
->arg
= oops
->clazz
;
557 rep_msg
->dbOption
= oops
->dump_option
;
559 sema_init(&aed_ke_sem
, 0);
560 wake_up(&aed_dev
.kewait
);
561 /* wait until current ke work is done, then aed_dev is available, add a 60s timeout in case of debuggerd quit abnormally */
562 if (down_timeout(&aed_ke_sem
, msecs_to_jiffies(5 * 60 * 1000))) {
563 LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__
);
569 static void ke_destroy_log(void)
571 LOGD("%s\n", __func__
);
572 msg_destroy(&aed_dev
.kerec
.msg
);
574 if (aed_dev
.kerec
.lastlog
) {
576 (aed_dev
.kerec
.lastlog
->module
, IPANIC_MODULE_TAG
,
577 strlen(IPANIC_MODULE_TAG
)) == 0) {
578 ipanic_oops_free(aed_dev
.kerec
.lastlog
, 0);
580 aee_oops_free(aed_dev
.kerec
.lastlog
);
583 aed_dev
.kerec
.lastlog
= NULL
;
587 static int ke_log_avail(void)
589 if (aed_dev
.kerec
.lastlog
!= NULL
) {
591 if (is_compat_task() != ((aed_dev
.kerec
.lastlog
->dump_option
& DB_OPT_AARCH64
) == 0))
594 LOGI("AEE api log avaiable\n");
601 static void ke_queue_request(struct aee_oops
*oops
)
603 unsigned long flags
= 0;
606 spin_lock_irqsave(&ke_queue
.lock
, flags
);
607 list_add_tail(&oops
->list
, &ke_queue
.list
);
608 spin_unlock_irqrestore(&ke_queue
.lock
, flags
);
609 ret
= queue_work(system_nrt_wq
, &ke_work
);
610 LOGI("%s: add new ke work, status %d\n", __func__
, ret
);
613 static void ke_worker(struct work_struct
*work
)
615 struct aee_oops
*oops
, *n
;
616 unsigned long flags
= 0;
617 list_for_each_entry_safe(oops
, n
, &ke_queue
.list
, list
) {
619 LOGE("%s:Invalid aee_oops struct\n", __func__
);
623 ke_gen_ind_msg(oops
);
624 spin_lock_irqsave(&ke_queue
.lock
, flags
);
625 list_del(&oops
->list
);
626 spin_unlock_irqrestore(&ke_queue
.lock
, flags
);
631 /******************************************************************************
632 * EE message handlers
633 *****************************************************************************/
634 static void ee_gen_notavail_msg(void)
637 LOGD("%s\n", __func__
);
639 rep_msg
= msg_create(&aed_dev
.eerec
->msg
, 0);
643 rep_msg
->cmdType
= AE_RSP
;
644 rep_msg
->arg
= AE_NOT_AVAILABLE
;
648 static void ee_gen_class_msg(void)
650 #define EX_CLASS_EE_STR "External (EE)"
651 #define EX_CLASS_EE_SIZE 14
655 LOGD("%s\n", __func__
);
657 rep_msg
= msg_create(&aed_dev
.eerec
->msg
, EX_CLASS_EE_SIZE
);
661 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
662 rep_msg
->cmdType
= AE_RSP
;
663 rep_msg
->cmdId
= AE_REQ_CLASS
;
664 rep_msg
->len
= EX_CLASS_EE_SIZE
;
665 strncpy(data
, EX_CLASS_EE_STR
, EX_CLASS_EE_SIZE
);
668 static void ee_gen_type_msg(void)
672 struct aed_eerec
*eerec
= aed_dev
.eerec
;
674 LOGD("%s\n", __func__
);
677 msg_create(&eerec
->msg
, strlen((char const *)&eerec
->assert_type
) + 1);
681 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
682 rep_msg
->cmdType
= AE_RSP
;
683 rep_msg
->cmdId
= AE_REQ_TYPE
;
684 rep_msg
->len
= strlen((char const *)&eerec
->assert_type
) + 1;
685 strncpy(data
, (char const *)&eerec
->assert_type
,
686 strlen((char const *)&eerec
->assert_type
));
689 static void ee_gen_process_msg(void)
691 #define PROCESS_STRLEN 512
696 struct aed_eerec
*eerec
= aed_dev
.eerec
;
698 LOGD("%s\n", __func__
);
700 rep_msg
= msg_create(&eerec
->msg
, PROCESS_STRLEN
);
704 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
706 if (eerec
->exp_linenum
!= 0) {
707 /* for old aed_md_exception1() */
708 n
= sprintf(data
, "%s", eerec
->assert_type
);
709 if (eerec
->exp_filename
[0] != 0) {
710 n
+= sprintf(data
+ n
, ", filename=%s,line=%d", eerec
->exp_filename
,
712 } else if (eerec
->fatal1
!= 0 && eerec
->fatal2
!= 0) {
713 n
+= sprintf(data
+ n
, ", err1=%d,err2=%d", eerec
->fatal1
,
717 LOGD("ee_gen_process_msg else\n");
718 n
= sprintf(data
, "%s", eerec
->exp_filename
);
721 rep_msg
->cmdType
= AE_RSP
;
722 rep_msg
->cmdId
= AE_REQ_PROCESS
;
723 rep_msg
->len
= n
+ 1;
726 extern int aee_dump_ccci_debug_info(int md_id
, void **addr
, int *size
);
727 __weak
int aee_dump_ccci_debug_info(int md_id
, void **addr
, int *size
) {
731 static void ee_gen_detail_msg(void)
739 char *ccci_log
= NULL
;
740 int ccci_log_size
= 0;
741 struct aed_eerec
*eerec
= aed_dev
.eerec
;
743 LOGD("%s\n", __func__
);
745 if (strncmp(eerec
->assert_type
, "md32", 4) == 0) {
746 msgsize
= eerec
->ee_log_size
+ 128;
747 rep_msg
= msg_create(&eerec
->msg
, msgsize
);
751 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
752 n
+= snprintf(data
+ n
, msgsize
- n
, "== EXTERNAL EXCEPTION LOG ==\n");
753 n
+= snprintf(data
+ n
, msgsize
- n
, "%s\n", (char *)eerec
->ee_log
);
755 if (strncmp(eerec
->assert_type
, "modem", 5) == 0) {
756 if (1 == sscanf(eerec
->exp_filename
, "md%d:", &md_id
)) {
757 if(aee_dump_ccci_debug_info(md_id
, (void**)&ccci_log
, &ccci_log_size
)) {
763 msgsize
= (eerec
->ee_log_size
+ ccci_log_size
) * 4 + 128;
764 rep_msg
= msg_create(&eerec
->msg
, msgsize
);
768 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
769 n
+= snprintf(data
+ n
, msgsize
- n
, "== EXTERNAL EXCEPTION LOG ==\n");
770 mem
= (int *)eerec
->ee_log
;
772 for (i
= 0; i
< eerec
->ee_log_size
/ 4; i
+= 4) {
773 n
+= snprintf(data
+ n
, msgsize
- n
, "0x%08X 0x%08X 0x%08X 0x%08X\n",
774 mem
[i
], mem
[i
+ 1], mem
[i
+ 2], mem
[i
+ 3]);
777 n
+= snprintf(data
+ n
, msgsize
-n
, "kmalloc fail, no log available\n");
780 n
+= snprintf(data
+ n
, msgsize
- n
, "== MEM DUMP(%d) ==\n", eerec
->ee_phy_size
);
782 n
+= snprintf(data
+ n
, msgsize
- n
, "== CCCI LOG ==\n");
783 mem
= (int *)ccci_log
;
784 for (i
= 0; i
< ccci_log_size
/ 4; i
+= 4) {
785 n
+= snprintf(data
+ n
, msgsize
-n
, "0x%08X 0x%08X 0x%08X 0x%08X\n",
786 mem
[i
], mem
[i
+ 1], mem
[i
+ 2], mem
[i
+ 3]);
788 n
+= snprintf(data
+ n
, msgsize
-n
, "== MEM DUMP(%d) ==\n", ccci_log_size
);
791 rep_msg
->cmdType
= AE_RSP
;
792 rep_msg
->cmdId
= AE_REQ_DETAIL
;
793 rep_msg
->arg
= AE_PASS_BY_MEM
;
794 rep_msg
->len
= n
+ 1;
797 static void ee_gen_coredump_msg(void)
802 LOGD("%s\n", __func__
);
804 rep_msg
= msg_create(&aed_dev
.eerec
->msg
, 256);
808 data
= (char *)rep_msg
+ sizeof(AE_Msg
);
809 rep_msg
->cmdType
= AE_RSP
;
810 rep_msg
->cmdId
= AE_REQ_COREDUMP
;
812 sprintf(data
, "/proc/aed/%s", CURRENT_EE_COREDUMP
);
813 rep_msg
->len
= strlen(data
) + 1;
816 static void ee_destroy_log(void)
818 struct aed_eerec
*eerec
= aed_dev
.eerec
;
819 LOGD("%s\n", __func__
);
824 aed_dev
.eerec
= NULL
;
825 msg_destroy(&eerec
->msg
);
827 if (eerec
->ee_phy
!= NULL
) {
828 vfree(eerec
->ee_phy
);
829 eerec
->ee_phy
= NULL
;
831 eerec
->ee_log_size
= 0;
832 eerec
->ee_phy_size
= 0;
834 if (eerec
->ee_log
!= NULL
) {
835 kfree(eerec
->ee_log
);
836 /*after this, another ee can enter */
837 eerec
->ee_log
= NULL
;
843 static int ee_log_avail(void)
845 return (aed_dev
.eerec
!= NULL
);
848 static void ee_gen_ind_msg(struct aed_eerec
*eerec
)
850 unsigned long flags
= 0;
853 LOGD("%s\n", __func__
);
859 Don't lock the whole function for the time is uncertain.
860 we rely on the fact that ee_rec is not null if race here!
862 spin_lock_irqsave(&aed_device_lock
, flags
);
864 if (aed_dev
.eerec
== NULL
) {
865 aed_dev
.eerec
= eerec
;
867 /* should never come here, skip*/
868 spin_unlock_irqrestore(&aed_device_lock
, flags
);
869 LOGW("%s: More than one EE message queued\n", __func__
);
872 spin_unlock_irqrestore(&aed_device_lock
, flags
);
874 rep_msg
= msg_create(&aed_dev
.eerec
->msg
, 0);
878 rep_msg
->cmdType
= AE_IND
;
879 rep_msg
->cmdId
= AE_IND_EXP_RAISED
;
880 rep_msg
->arg
= AE_EE
;
882 rep_msg
->dbOption
= eerec
->db_opt
;
884 sema_init(&aed_ee_sem
, 0);
885 wake_up(&aed_dev
.eewait
);
886 if (down_timeout(&aed_ee_sem
, msecs_to_jiffies(5 * 60 * 1000))) {
887 LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__
);
891 static void ee_queue_request(struct aed_eerec
*eerec
)
894 unsigned long flags
= 0;
895 spin_lock_irqsave(&ee_queue
.lock
, flags
);
896 list_add_tail(&eerec
->list
, &ee_queue
.list
);
897 spin_unlock_irqrestore(&ee_queue
.lock
, flags
);
898 ret
= queue_work(system_nrt_wq
, &ee_work
);
899 LOGI("%s: add new ee work, status %d\n", __func__
, ret
);
902 static void ee_worker(struct work_struct
*work
)
904 struct aed_eerec
*eerec
, *tmp
;
905 unsigned long flags
= 0;
906 list_for_each_entry_safe(eerec
, tmp
, &ee_queue
.list
, list
) {
908 LOGE("%s:null eerec\n", __func__
);
912 ee_gen_ind_msg(eerec
);
913 spin_lock_irqsave(&ee_queue
.lock
, flags
);
914 list_del(&eerec
->list
);
915 spin_unlock_irqrestore(&ee_queue
.lock
, flags
);
920 /******************************************************************************
921 * AED EE File operations
922 *****************************************************************************/
923 static int aed_ee_open(struct inode
*inode
, struct file
*filp
)
925 LOGD("%s:%d:%d\n", __func__
, MAJOR(inode
->i_rdev
), MINOR(inode
->i_rdev
));
929 static int aed_ee_release(struct inode
*inode
, struct file
*filp
)
931 LOGD("%s:%d:%d\n", __func__
, MAJOR(inode
->i_rdev
), MINOR(inode
->i_rdev
));
935 static unsigned int aed_ee_poll(struct file
*file
, struct poll_table_struct
*ptable
)
937 /* LOGD("%s\n", __func__); */
938 if (ee_log_avail()) {
939 return POLLIN
| POLLRDNORM
| POLLOUT
| POLLWRNORM
;
941 poll_wait(file
, &aed_dev
.eewait
, ptable
);
946 static ssize_t
aed_ee_read(struct file
*filp
, char __user
*buf
, size_t count
, loff_t
*f_pos
)
948 return msg_copy_to_user(__func__
, aed_dev
.eerec
->msg
, buf
, count
, f_pos
);
951 static ssize_t
aed_ee_write(struct file
*filp
, const char __user
*buf
, size_t count
,
956 struct aed_eerec
*eerec
= aed_dev
.eerec
;
958 /* recevied a new request means the previous response is unavilable */
959 /* 1. set position to be zero */
960 /* 2. destroy the previous response message */
966 msg_destroy(&eerec
->msg
);
968 /* the request must be an *AE_Msg buffer */
969 if (count
!= sizeof(AE_Msg
)) {
970 LOGD("%s: ERR, aed_wirte count=%zx\n", __func__
, count
);
974 rsize
= copy_from_user(&msg
, buf
, count
);
976 LOGE("%s: ERR, copy_from_user rsize=%d\n", __func__
, rsize
);
980 msg_show(__func__
, &msg
);
982 if (msg
.cmdType
== AE_REQ
) {
983 if (!ee_log_avail()) {
984 ee_gen_notavail_msg();
998 ee_gen_process_msg();
1000 case AE_REQ_BACKTRACE
:
1001 ee_gen_notavail_msg();
1003 case AE_REQ_COREDUMP
:
1004 ee_gen_coredump_msg();
1007 LOGD("Unknown command id %d\n", msg
.cmdId
);
1008 ee_gen_notavail_msg();
1011 } else if (msg
.cmdType
== AE_IND
) {
1012 switch (msg
.cmdId
) {
1013 case AE_IND_LOG_CLOSE
:
1020 } else if (msg
.cmdType
== AE_RSP
) { /* IGNORE */
1026 /******************************************************************************
1027 * AED KE File operations
1028 *****************************************************************************/
1029 static int aed_ke_open(struct inode
*inode
, struct file
*filp
)
1031 struct aee_oops
*oops_open
= NULL
;
1032 int major
= MAJOR(inode
->i_rdev
);
1033 int minor
= MINOR(inode
->i_rdev
);
1034 unsigned char *devname
= filp
->f_path
.dentry
->d_iname
;
1035 LOGD("%s:(%s)%d:%d\n", __func__
, devname
, major
, minor
);
1037 if (strstr(devname
, "aed1")) { /* aed_ke_open is also used by other device */
1038 oops_open
= ipanic_oops_copy();
1039 if (oops_open
== NULL
) {
1042 /* The panic log only occur on system startup, so check it now */
1043 ke_queue_request(oops_open
);
1048 static int aed_ke_release(struct inode
*inode
, struct file
*filp
)
1050 LOGD("%s:%d:%d\n", __func__
, MAJOR(inode
->i_rdev
), MINOR(inode
->i_rdev
));
1054 static unsigned int aed_ke_poll(struct file
*file
, struct poll_table_struct
*ptable
)
1056 if (ke_log_avail()) {
1057 return POLLIN
| POLLRDNORM
| POLLOUT
| POLLWRNORM
;
1059 poll_wait(file
, &aed_dev
.kewait
, ptable
);
1064 struct current_ke_buffer
{
1069 static void *current_ke_start(struct seq_file
*m
, loff_t
*pos
)
1071 struct current_ke_buffer
*ke_buffer
;
1074 ke_buffer
= m
->private;
1075 if (ke_buffer
== NULL
)
1077 index
= *pos
* (PAGE_SIZE
- 1);
1078 if (index
< ke_buffer
->size
)
1079 return ke_buffer
->data
+ index
;
1083 static void *current_ke_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
1085 struct current_ke_buffer
*ke_buffer
;
1087 ke_buffer
= m
->private;
1088 if (ke_buffer
== NULL
)
1091 index
= *pos
* (PAGE_SIZE
- 1);
1092 if (index
< ke_buffer
->size
)
1093 return ke_buffer
->data
+ index
;
/* seq_file stop: nothing to release, the buffer outlives the iteration. */
static void current_ke_stop(struct seq_file *m, void *p)
{
}
1102 static int current_ke_show(struct seq_file
*m
, void *p
)
1105 struct current_ke_buffer
*ke_buffer
;
1106 ke_buffer
= m
->private;
1107 if (ke_buffer
== NULL
)
1109 if ((unsigned long)p
>= (unsigned long)ke_buffer
->data
+ ke_buffer
->size
)
1111 len
= (unsigned long)ke_buffer
->data
+ ke_buffer
->size
- (unsigned long)p
;
1112 len
= len
< PAGE_SIZE
? len
: (PAGE_SIZE
- 1);
1113 if (seq_write(m
, p
, len
)) {
1120 static const struct seq_operations current_ke_op
= {
1121 .start
= current_ke_start
,
1122 .next
= current_ke_next
,
1123 .stop
= current_ke_stop
,
1124 .show
= current_ke_show
1127 #define AED_CURRENT_KE_OPEN(ENTRY) \
1128 static int current_ke_##ENTRY##_open(struct inode *inode, struct file *file) \
1131 struct aee_oops *oops; \
1132 struct seq_file *m; \
1133 struct current_ke_buffer *ke_buffer; \
1134 ret = seq_open_private(file, ¤t_ke_op, sizeof(struct current_ke_buffer)); \
1136 oops = aed_dev.kerec.lastlog; \
1137 m = file->private_data; \
1140 ke_buffer = (struct current_ke_buffer *)m->private; \
1141 ke_buffer->data = oops->ENTRY; \
1142 ke_buffer->size = oops->ENTRY##_len;\
1147 #define AED_PROC_CURRENT_KE_FOPS(ENTRY) \
1148 static const struct file_operations proc_current_ke_##ENTRY##_fops = { \
1149 .open = current_ke_##ENTRY##_open, \
1151 .llseek = seq_lseek, \
1152 .release = seq_release, \
1156 static ssize_t
aed_ke_read(struct file
*filp
, char __user
*buf
, size_t count
, loff_t
*f_pos
)
1158 return msg_copy_to_user(__func__
, aed_dev
.kerec
.msg
, buf
, count
, f_pos
);
1161 static ssize_t
aed_ke_write(struct file
*filp
, const char __user
*buf
, size_t count
,
1167 /* recevied a new request means the previous response is unavilable */
1168 /* 1. set position to be zero */
1169 /* 2. destroy the previous response message */
1171 msg_destroy(&aed_dev
.kerec
.msg
);
1173 /* the request must be an *AE_Msg buffer */
1174 if (count
!= sizeof(AE_Msg
)) {
1175 LOGD("ERR: aed_wirte count=%zx\n", count
);
1179 rsize
= copy_from_user(&msg
, buf
, count
);
1181 LOGD("copy_from_user rsize=%d\n", rsize
);
1185 msg_show(__func__
, &msg
);
1187 if (msg
.cmdType
== AE_REQ
) {
1188 if (!ke_log_avail()) {
1189 ke_gen_notavail_msg();
1194 switch (msg
.cmdId
) {
1202 ke_gen_module_msg();
1205 ke_gen_detail_msg(&msg
);
1207 case AE_REQ_PROCESS
:
1208 ke_gen_process_msg();
1210 case AE_REQ_BACKTRACE
:
1211 ke_gen_backtrace_msg();
1213 case AE_REQ_USERSPACEBACKTRACE
:
1214 ke_gen_userbacktrace_msg();
1216 case AE_REQ_USER_REG
:
1217 ke_gen_user_reg_msg();
1219 case AE_REQ_USER_MAPS
:
1220 ke_gen_usermaps_msg();
1223 ke_gen_notavail_msg();
1226 } else if (msg
.cmdType
== AE_IND
) {
1227 switch (msg
.cmdId
) {
1228 case AE_IND_LOG_CLOSE
:
1229 /* real release operation move to ke_worker(): ke_destroy_log(); */
1236 } else if (msg
.cmdType
== AE_RSP
) { /* IGNORE */
1242 static long aed_ioctl_bt(unsigned long arg
)
1245 struct aee_ioctl ioctl
;
1246 struct aee_process_bt bt
;
1248 if (copy_from_user(&ioctl
, (struct aee_ioctl __user
*)arg
, sizeof(struct aee_ioctl
))) {
1253 ret
= aed_get_process_bt(&bt
);
1255 ioctl
.detail
= 0xAEE00001;
1256 ioctl
.size
= bt
.nr_entries
;
1257 if (copy_to_user((struct aee_ioctl __user
*)arg
, &ioctl
, sizeof(struct aee_ioctl
))) {
1265 ((struct aee_bt_frame __user
*)(unsigned long)ioctl
.out
,
1266 (const void *)bt
.entries
, sizeof(struct aee_bt_frame
) * AEE_NR_FRAME
)) {
1274 * aed process daemon and other command line may access me
1277 DEFINE_SEMAPHORE(aed_dal_sem
);
1278 static long aed_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
1281 if (cmd
== AEEIOCTL_GET_PROCESS_BT
)
1282 return aed_ioctl_bt(arg
);
1285 if (down_interruptible(&aed_dal_sem
) < 0) {
1286 return -ERESTARTSYS
;
1290 case AEEIOCTL_SET_AEE_MODE
:
1292 if (copy_from_user(&aee_mode
, (void __user
*)arg
, sizeof(aee_mode
))) {
1296 LOGD("set aee mode = %d\n", aee_mode
);
1299 case AEEIOCTL_DAL_SHOW
:
1301 /*It's troublesome to allocate more than 1KB size on stack */
1302 struct aee_dal_show
*dal_show
= kzalloc(sizeof(struct aee_dal_show
),
1304 if (dal_show
== NULL
) {
1309 if (copy_from_user(dal_show
, (struct aee_dal_show __user
*)arg
,
1310 sizeof(struct aee_dal_show
))) {
1315 if (aee_mode
>= AEE_MODE_CUSTOMER_ENG
) {
1316 LOGD("DAL_SHOW not allowed (mode %d)\n", aee_mode
);
1320 /* Try to prevent overrun */
1321 dal_show
->msg
[sizeof(dal_show
->msg
) - 1] = 0;
1322 #ifdef CONFIG_MTK_FB
1323 DAL_Printf("%s", dal_show
->msg
);
1332 case AEEIOCTL_DAL_CLEAN
:
1334 /* set default bgcolor to red, it will be used in DAL_Clean */
1335 struct aee_dal_setcolor dal_setcolor
;
1336 dal_setcolor
.foreground
= 0x00ff00; /*green */
1337 dal_setcolor
.background
= 0xff0000; /*red */
1339 #ifdef CONFIG_MTK_FB
1340 DAL_SetColor(dal_setcolor
.foreground
, dal_setcolor
.background
);
1346 case AEEIOCTL_SETCOLOR
:
1348 struct aee_dal_setcolor dal_setcolor
;
1350 if (aee_mode
>= AEE_MODE_CUSTOMER_ENG
) {
1351 LOGD("SETCOLOR not allowed (mode %d)\n", aee_mode
);
1355 if (copy_from_user(&dal_setcolor
, (struct aee_dal_setcolor __user
*)arg
,
1356 sizeof(struct aee_dal_setcolor
))) {
1360 #ifdef CONFIG_MTK_FB
1361 DAL_SetColor(dal_setcolor
.foreground
, dal_setcolor
.background
);
1362 DAL_SetScreenColor(dal_setcolor
.screencolor
);
1367 case AEEIOCTL_GET_THREAD_REG
:
1369 struct aee_thread_reg
*tmp
;
1371 LOGD("%s: get thread registers ioctl\n", __func__
);
1373 tmp
= kzalloc(sizeof(struct aee_thread_reg
), GFP_KERNEL
);
1380 (tmp
, (struct aee_thread_reg __user
*)arg
,
1381 sizeof(struct aee_thread_reg
))) {
1388 struct task_struct
*task
;
1389 struct pt_regs
*user_ret
= NULL
;
1390 task
= find_task_by_vpid(tmp
->tid
);
1396 user_ret
= task_pt_regs(task
);
1397 if (NULL
== user_ret
) {
1402 memcpy(&(tmp
->regs
), user_ret
, sizeof(struct pt_regs
));
1404 ((struct aee_thread_reg __user
*)arg
, tmp
,
1405 sizeof(struct aee_thread_reg
))) {
1412 LOGD("%s: get thread registers ioctl tid invalid\n", __func__
);
1423 case AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING
: //get current user space reg when call aee_kernel_warning_api
1425 LOGD("%s: AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING,call kthread create ,is ok\n", __func__
);
1426 //kthread_create(Dstate_test, NULL, "D-state");
1428 aee_kernel_warning_api(__FILE__
, __LINE__
, DB_OPT_DEFAULT
|DB_OPT_NATIVE_BACKTRACE
, "AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING",
1429 "Trigger Kernel warning");
1433 case AEEIOCTL_CHECK_SUID_DUMPABLE
:
1437 LOGD("%s: check suid dumpable ioctl\n", __func__
);
1439 if (copy_from_user(&pid
, (void __user
*)arg
, sizeof(int))) {
1445 struct task_struct
*task
;
1447 task
= find_task_by_vpid(pid
);
1449 LOGD("%s: process:%d task null\n", __func__
, pid
);
1453 if (task
->mm
== NULL
) {
1454 LOGD("%s: process:%d task mm null\n", __func__
, pid
);
1458 dumpable
= get_dumpable(task
->mm
);
1459 if (dumpable
== 0) {
1460 LOGD("%s: set process:%d dumpable\n", __func__
, pid
);
1461 set_dumpable(task
->mm
, 1);
1463 LOGD("%s: get process:%d dumpable:%d\n", __func__
, pid
,
1467 LOGD("%s: check suid dumpable ioctl pid invalid\n", __func__
);
1474 case AEEIOCTL_SET_FORECE_RED_SCREEN
:
1477 (&force_red_screen
, (void __user
*)arg
, sizeof(force_red_screen
))) {
1481 LOGD("force aee red screen = %d\n", force_red_screen
);
1494 static void aed_get_traces(char *msg
)
1496 struct stack_trace trace
;
1497 unsigned long stacks
[32];
1500 trace
.entries
= stacks
;
1501 /*save backtraces */
1502 trace
.nr_entries
= 0;
1503 trace
.max_entries
= 32;
1505 save_stack_trace_tsk(current
, &trace
);
1506 offset
= strlen(msg
);
1507 for (i
= 0; i
< trace
.nr_entries
; i
++) {
1508 offset
+= snprintf(msg
+ offset
, AEE_BACKTRACE_LENGTH
- offset
, "[<%p>] %pS\n",
1509 (void *)trace
.entries
[i
], (void *)trace
.entries
[i
]);
1513 void Log2Buffer(struct aee_oops
*oops
,const char *fmt
, ...)
1520 len
= strlen(oops
->userthread_maps
.Userthread_maps
);
1522 if ((len
+ sizeof(buf
)) < MaxMapsSize
)
1524 vsnprintf(&oops
->userthread_maps
.Userthread_maps
[len
], sizeof(buf
), fmt
, ap
);
1525 oops
->userthread_maps
.Userthread_mapsLength
=len
+ sizeof(buf
);
1530 int DumpThreadNativeInfo(struct aee_oops
*oops
)
1532 struct task_struct
*current_task
;
1533 struct pt_regs
*user_ret
;
1534 struct vm_area_struct
*vma
;
1535 unsigned long userstack_start
= 0;
1536 unsigned long userstack_end
= 0, length
= 0;
1540 struct mm_struct
*mm
;
1543 current_task
= get_current();
1544 user_ret
= task_pt_regs(current_task
);
1545 //CurrentUserPid=current_task->pid; //Thread id
1546 oops
->userthread_reg
.tid
=current_task
->tgid
; //process ID
1547 oops
->userthread_stack
.tid
=current_task
->tgid
; //process ID
1548 oops
->userthread_maps
.tid
=current_task
->tgid
; //process ID
1550 memcpy(&oops
->userthread_reg
.regs
, user_ret
, sizeof(struct pt_regs
));
1551 LOGE(" pid:%d /// tgid:%d, stack:0x%08lx\n", current_task
->pid
, current_task
->tgid
,(long)oops
->userthread_stack
.Userthread_Stack
);
1552 if (!user_mode(user_ret
))
1555 if (current_task
->mm
== NULL
)
1561 vma
= current_task
->mm
->mmap
;
1562 while (vma
&& (mapcount
< current_task
->mm
->map_count
)) {
1563 file
= vma
->vm_file
;
1564 flags
= vma
->vm_flags
;
1567 LOGE("%08lx-%08lx %c%c%c%c %s\n", vma
->vm_start
, vma
->vm_end
,
1568 flags
& VM_READ
? 'r' : '-',
1569 flags
& VM_WRITE
? 'w' : '-',
1570 flags
& VM_EXEC
? 'x' : '-',
1571 flags
& VM_MAYSHARE
? 's' : 'p', (unsigned char *)(file
->f_path
.dentry
->d_iname
));
1572 Log2Buffer(oops
,"%08lx-%08lx %c%c%c%c %s\n", vma
->vm_start
, vma
->vm_end
,
1573 flags
& VM_READ
? 'r' : '-',
1574 flags
& VM_WRITE
? 'w' : '-',
1575 flags
& VM_EXEC
? 'x' : '-',
1576 flags
& VM_MAYSHARE
? 's' : 'p', (unsigned char *)(file
->f_path
.dentry
->d_iname
));
1578 const char *name
= arch_vma_name(vma
);
1582 if (vma
->vm_start
<= mm
->start_brk
&&
1583 vma
->vm_end
>= mm
->brk
) {
1585 } else if (vma
->vm_start
<= mm
->start_stack
&&
1586 vma
->vm_end
>= mm
->start_stack
) {
1596 LOGE("%08lx-%08lx %c%c%c%c %s\n", vma
->vm_start
, vma
->vm_end
,
1597 flags
& VM_READ
? 'r' : '-',
1598 flags
& VM_WRITE
? 'w' : '-',
1599 flags
& VM_EXEC
? 'x' : '-',
1600 flags
& VM_MAYSHARE
? 's' : 'p', name
);
1602 Log2Buffer(oops
,"%08lx-%08lx %c%c%c%c %s\n", vma
->vm_start
, vma
->vm_end
,
1603 flags
& VM_READ
? 'r' : '-',
1604 flags
& VM_WRITE
? 'w' : '-',
1605 flags
& VM_EXEC
? 'x' : '-',
1606 flags
& VM_MAYSHARE
? 's' : 'p', name
);
1617 LOGE("maps addr(0x%08lx), maps len:%d\n", (long)oops
->userthread_maps
.Userthread_maps
, oops
->userthread_maps
.Userthread_mapsLength
);
1619 #ifndef __aarch64__ //32bit
1620 LOGE(" pc/lr/sp 0x%08lx/0x%08lx/0x%08lx\n", user_ret
->ARM_pc
, user_ret
->ARM_lr
,
1622 userstack_start
= (unsigned long)user_ret
->ARM_sp
;
1624 vma
= current_task
->mm
->mmap
;
1625 while (vma
!= NULL
) {
1626 if (vma
->vm_start
<= userstack_start
&& vma
->vm_end
>= userstack_start
) {
1627 userstack_end
= vma
->vm_end
;
1631 if (vma
== current_task
->mm
->mmap
) {
1635 if (userstack_end
== 0) {
1636 LOGE("Dump native stack failed:\n");
1639 LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start
, userstack_end
);
1640 length
=((userstack_end
- userstack_start
) <
1641 (MaxStackSize
-1)) ? (userstack_end
- userstack_start
) : (MaxStackSize
-1);
1642 oops
->userthread_stack
.StackLength
=length
;
1645 ret
= copy_from_user((void *)(oops
->userthread_stack
.Userthread_Stack
), (const void __user
*)(userstack_start
), length
);
1646 LOGE("u+k 32 copy_from_user ret(0x%08x),len:%lx\n", ret
, length
);
1647 LOGE("end dump native stack:\n");
1648 #else //64bit, First deal with K64+U64, the last time to deal with K64+U32
1650 if (is_compat_task())//K64_U32
1653 LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(user_ret
->user_regs
.pc
), (long)(user_ret
->user_regs
.regs
[14]),
1654 (long)(user_ret
->user_regs
.regs
[13]) );
1655 userstack_start
= (unsigned long)user_ret
->user_regs
.regs
[13];
1656 vma
= current_task
->mm
->mmap
;
1657 while (vma
!= NULL
) {
1658 if (vma
->vm_start
<= userstack_start
&& vma
->vm_end
>= userstack_start
) {
1659 userstack_end
= vma
->vm_end
;
1663 if (vma
== current_task
->mm
->mmap
) {
1667 if (userstack_end
== 0) {
1668 LOGE("Dump native stack failed:\n");
1671 LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start
, userstack_end
);
1672 length
=((userstack_end
- userstack_start
) <
1673 (MaxStackSize
-1)) ? (userstack_end
- userstack_start
) : (MaxStackSize
-1);
1674 oops
->userthread_stack
.StackLength
=length
;
1675 ret
= copy_from_user((void *)(oops
->userthread_stack
.Userthread_Stack
), (const void __user
*)(userstack_start
), length
);
1676 LOGE("copy_from_user ret(0x%16x),len:%lx\n", ret
, length
);
1680 LOGE(" K64+ U64 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(user_ret
->user_regs
.pc
), (long)(user_ret
->user_regs
.regs
[30]),
1681 (long)(user_ret
->user_regs
.sp
) );
1682 userstack_start
= (unsigned long)user_ret
->user_regs
.sp
;
1683 vma
= current_task
->mm
->mmap
;
1686 if (vma
->vm_start
<= userstack_start
&& vma
->vm_end
>= userstack_start
) {
1687 userstack_end
= vma
->vm_end
;
1691 if (vma
== current_task
->mm
->mmap
) {
1695 if (userstack_end
== 0) {
1696 LOGE("Dump native stack failed:\n");
1700 LOGE("Dump stack range (0x%16lx:0x%16lx)\n", userstack_start
, userstack_end
);
1701 length
=((userstack_end
- userstack_start
) <
1702 (MaxStackSize
-1)) ? (userstack_end
- userstack_start
) : (MaxStackSize
-1);
1703 oops
->userthread_stack
.StackLength
=length
;
1704 ret
= copy_from_user((void *)(oops
->userthread_stack
.Userthread_Stack
), (const void __user
*)(userstack_start
), length
);
1705 LOGE("copy_from_user ret(0x%08x),len:%lx\n", ret
, length
);
1712 static void kernel_reportAPI(const AE_DEFECT_ATTR attr
, const int db_opt
, const char *module
,
1715 struct aee_oops
*oops
;
1717 if (aee_mode
>= AEE_MODE_CUSTOMER_USER
|| (aee_mode
== AEE_MODE_CUSTOMER_ENG
&& attr
> AE_DEFECT_EXCEPTION
))
1719 oops
= aee_oops_create(attr
, AE_KERNEL_PROBLEM_REPORT
, module
);
1721 n
+= snprintf(oops
->backtrace
, AEE_BACKTRACE_LENGTH
, msg
);
1722 snprintf(oops
->backtrace
+ n
, AEE_BACKTRACE_LENGTH
- n
, "\nBacktrace:\n");
1723 aed_get_traces(oops
->backtrace
);
1724 oops
->detail
= (char *)(oops
->backtrace
);
1725 oops
->detail_len
= strlen(oops
->backtrace
) + 1;
1726 oops
->dump_option
= db_opt
;
1728 if ((db_opt
& DB_OPT_NATIVE_BACKTRACE
) && !is_compat_task())
1729 oops
->dump_option
|= DB_OPT_AARCH64
;
1731 if(db_opt
& DB_OPT_NATIVE_BACKTRACE
)
1733 oops
->userthread_stack
.Userthread_Stack
= vzalloc(MaxStackSize
);
1734 if (oops
->userthread_stack
.Userthread_Stack
== NULL
)
1736 LOGE("%s: oops->userthread_stack.Userthread_Stack Vmalloc fail", __func__
);
1739 oops
->userthread_maps
.Userthread_maps
= vzalloc(MaxMapsSize
);
1740 if (oops
->userthread_maps
.Userthread_maps
== NULL
)
1742 LOGE("%s: oops->userthread_maps.Userthread_maps Vmalloc fail", __func__
);
1745 LOGE("%s: oops->userthread_stack.Userthread_Stack :0x%08lx,maps:0x%08lx", __func__
,(long)oops
->userthread_stack
.Userthread_Stack
,(long)oops
->userthread_maps
.Userthread_maps
);
1746 oops
->userthread_stack
.StackLength
=MaxStackSize
; //default 8k
1747 oops
->userthread_maps
.Userthread_mapsLength
=MaxMapsSize
; //default 8k
1748 DumpThreadNativeInfo(oops
);
1751 LOGI("%s,%s,%s,0x%x\n", __func__
, module
, msg
, db_opt
);
1752 ke_queue_request(oops
);
1756 #ifndef PARTIAL_BUILD
1757 void aee_kernel_dal_api(const char *file
, const int line
, const char *msg
)
1759 LOGW("aee_kernel_dal_api : <%s:%d> %s ", file
, line
, msg
);
1760 if (in_interrupt()) {
1761 LOGE("aee_kernel_dal_api: in interrupt context, skip");
1765 #if defined(CONFIG_MTK_AEE_AED) && defined(CONFIG_MTK_FB)
1766 if (down_interruptible(&aed_dal_sem
) < 0) {
1767 LOGI("ERROR : aee_kernel_dal_api() get aed_dal_sem fail ");
1771 struct aee_dal_setcolor dal_setcolor
;
1772 struct aee_dal_show
*dal_show
= kzalloc(sizeof(struct aee_dal_show
), GFP_KERNEL
);
1773 if (dal_show
== NULL
) {
1774 LOGI("ERROR : aee_kernel_dal_api() kzalloc fail\n ");
1778 if (((aee_mode
== AEE_MODE_MTK_ENG
) && (force_red_screen
== AEE_FORCE_NOT_SET
))
1779 || ((aee_mode
< AEE_MODE_CUSTOMER_ENG
)
1780 && (force_red_screen
== AEE_FORCE_RED_SCREEN
))) {
1781 dal_setcolor
.foreground
= 0xff00ff; /* fg: purple */
1782 dal_setcolor
.background
= 0x00ff00; /* bg: green */
1783 DAL_SetColor(dal_setcolor
.foreground
, dal_setcolor
.background
);
1784 dal_setcolor
.screencolor
= 0xff0000; /* screen:red */
1785 DAL_SetScreenColor(dal_setcolor
.screencolor
);
1786 strncpy(dal_show
->msg
, msg
, sizeof(dal_show
->msg
) - 1);
1787 dal_show
->msg
[sizeof(dal_show
->msg
) - 1] = 0;
1788 DAL_Printf("%s", dal_show
->msg
);
1791 LOGD("DAL not allowed (mode %d)\n", aee_mode
);
1798 void aee_kernel_dal_api(const char *file
, const int line
, const char *msg
)
1800 LOGW("aee_kernel_dal_api : <%s:%d> %s ", file
, line
, msg
);
1804 EXPORT_SYMBOL(aee_kernel_dal_api
);
1806 static void external_exception(const char *assert_type
, const int *log
, int log_size
,
1807 const int *phy
, int phy_size
, const char *detail
, const int db_opt
)
1810 struct aed_eerec
*eerec
;
1812 LOGD("%s : [%s] log ptr %p size %d, phy ptr %p size %d\n", __func__
,
1813 assert_type
, log
, log_size
, phy
, phy_size
);
1814 if (aee_mode
>= AEE_MODE_CUSTOMER_USER
)
1816 eerec
= kzalloc(sizeof(struct aed_eerec
), GFP_ATOMIC
);
1817 if (eerec
== NULL
) {
1818 LOGE("%s: kmalloc fail", __func__
);
1822 if ((log_size
> 0) && (log
!= NULL
)) {
1823 eerec
->ee_log_size
= log_size
;
1824 ee_log
= (int *)kmalloc(log_size
, GFP_ATOMIC
);
1825 if (NULL
!= ee_log
) {
1826 eerec
->ee_log
= ee_log
;
1827 memcpy(ee_log
, log
, log_size
);
1830 eerec
->ee_log_size
= 16;
1831 ee_log
= (int *)kzalloc(eerec
->ee_log_size
, GFP_ATOMIC
);
1832 eerec
->ee_log
= ee_log
;
1835 if (NULL
== ee_log
) {
1836 LOGE("%s : memory alloc() fail\n", __func__
);
1841 memset(eerec
->assert_type
, 0, sizeof(eerec
->assert_type
));
1842 strncpy(eerec
->assert_type
, assert_type
, sizeof(eerec
->assert_type
) - 1);
1843 memset(eerec
->exp_filename
, 0, sizeof(eerec
->exp_filename
));
1844 strncpy(eerec
->exp_filename
, detail
, sizeof(eerec
->exp_filename
) - 1);
1845 LOGD("EE [%s]\n", eerec
->assert_type
);
1847 eerec
->exp_linenum
= 0;
1851 /* Check if we can dump memory */
1852 if (in_interrupt()) {
1853 /* kernel vamlloc cannot be used in interrupt context */
1854 LOGD("External exception occur in interrupt context, no coredump");
1856 } else if ((phy
< 0) || (phy_size
> MAX_EE_COREDUMP
)) {
1857 LOGD("EE Physical memory size(%d) too large or invalid", phy_size
);
1862 eerec
->ee_phy
= (int *)vmalloc_user(phy_size
);
1863 if (eerec
->ee_phy
!= NULL
) {
1864 memcpy(eerec
->ee_phy
, phy
, phy_size
);
1865 eerec
->ee_phy_size
= phy_size
;
1867 LOGD("Losing ee phy mem due to vmalloc return NULL\n");
1868 eerec
->ee_phy_size
= 0;
1871 eerec
->ee_phy
= NULL
;
1872 eerec
->ee_phy_size
= 0;
1874 eerec
->db_opt
= db_opt
;
1875 ee_queue_request(eerec
);
1876 LOGD("external_exception out\n");
1879 static bool rr_reported
;
1880 module_param(rr_reported
, bool, S_IRUSR
| S_IWUSR
);
1882 static struct aee_kernel_api kernel_api
= {
1883 .kernel_reportAPI
= kernel_reportAPI
,
1884 .md_exception
= external_exception
,
1885 .md32_exception
= external_exception
,
1886 .combo_exception
= external_exception
1889 extern int ksysfs_bootinfo_init(void);
1890 extern void ksysfs_bootinfo_exit(void);
1892 AED_CURRENT_KE_OPEN(console
);
1893 AED_PROC_CURRENT_KE_FOPS(console
);
1894 AED_CURRENT_KE_OPEN(userspace_info
);
1895 AED_PROC_CURRENT_KE_FOPS(userspace_info
);
1896 AED_CURRENT_KE_OPEN(android_main
);
1897 AED_PROC_CURRENT_KE_FOPS(android_main
);
1898 AED_CURRENT_KE_OPEN(android_radio
);
1899 AED_PROC_CURRENT_KE_FOPS(android_radio
);
1900 AED_CURRENT_KE_OPEN(android_system
);
1901 AED_PROC_CURRENT_KE_FOPS(android_system
);
1902 AED_CURRENT_KE_OPEN(mmprofile
);
1903 AED_PROC_CURRENT_KE_FOPS(mmprofile
);
1904 AED_CURRENT_KE_OPEN(mini_rdump
);
1905 AED_PROC_CURRENT_KE_FOPS(mini_rdump
);
1908 static int current_ke_ee_coredump_open(struct inode
*inode
, struct file
*file
)
1910 int ret
= seq_open_private(file
, ¤t_ke_op
, sizeof(struct current_ke_buffer
));
1912 struct aed_eerec
*eerec
= aed_dev
.eerec
;
1913 struct seq_file
*m
= file
->private_data
;
1914 struct current_ke_buffer
*ee_buffer
;
1917 ee_buffer
= (struct current_ke_buffer
*)m
->private;
1918 ee_buffer
->data
= eerec
->ee_phy
;
1919 ee_buffer
->size
= eerec
->ee_phy_size
;
1924 /* AED_CURRENT_KE_OPEN(ee_coredump); */
1925 AED_PROC_CURRENT_KE_FOPS(ee_coredump
);
1928 static int aed_proc_init(void)
1930 aed_proc_dir
= proc_mkdir("aed", NULL
);
1931 if (aed_proc_dir
== NULL
) {
1932 LOGE("aed proc_mkdir failed\n");
1936 AED_PROC_ENTRY(current
-ke
-console
, current_ke_console
, S_IRUSR
);
1937 AED_PROC_ENTRY(current
-ke
-userspace_info
, current_ke_userspace_info
, S_IRUSR
);
1938 AED_PROC_ENTRY(current
-ke
-android_system
, current_ke_android_system
, S_IRUSR
);
1939 AED_PROC_ENTRY(current
-ke
-android_radio
, current_ke_android_radio
, S_IRUSR
);
1940 AED_PROC_ENTRY(current
-ke
-android_main
, current_ke_android_main
, S_IRUSR
);
1941 AED_PROC_ENTRY(current
-ke
-mmprofile
, current_ke_mmprofile
, S_IRUSR
);
1942 AED_PROC_ENTRY(current
-ke
-mini_rdump
, current_ke_mini_rdump
, S_IRUSR
);
1943 AED_PROC_ENTRY(current
-ee
-coredump
, current_ke_ee_coredump
, S_IRUSR
);
1945 aee_rr_proc_init(aed_proc_dir
);
1947 aed_proc_debug_init(aed_proc_dir
);
1949 dram_console_init(aed_proc_dir
);
1954 static int aed_proc_done(void)
1956 remove_proc_entry(CURRENT_KE_CONSOLE
, aed_proc_dir
);
1957 remove_proc_entry(CURRENT_EE_COREDUMP
, aed_proc_dir
);
1959 aed_proc_debug_done(aed_proc_dir
);
1961 dram_console_done(aed_proc_dir
);
1963 remove_proc_entry("aed", NULL
);
1967 /******************************************************************************
1969 *****************************************************************************/
1970 static struct file_operations aed_ee_fops
= {
1971 .owner
= THIS_MODULE
,
1972 .open
= aed_ee_open
,
1973 .release
= aed_ee_release
,
1974 .poll
= aed_ee_poll
,
1975 .read
= aed_ee_read
,
1976 .write
= aed_ee_write
,
1977 .unlocked_ioctl
= aed_ioctl
,
1978 #ifdef CONFIG_COMPAT
1979 .compat_ioctl
= aed_ioctl
,
1983 static struct file_operations aed_ke_fops
= {
1984 .owner
= THIS_MODULE
,
1985 .open
= aed_ke_open
,
1986 .release
= aed_ke_release
,
1987 .poll
= aed_ke_poll
,
1988 .read
= aed_ke_read
,
1989 .write
= aed_ke_write
,
1990 .unlocked_ioctl
= aed_ioctl
,
1991 #ifdef CONFIG_COMPAT
1992 .compat_ioctl
= aed_ioctl
,
1996 /* QHQ RT Monitor end */
1997 static struct miscdevice aed_ee_dev
= {
1998 .minor
= MISC_DYNAMIC_MINOR
,
2000 .fops
= &aed_ee_fops
,
2005 static struct miscdevice aed_ke_dev
= {
2006 .minor
= MISC_DYNAMIC_MINOR
,
2008 .fops
= &aed_ke_fops
,
2011 static int __init
aed_init(void)
2014 err
= aed_proc_init();
2018 err
= ksysfs_bootinfo_init();
2022 spin_lock_init(&ke_queue
.lock
);
2023 spin_lock_init(&ee_queue
.lock
);
2024 INIT_LIST_HEAD(&ke_queue
.list
);
2025 INIT_LIST_HEAD(&ee_queue
.list
);
2027 init_waitqueue_head(&aed_dev
.eewait
);
2028 memset(&aed_dev
.kerec
, 0, sizeof(struct aed_kerec
));
2029 init_waitqueue_head(&aed_dev
.kewait
);
2031 INIT_WORK(&ke_work
, ke_worker
);
2032 INIT_WORK(&ee_work
, ee_worker
);
2034 aee_register_api(&kernel_api
);
2036 spin_lock_init(&aed_device_lock
);
2037 err
= misc_register(&aed_ee_dev
);
2038 if (unlikely(err
)) {
2039 LOGE("aee: failed to register aed0(ee) device!\n");
2043 err
= misc_register(&aed_ke_dev
);
2044 if (unlikely(err
)) {
2045 LOGE("aee: failed to register aed1(ke) device!\n");
2052 static void __exit
aed_exit(void)
2056 err
= misc_deregister(&aed_ee_dev
);
2058 LOGE("xLog: failed to unregister aed(ee) device!\n");
2059 err
= misc_deregister(&aed_ke_dev
);
2061 LOGE("xLog: failed to unregister aed(ke) device!\n");
2067 ksysfs_bootinfo_exit();
2069 module_init(aed_init
);
2070 module_exit(aed_exit
);
2072 MODULE_LICENSE("GPL");
2073 MODULE_DESCRIPTION("MediaTek AED Driver");
2074 MODULE_AUTHOR("MediaTek Inc.");