import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / misc / mediatek / aee / aed / aed-main.c
1 /*
2 * (C) Copyright 2010
3 * MediaTek <www.MediaTek.com>
4 *
5 * Android Exception Device
6 *
7 */
8 #include <linux/cdev.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/fs.h>
12 #include <linux/hardirq.h>
13 #include <linux/init.h>
14 #include <linux/kallsyms.h>
15 #include <linux/miscdevice.h>
16 #include <linux/module.h>
17 #include <linux/poll.h>
18 #include <linux/proc_fs.h>
19 #include <linux/wait.h>
20 #include <linux/sched.h>
21 #include <linux/vmalloc.h>
22 #include <linux/disp_assert_layer.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/semaphore.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/stacktrace.h>
29 #include <linux/compat.h>
30 #include <linux/aee.h>
31 #include <linux/seq_file.h>
32 #include "aed.h"
33
/* FIFO of pending exception records, shared between the raising context
 * and the worker that drains it. */
struct aee_req_queue {
	struct list_head list;
	spinlock_t lock;
};

/* Kernel-exception (KE) queue, its worker, and the semaphore used by
 * ke_gen_ind_msg() to wait for the daemon's AE_IND_LOG_CLOSE. */
static struct aee_req_queue ke_queue;
static struct work_struct ke_work;
static DEFINE_SEMAPHORE(aed_ke_sem);

/* External-exception (EE) counterparts of the KE objects above. */
static struct aee_req_queue ee_queue;
static struct work_struct ee_work;
static DEFINE_SEMAPHORE(aed_ee_sem);
/*
 * may be accessed from irq
 */
static spinlock_t aed_device_lock;
int aee_mode = AEE_MODE_CUSTOMER_USER;
static int force_red_screen = AEE_FORCE_NOT_SET;

/* /proc/aed directory holding the current-ke-* / current-ee-* entries. */
static struct proc_dir_entry *aed_proc_dir;

/* Payload byte budgets for the user-thread stack / maps snapshots copied
 * into KE reply messages (see ke_gen_userbacktrace_msg / usermaps). */
#define MaxStackSize 8100
#define MaxMapsSize 8100
57
58 /******************************************************************************
59 * DEBUG UTILITIES
60 *****************************************************************************/
61
62 void msg_show(const char *prefix, AE_Msg *msg)
63 {
64 const char *cmd_type = NULL;
65 const char *cmd_id = NULL;
66
67 if (msg == NULL) {
68 LOGD("%s: EMPTY msg\n", prefix);
69 return;
70 }
71
72 switch (msg->cmdType) {
73 case AE_REQ:
74 cmd_type = "REQ";
75 break;
76 case AE_RSP:
77 cmd_type = "RESPONSE";
78 break;
79 case AE_IND:
80 cmd_type = "IND";
81 break;
82 default:
83 cmd_type = "UNKNOWN";
84 break;
85 }
86
87 switch (msg->cmdId) {
88 case AE_REQ_IDX:
89 cmd_id = "IDX";
90 break;
91 case AE_REQ_CLASS:
92 cmd_id = "CLASS";
93 break;
94 case AE_REQ_TYPE:
95 cmd_id = "TYPE";
96 break;
97 case AE_REQ_MODULE:
98 cmd_id = "MODULE";
99 break;
100 case AE_REQ_PROCESS:
101 cmd_id = "PROCESS";
102 break;
103 case AE_REQ_DETAIL:
104 cmd_id = "DETAIL";
105 break;
106 case AE_REQ_BACKTRACE:
107 cmd_id = "BACKTRACE";
108 break;
109 case AE_REQ_COREDUMP:
110 cmd_id = "COREDUMP";
111 break;
112 case AE_IND_EXP_RAISED:
113 cmd_id = "EXP_RAISED";
114 break;
115 case AE_IND_WRN_RAISED:
116 cmd_id = "WARN_RAISED";
117 break;
118 case AE_IND_REM_RAISED:
119 cmd_id = "REMIND_RAISED";
120 break;
121 case AE_IND_FATAL_RAISED:
122 cmd_id = "FATAL_RAISED";
123 break;
124 case AE_IND_LOG_CLOSE:
125 cmd_id = "CLOSE";
126 break;
127 case AE_REQ_USERSPACEBACKTRACE:
128 cmd_id = "USERBACKTRACE";
129 break;
130 case AE_REQ_USER_REG:
131 cmd_id = "USERREG";
132 break;
133 default:
134 cmd_id = "UNKNOWN";
135 break;
136 }
137
138 LOGD("%s: cmdType=%s[%d] cmdId=%s[%d] seq=%d arg=%x len=%d\n", prefix, cmd_type,
139 msg->cmdType, cmd_id, msg->cmdId, msg->seq, msg->arg, msg->len);
140 }
141
142
143 /******************************************************************************
144 * CONSTANT DEFINITIONS
145 *****************************************************************************/
/* Names of the /proc/aed entries exposing the current KE/EE data. */
#define CURRENT_KE_CONSOLE "current-ke-console"
#define CURRENT_EE_COREDUMP "current-ee-coredump"

#define CURRENT_KE_ANDROID_MAIN "current-ke-android_main"
#define CURRENT_KE_ANDROID_RADIO "current-ke-android_radio"
#define CURRENT_KE_ANDROID_SYSTEM "current-ke-android_system"
#define CURRENT_KE_USERSPACE_INFO "current-ke-userspace_info"

#define CURRENT_KE_MMPROFILE "current-ke-mmprofile"

/* Upper bound (bytes) for an EE coredump. */
#define MAX_EE_COREDUMP 0x800000
157
158 /******************************************************************************
159 * STRUCTURE DEFINITIONS
160 *****************************************************************************/
161
struct aed_eerec {		/* external exception record */
	struct list_head list;
	char assert_type[32];	/* exception class string, e.g. "modem" / "md32" */
	char exp_filename[512];	/* source file or "md%d:" tag of the assert */
	unsigned int exp_linenum;
	unsigned int fatal1;
	unsigned int fatal2;

	int *ee_log;		/* raw exception log (kfree'd in ee_destroy_log) */
	int ee_log_size;
	int *ee_phy;		/* physical memory dump (vfree'd in ee_destroy_log) */
	int ee_phy_size;
	char *msg;		/* pending AE_Msg reply buffer (see msg_create) */
	int db_opt;		/* dump-option flags forwarded as rep_msg->dbOption */
};

struct aed_kerec {		/* TODO: kernel exception record */
	char *msg;		/* pending AE_Msg reply buffer */
	struct aee_oops *lastlog;	/* KE oops currently being dumped, NULL if idle */
};

struct aed_dev {
	struct aed_eerec *eerec;	/* EE dialogue in flight (NULL if idle) */
	wait_queue_head_t eewait;	/* woken when an EE indication is ready */

	struct aed_kerec kerec;
	wait_queue_head_t kewait;	/* woken when a KE indication is ready */
};
190
191
192 /******************************************************************************
193 * FUNCTION PROTOTYPES
194 *****************************************************************************/
static long aed_ioctl(struct file *file, unsigned int cmd, unsigned long arg);


/******************************************************************************
 * GLOBAL DATA
 *****************************************************************************/
/* Single device instance: at most one KE and one EE dialogue at a time. */
static struct aed_dev aed_dev;
202
203 /******************************************************************************
204 * Message Utilities
205 *****************************************************************************/
206
207 inline void msg_destroy(char **ppmsg)
208 {
209 if (*ppmsg != NULL) {
210 vfree(*ppmsg);
211 *ppmsg = NULL;
212 }
213 }
214
215 inline AE_Msg *msg_create(char **ppmsg, int extra_size)
216 {
217 int size;
218
219 msg_destroy(ppmsg);
220 size = sizeof(AE_Msg) + extra_size;
221
222 *ppmsg = vzalloc(size);
223 if (*ppmsg == NULL) {
224 LOGE("%s : kzalloc() fail\n", __func__);
225 return NULL;
226 }
227
228 ((AE_Msg *) (*ppmsg))->len = extra_size;
229
230 return (AE_Msg *) *ppmsg;
231 }
232
233 static ssize_t msg_copy_to_user(const char *prefix, const char *msg, char __user *buf,
234 size_t count, loff_t *f_pos)
235 {
236 ssize_t ret = 0;
237 int len;
238
239 msg_show(prefix, (AE_Msg *) msg);
240
241 if (msg == NULL)
242 return 0;
243
244 len = ((AE_Msg *) msg)->len + sizeof(AE_Msg);
245
246 if (*f_pos >= len) {
247 ret = 0;
248 goto out;
249 }
250 /* TODO: semaphore */
251 if ((*f_pos + count) > len) {
252 LOGE("read size overflow, count=%zx, *f_pos=%llx\n", count, *f_pos);
253 count = len - *f_pos;
254 ret = -EFAULT;
255 goto out;
256 }
257
258 if (copy_to_user(buf, msg + *f_pos, count)) {
259 LOGE("copy_to_user failed\n");
260 ret = -EFAULT;
261 goto out;
262 }
263 *f_pos += count;
264 ret = count;
265 out:
266 return ret;
267 }
268
269 /******************************************************************************
270 * Kernel message handlers
271 *****************************************************************************/
272 static void ke_gen_notavail_msg(void)
273 {
274 AE_Msg *rep_msg;
275 LOGD("%s\n", __func__);
276
277 rep_msg = msg_create(&aed_dev.kerec.msg, 0);
278 if (rep_msg == NULL)
279 return;
280
281 rep_msg->cmdType = AE_RSP;
282 rep_msg->arg = AE_NOT_AVAILABLE;
283 rep_msg->len = 0;
284 }
285
286 static void ke_gen_class_msg(void)
287 {
288 #define KE_CLASS_STR "Kernel (KE)"
289 #define KE_CLASS_SIZE 12
290 AE_Msg *rep_msg;
291 char *data;
292
293 LOGD("%s\n", __func__);
294
295 rep_msg = msg_create(&aed_dev.kerec.msg, KE_CLASS_SIZE);
296 if (rep_msg == NULL)
297 return;
298
299 data = (char *)rep_msg + sizeof(AE_Msg);
300 rep_msg->cmdType = AE_RSP;
301 rep_msg->cmdId = AE_REQ_CLASS;
302 rep_msg->len = KE_CLASS_SIZE;
303 strncpy(data, KE_CLASS_STR, KE_CLASS_SIZE);
304 }
305
306 static void ke_gen_type_msg(void)
307 {
308 #define KE_TYPE_STR "PANIC"
309 #define KE_TYPE_SIZE 6
310 AE_Msg *rep_msg;
311 char *data;
312
313 LOGD("%s\n", __func__);
314
315 rep_msg = msg_create(&aed_dev.kerec.msg, KE_TYPE_SIZE);
316 if (rep_msg == NULL)
317 return;
318
319 data = (char *)rep_msg + sizeof(AE_Msg);
320 rep_msg->cmdType = AE_RSP;
321 rep_msg->cmdId = AE_REQ_TYPE;
322 rep_msg->len = KE_TYPE_SIZE;
323 strncpy(data, KE_TYPE_STR, KE_TYPE_SIZE);
324 }
325
326 static void ke_gen_module_msg(void)
327 {
328 AE_Msg *rep_msg;
329 char *data;
330
331 LOGD("%s\n", __func__);
332 rep_msg = msg_create(&aed_dev.kerec.msg, strlen(aed_dev.kerec.lastlog->module) + 1);
333 if (rep_msg == NULL)
334 return;
335
336 data = (char *)rep_msg + sizeof(AE_Msg);
337 rep_msg->cmdType = AE_RSP;
338 rep_msg->cmdId = AE_REQ_MODULE;
339 rep_msg->len = strlen(aed_dev.kerec.lastlog->module) + 1;
340 strlcpy(data, aed_dev.kerec.lastlog->module, sizeof(aed_dev.kerec.lastlog->module));
341 }
342
343 static void ke_gen_detail_msg(const AE_Msg *req_msg)
344 {
345 AE_Msg *rep_msg;
346 char *data;
347 LOGD("ke_gen_detail_msg is called\n");
348 LOGD("%s req_msg arg:%d\n", __func__, req_msg->arg);
349
350 rep_msg = msg_create(&aed_dev.kerec.msg, aed_dev.kerec.lastlog->detail_len + 1);
351 if (rep_msg == NULL)
352 return;
353
354 data = (char *)rep_msg + sizeof(AE_Msg);
355 rep_msg->cmdType = AE_RSP;
356 rep_msg->cmdId = AE_REQ_DETAIL;
357 rep_msg->len = aed_dev.kerec.lastlog->detail_len + 1;
358 if (aed_dev.kerec.lastlog->detail != NULL) {
359 strlcpy(data, aed_dev.kerec.lastlog->detail, aed_dev.kerec.lastlog->detail_len);
360 }
361 data[aed_dev.kerec.lastlog->detail_len] = 0;
362
363 LOGD("ke_gen_detail_msg is return: %s\n", data);
364 }
365
366 static void ke_gen_process_msg(void)
367 {
368 AE_Msg *rep_msg;
369 char *data;
370
371 LOGD("%s\n", __func__);
372 rep_msg = msg_create(&aed_dev.kerec.msg, AEE_PROCESS_NAME_LENGTH);
373 if (rep_msg == NULL)
374 return;
375
376 data = (char *)rep_msg + sizeof(AE_Msg);
377 rep_msg->cmdType = AE_RSP;
378 rep_msg->cmdId = AE_REQ_PROCESS;
379
380 strncpy(data, aed_dev.kerec.lastlog->process_path, AEE_PROCESS_NAME_LENGTH);
381 /* Count into the NUL byte at end of string */
382 rep_msg->len = strlen(data) + 1;
383 }
384
385 static void ke_gen_backtrace_msg(void)
386 {
387 AE_Msg *rep_msg;
388 char *data;
389
390 LOGD("%s\n", __func__);
391 rep_msg = msg_create(&aed_dev.kerec.msg, AEE_BACKTRACE_LENGTH);
392 if (rep_msg == NULL)
393 return;
394
395 data = (char *)rep_msg + sizeof(AE_Msg);
396 rep_msg->cmdType = AE_RSP;
397 rep_msg->cmdId = AE_REQ_BACKTRACE;
398
399 strcpy(data, aed_dev.kerec.lastlog->backtrace);
400 /* Count into the NUL byte at end of string */
401 rep_msg->len = strlen(data) + 1;
402 }
403
404
405 static void ke_gen_userbacktrace_msg(void)
406 {
407 AE_Msg *rep_msg;
408 char *data;
409 int userinfo_len=0;
410 userinfo_len=aed_dev.kerec.lastlog->userthread_stack.StackLength + sizeof(pid_t)+sizeof(int);
411 rep_msg = msg_create(&aed_dev.kerec.msg,MaxStackSize ); //8100==stack size
412 if (rep_msg == NULL)
413 return;
414
415 data = (char *)rep_msg + sizeof(AE_Msg);
416 rep_msg->cmdType = AE_RSP;
417 rep_msg->cmdId = AE_REQ_USERSPACEBACKTRACE;
418
419 rep_msg->len = userinfo_len;
420 LOGD("%s rep_msg->len:%lx, \n", __func__,(long)rep_msg->len);
421
422 memcpy(data,(char *) &(aed_dev.kerec.lastlog->userthread_stack), sizeof(pid_t)+sizeof(int)); //copy pid & stackLength
423 LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t)+sizeof(int)));
424 LOGD("des :%lx\n", (long)(data+ sizeof(pid_t)+sizeof(int)));
425 LOGD("src addr :%lx\n", (long)((char *)(aed_dev.kerec.lastlog->userthread_stack.Userthread_Stack)));
426
427 memcpy( (data+ sizeof(pid_t)+sizeof(int)), (char *)(aed_dev.kerec.lastlog->userthread_stack.Userthread_Stack),aed_dev.kerec.lastlog->userthread_stack.StackLength);//copy userthread_stack :8k
428
429 #if 0 //for debug
430 {
431 int i=0;
432 for (i=0;i<64;i++)
433 LOGD("%x\n ", data[i]);
434
435 }
436 #endif
437 LOGD("%s +++ \n", __func__);
438 }
439
440 static void ke_gen_usermaps_msg(void)
441 {
442 AE_Msg *rep_msg;
443 char *data;
444 int userinfo_len=0;
445 userinfo_len=aed_dev.kerec.lastlog->userthread_maps.Userthread_mapsLength+ sizeof(pid_t)+sizeof(int);
446 rep_msg = msg_create(&aed_dev.kerec.msg,MaxMapsSize); //8100==stack size
447 if (rep_msg == NULL)
448 return;
449
450 data = (char *)rep_msg + sizeof(AE_Msg);
451 rep_msg->cmdType = AE_RSP;
452 rep_msg->cmdId = AE_REQ_USER_MAPS;
453
454 rep_msg->len = userinfo_len;
455 LOGD("%s rep_msg->len:%lx, \n", __func__,(long)rep_msg->len);
456
457 memcpy(data,(char *) &(aed_dev.kerec.lastlog->userthread_maps), sizeof(pid_t)+sizeof(int)); //copy pid & stackLength
458 LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t)+sizeof(int)));
459 LOGD("des :%lx\n", (long)(data+ sizeof(pid_t)+sizeof(int)));
460 LOGD("src addr :%lx\n", (long)((char *)(aed_dev.kerec.lastlog->userthread_maps.Userthread_maps)));
461
462 memcpy( (data+ sizeof(pid_t)+sizeof(int)), (char *)(aed_dev.kerec.lastlog->userthread_maps.Userthread_maps),aed_dev.kerec.lastlog->userthread_maps.Userthread_mapsLength);//copy userthread_stack :8k
463
464 LOGD("%s +++ \n", __func__);
465 }
466
467
468
469
470
471 static void ke_gen_user_reg_msg(void)
472 {
473 AE_Msg *rep_msg;
474 char *data;
475
476 rep_msg = msg_create(&aed_dev.kerec.msg, sizeof(struct aee_thread_reg ));
477 if (rep_msg == NULL)
478 return;
479
480 data = (char *)rep_msg + sizeof(AE_Msg);
481 rep_msg->cmdType = AE_RSP;
482 rep_msg->cmdId = AE_REQ_USER_REG;
483
484 /* Count into the NUL byte at end of string */
485 rep_msg->len =sizeof(struct aee_thread_reg );
486 memcpy(data, (char *) &(aed_dev.kerec.lastlog->userthread_reg), sizeof(struct aee_thread_reg ));
487 #if 0 //for debug
488 #ifdef __aarch64__ //64bit kernel+32 u
489 if (is_compat_task())//K64_U32
490 {
491
492 LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(aed_dev.kerec.lastlog->userthread_reg.regs.user_regs.pc),
493 (long)(aed_dev.kerec.lastlog->userthread_reg.regs.regs[14]),
494 (long)(aed_dev.kerec.lastlog->userthread_reg.regs.regs[13]) );
495 }
496 #endif
497 #endif
498 LOGD("%s +++ \n", __func__);
499 }
500
/*
 * Publish a kernel-exception indication to the AEE daemon and block until
 * the daemon acknowledges with AE_IND_LOG_CLOSE (or a 5-minute timeout).
 * Takes ownership of @oops: if a dump is already in flight the new record
 * is freed and dropped.
 */
static void ke_gen_ind_msg(struct aee_oops *oops)
{
	unsigned long flags = 0;

	LOGD("%s oops %p\n", __func__, oops);
	if (oops == NULL) {
		return;
	}

	/* Only the lastlog hand-over is done under the lock; the rest of the
	 * function relies on lastlog staying non-NULL while a dump runs. */
	spin_lock_irqsave(&aed_device_lock, flags);
	if (aed_dev.kerec.lastlog == NULL) {
		aed_dev.kerec.lastlog = oops;
	} else {
		/*
		 * waaa.. Two ke api at the same time
		 * or ke api during aed process is still busy at ke
		 * discard the new oops!
		 * Code should NEVER come here now!!!
		 */

		LOGW("%s: BUG!!! More than one kernel message queued, AEE does not support concurrent KE dump\n", __func__);
		aee_oops_free(oops);
		spin_unlock_irqrestore(&aed_device_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&aed_device_lock, flags);

	if (aed_dev.kerec.lastlog != NULL) {
		AE_Msg *rep_msg;
		rep_msg = msg_create(&aed_dev.kerec.msg, 0);
		if (rep_msg == NULL)
			return;

		rep_msg->cmdType = AE_IND;
		/* Map the defect severity onto the indication command id. */
		switch (oops->attr) {
		case AE_DEFECT_REMINDING:
			rep_msg->cmdId = AE_IND_REM_RAISED;
			break;
		case AE_DEFECT_WARNING:
			rep_msg->cmdId = AE_IND_WRN_RAISED;
			break;
		case AE_DEFECT_EXCEPTION:
			rep_msg->cmdId = AE_IND_EXP_RAISED;
			break;
		case AE_DEFECT_FATAL:
			rep_msg->cmdId = AE_IND_FATAL_RAISED;
			break;
		default:
			/* Huh... something wrong, just go to exception */
			rep_msg->cmdId = AE_IND_EXP_RAISED;
			break;
		}

		rep_msg->arg = oops->clazz;
		rep_msg->len = 0;
		rep_msg->dbOption = oops->dump_option;

		/* Reset the handshake semaphore, wake pollers, then wait for
		 * the daemon's AE_IND_LOG_CLOSE (aed_ke_write ups the sem).
		 * Timeout after 5 minutes in case debuggerd quit abnormally. */
		sema_init(&aed_ke_sem, 0);
		wake_up(&aed_dev.kewait);
		if (down_timeout(&aed_ke_sem, msecs_to_jiffies(5 * 60 * 1000))) {
			LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__);
		}
	}

}
568
569 static void ke_destroy_log(void)
570 {
571 LOGD("%s\n", __func__);
572 msg_destroy(&aed_dev.kerec.msg);
573
574 if (aed_dev.kerec.lastlog) {
575 if (strncmp
576 (aed_dev.kerec.lastlog->module, IPANIC_MODULE_TAG,
577 strlen(IPANIC_MODULE_TAG)) == 0) {
578 ipanic_oops_free(aed_dev.kerec.lastlog, 0);
579 } else {
580 aee_oops_free(aed_dev.kerec.lastlog);
581 }
582
583 aed_dev.kerec.lastlog = NULL;
584 }
585 }
586
/*
 * Return 1 when a KE record exists and matches the reader's ABI width.
 * On arm64, DB_OPT_AARCH64 marks a 64-bit dump: a compat (32-bit) reader
 * must only consume 32-bit dumps and a native reader only 64-bit ones.
 */
static int ke_log_avail(void)
{
	if (aed_dev.kerec.lastlog != NULL) {
#ifdef __aarch64__
		if (is_compat_task() != ((aed_dev.kerec.lastlog->dump_option & DB_OPT_AARCH64) == 0))
			return 0;
#endif
		LOGI("AEE api log avaiable\n");
		return 1;
	}

	return 0;
}
600
601 static void ke_queue_request(struct aee_oops *oops)
602 {
603 unsigned long flags = 0;
604 int ret;
605
606 spin_lock_irqsave(&ke_queue.lock, flags);
607 list_add_tail(&oops->list, &ke_queue.list);
608 spin_unlock_irqrestore(&ke_queue.lock, flags);
609 ret = queue_work(system_nrt_wq, &ke_work);
610 LOGI("%s: add new ke work, status %d\n", __func__, ret);
611 }
612
/*
 * Work handler draining the KE queue: for each queued oops, raise an
 * indication (ke_gen_ind_msg blocks until the daemon closes the log or a
 * timeout hits), then unlink and free the record.
 * NOTE(review): the list is traversed without ke_queue.lock while entries
 * may still be appended concurrently -- confirm this is safe for
 * list_for_each_entry_safe here.
 */
static void ke_worker(struct work_struct *work)
{
	struct aee_oops *oops, *n;
	unsigned long flags = 0;
	list_for_each_entry_safe(oops, n, &ke_queue.list, list) {
		if (oops == NULL) {
			LOGE("%s:Invalid aee_oops struct\n", __func__);
			return;
		}

		ke_gen_ind_msg(oops);
		spin_lock_irqsave(&ke_queue.lock, flags);
		list_del(&oops->list);
		spin_unlock_irqrestore(&ke_queue.lock, flags);
		ke_destroy_log();
	}
}
630
631 /******************************************************************************
632 * EE message handlers
633 *****************************************************************************/
634 static void ee_gen_notavail_msg(void)
635 {
636 AE_Msg *rep_msg;
637 LOGD("%s\n", __func__);
638
639 rep_msg = msg_create(&aed_dev.eerec->msg, 0);
640 if (rep_msg == NULL)
641 return;
642
643 rep_msg->cmdType = AE_RSP;
644 rep_msg->arg = AE_NOT_AVAILABLE;
645 rep_msg->len = 0;
646 }
647
648 static void ee_gen_class_msg(void)
649 {
650 #define EX_CLASS_EE_STR "External (EE)"
651 #define EX_CLASS_EE_SIZE 14
652 AE_Msg *rep_msg;
653 char *data;
654
655 LOGD("%s\n", __func__);
656
657 rep_msg = msg_create(&aed_dev.eerec->msg, EX_CLASS_EE_SIZE);
658 if (rep_msg == NULL)
659 return;
660
661 data = (char *)rep_msg + sizeof(AE_Msg);
662 rep_msg->cmdType = AE_RSP;
663 rep_msg->cmdId = AE_REQ_CLASS;
664 rep_msg->len = EX_CLASS_EE_SIZE;
665 strncpy(data, EX_CLASS_EE_STR, EX_CLASS_EE_SIZE);
666 }
667
668 static void ee_gen_type_msg(void)
669 {
670 AE_Msg *rep_msg;
671 char *data;
672 struct aed_eerec *eerec = aed_dev.eerec;
673
674 LOGD("%s\n", __func__);
675
676 rep_msg =
677 msg_create(&eerec->msg, strlen((char const *)&eerec->assert_type) + 1);
678 if (rep_msg == NULL)
679 return;
680
681 data = (char *)rep_msg + sizeof(AE_Msg);
682 rep_msg->cmdType = AE_RSP;
683 rep_msg->cmdId = AE_REQ_TYPE;
684 rep_msg->len = strlen((char const *)&eerec->assert_type) + 1;
685 strncpy(data, (char const *)&eerec->assert_type,
686 strlen((char const *)&eerec->assert_type));
687 }
688
689 static void ee_gen_process_msg(void)
690 {
691 #define PROCESS_STRLEN 512
692
693 int n = 0;
694 AE_Msg *rep_msg;
695 char *data;
696 struct aed_eerec *eerec = aed_dev.eerec;
697
698 LOGD("%s\n", __func__);
699
700 rep_msg = msg_create(&eerec->msg, PROCESS_STRLEN);
701 if (rep_msg == NULL)
702 return;
703
704 data = (char *)rep_msg + sizeof(AE_Msg);
705
706 if (eerec->exp_linenum != 0) {
707 /* for old aed_md_exception1() */
708 n = sprintf(data, "%s", eerec->assert_type);
709 if (eerec->exp_filename[0] != 0) {
710 n += sprintf(data + n, ", filename=%s,line=%d", eerec->exp_filename,
711 eerec->exp_linenum);
712 } else if (eerec->fatal1 != 0 && eerec->fatal2 != 0) {
713 n += sprintf(data + n, ", err1=%d,err2=%d", eerec->fatal1,
714 eerec->fatal2);
715 }
716 } else {
717 LOGD("ee_gen_process_msg else\n");
718 n = sprintf(data, "%s", eerec->exp_filename);
719 }
720
721 rep_msg->cmdType = AE_RSP;
722 rep_msg->cmdId = AE_REQ_PROCESS;
723 rep_msg->len = n + 1;
724 }
725
extern int aee_dump_ccci_debug_info(int md_id, void **addr, int *size);
/* Weak fallback used when the CCCI modem driver is not linked in: report
 * failure so ee_gen_detail_msg() skips the CCCI log section. */
__weak int aee_dump_ccci_debug_info(int md_id, void **addr, int *size) {
	return -1;
}
730
/*
 * Reply to AE_REQ_DETAIL for the current EE record: a textual dump of the
 * raw exception log plus, for modem exceptions, the CCCI debug log.  The
 * payload is flagged AE_PASS_BY_MEM.
 */
static void ee_gen_detail_msg(void)
{
	int i, n = 0;
	AE_Msg *rep_msg;
	char *data;
	int *mem;
	int md_id;
	int msgsize;
	char *ccci_log = NULL;
	int ccci_log_size = 0;
	struct aed_eerec *eerec = aed_dev.eerec;

	LOGD("%s\n", __func__);

	if (strncmp(eerec->assert_type, "md32", 4) == 0) {
		/* md32 logs are already text; print them verbatim. */
		msgsize = eerec->ee_log_size + 128;
		rep_msg = msg_create(&eerec->msg, msgsize);
		if (rep_msg == NULL)
			return;

		data = (char *)rep_msg + sizeof(AE_Msg);
		n += snprintf(data + n, msgsize - n, "== EXTERNAL EXCEPTION LOG ==\n");
		n += snprintf(data + n, msgsize - n, "%s\n", (char *)eerec->ee_log);
	} else {
		/* For modem exceptions, fetch the CCCI debug log of the md
		 * instance named in exp_filename ("md%d:..."); on failure
		 * fall back to no CCCI section. */
		if (strncmp(eerec->assert_type, "modem", 5) == 0) {
			if (1 == sscanf(eerec->exp_filename, "md%d:", &md_id)) {
				if (aee_dump_ccci_debug_info(md_id, (void **)&ccci_log, &ccci_log_size)) {
					ccci_log = NULL;
					ccci_log_size = 0;
				}
			}
		}
		/* Hex dump sizing: each 4 log bytes expand to roughly 12
		 * text characters, hence the *4 factor plus header slack. */
		msgsize = (eerec->ee_log_size + ccci_log_size) * 4 + 128;
		rep_msg = msg_create(&eerec->msg, msgsize);
		if (rep_msg == NULL)
			return;

		data = (char *)rep_msg + sizeof(AE_Msg);
		n += snprintf(data + n, msgsize - n, "== EXTERNAL EXCEPTION LOG ==\n");
		mem = (int *)eerec->ee_log;
		if (mem) {
			for (i = 0; i < eerec->ee_log_size / 4; i += 4) {
				n += snprintf(data + n, msgsize - n, "0x%08X 0x%08X 0x%08X 0x%08X\n",
					      mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
			}
		} else {
			n += snprintf(data + n, msgsize - n, "kmalloc fail, no log available\n");
		}
	}
	n += snprintf(data + n, msgsize - n, "== MEM DUMP(%d) ==\n", eerec->ee_phy_size);
	if (ccci_log) {
		/* NOTE(review): ccci_log is not freed here; presumably the
		 * buffer is owned by the CCCI driver -- confirm. */
		n += snprintf(data + n, msgsize - n, "== CCCI LOG ==\n");
		mem = (int *)ccci_log;
		for (i = 0; i < ccci_log_size / 4; i += 4) {
			n += snprintf(data + n, msgsize - n, "0x%08X 0x%08X 0x%08X 0x%08X\n",
				      mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
		}
		n += snprintf(data + n, msgsize - n, "== MEM DUMP(%d) ==\n", ccci_log_size);
	}

	rep_msg->cmdType = AE_RSP;
	rep_msg->cmdId = AE_REQ_DETAIL;
	rep_msg->arg = AE_PASS_BY_MEM;
	rep_msg->len = n + 1;
}
796
797 static void ee_gen_coredump_msg(void)
798 {
799 AE_Msg *rep_msg;
800 char *data;
801
802 LOGD("%s\n", __func__);
803
804 rep_msg = msg_create(&aed_dev.eerec->msg, 256);
805 if (rep_msg == NULL)
806 return;
807
808 data = (char *)rep_msg + sizeof(AE_Msg);
809 rep_msg->cmdType = AE_RSP;
810 rep_msg->cmdId = AE_REQ_COREDUMP;
811 rep_msg->arg = 0;
812 sprintf(data, "/proc/aed/%s", CURRENT_EE_COREDUMP);
813 rep_msg->len = strlen(data) + 1;
814 }
815
816 static void ee_destroy_log(void)
817 {
818 struct aed_eerec *eerec = aed_dev.eerec;
819 LOGD("%s\n", __func__);
820
821 if (eerec == NULL)
822 return;
823
824 aed_dev.eerec = NULL;
825 msg_destroy(&eerec->msg);
826
827 if (eerec->ee_phy != NULL) {
828 vfree(eerec->ee_phy);
829 eerec->ee_phy = NULL;
830 }
831 eerec->ee_log_size = 0;
832 eerec->ee_phy_size = 0;
833
834 if (eerec->ee_log != NULL) {
835 kfree(eerec->ee_log);
836 /*after this, another ee can enter */
837 eerec->ee_log = NULL;
838 }
839
840 kfree(eerec);
841 }
842
843 static int ee_log_avail(void)
844 {
845 return (aed_dev.eerec != NULL);
846 }
847
/*
 * Attach @eerec as the active EE record, raise an AE_IND_EXP_RAISED
 * indication to the daemon, and block until it acknowledges with
 * AE_IND_LOG_CLOSE (or a 5-minute timeout expires).
 */
static void ee_gen_ind_msg(struct aed_eerec *eerec)
{
	unsigned long flags = 0;
	AE_Msg *rep_msg;

	LOGD("%s\n", __func__);
	if (eerec == NULL) {
		return;
	}

	/*
	   Don't lock the whole function for the time is uncertain.
	   we rely on the fact that ee_rec is not null if race here!
	 */
	spin_lock_irqsave(&aed_device_lock, flags);

	if (aed_dev.eerec == NULL) {
		aed_dev.eerec = eerec;
	} else {
		/* should never come here, skip */
		spin_unlock_irqrestore(&aed_device_lock, flags);
		LOGW("%s: More than one EE message queued\n", __func__);
		return;
	}
	spin_unlock_irqrestore(&aed_device_lock, flags);

	rep_msg = msg_create(&aed_dev.eerec->msg, 0);
	if (rep_msg == NULL)
		return;

	rep_msg->cmdType = AE_IND;
	rep_msg->cmdId = AE_IND_EXP_RAISED;
	rep_msg->arg = AE_EE;
	rep_msg->len = 0;
	rep_msg->dbOption = eerec->db_opt;

	/* Reset the handshake semaphore, wake pollers, then wait for the
	 * daemon's AE_IND_LOG_CLOSE (aed_ee_write ups aed_ee_sem). */
	sema_init(&aed_ee_sem, 0);
	wake_up(&aed_dev.eewait);
	if (down_timeout(&aed_ee_sem, msecs_to_jiffies(5 * 60 * 1000))) {
		LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__);
	}
}
890
891 static void ee_queue_request(struct aed_eerec *eerec)
892 {
893 int ret;
894 unsigned long flags = 0;
895 spin_lock_irqsave(&ee_queue.lock, flags);
896 list_add_tail(&eerec->list, &ee_queue.list);
897 spin_unlock_irqrestore(&ee_queue.lock, flags);
898 ret = queue_work(system_nrt_wq, &ee_work);
899 LOGI("%s: add new ee work, status %d\n", __func__, ret);
900 }
901
/*
 * Work handler draining the EE queue: raise each record to the daemon
 * (ee_gen_ind_msg blocks until close or timeout), then unlink and free it.
 * NOTE(review): like ke_worker, the list is traversed without
 * ee_queue.lock -- confirm concurrent appends are safe here.
 */
static void ee_worker(struct work_struct *work)
{
	struct aed_eerec *eerec, *tmp;
	unsigned long flags = 0;
	list_for_each_entry_safe(eerec, tmp, &ee_queue.list, list) {
		if (eerec == NULL) {
			LOGE("%s:null eerec\n", __func__);
			return;
		}

		ee_gen_ind_msg(eerec);
		spin_lock_irqsave(&ee_queue.lock, flags);
		list_del(&eerec->list);
		spin_unlock_irqrestore(&ee_queue.lock, flags);
		ee_destroy_log();
	}
}
919
920 /******************************************************************************
921 * AED EE File operations
922 *****************************************************************************/
923 static int aed_ee_open(struct inode *inode, struct file *filp)
924 {
925 LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
926 return 0;
927 }
928
929 static int aed_ee_release(struct inode *inode, struct file *filp)
930 {
931 LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
932 return 0;
933 }
934
935 static unsigned int aed_ee_poll(struct file *file, struct poll_table_struct *ptable)
936 {
937 /* LOGD("%s\n", __func__); */
938 if (ee_log_avail()) {
939 return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
940 } else {
941 poll_wait(file, &aed_dev.eewait, ptable);
942 }
943 return 0;
944 }
945
946 static ssize_t aed_ee_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
947 {
948 return msg_copy_to_user(__func__, aed_dev.eerec->msg, buf, count, f_pos);
949 }
950
951 static ssize_t aed_ee_write(struct file *filp, const char __user *buf, size_t count,
952 loff_t *f_pos)
953 {
954 AE_Msg msg;
955 int rsize;
956 struct aed_eerec *eerec = aed_dev.eerec;
957
958 /* recevied a new request means the previous response is unavilable */
959 /* 1. set position to be zero */
960 /* 2. destroy the previous response message */
961 *f_pos = 0;
962
963 if (!eerec)
964 return -1;
965
966 msg_destroy(&eerec->msg);
967
968 /* the request must be an *AE_Msg buffer */
969 if (count != sizeof(AE_Msg)) {
970 LOGD("%s: ERR, aed_wirte count=%zx\n", __func__, count);
971 return -1;
972 }
973
974 rsize = copy_from_user(&msg, buf, count);
975 if (rsize != 0) {
976 LOGE("%s: ERR, copy_from_user rsize=%d\n", __func__, rsize);
977 return -1;
978 }
979
980 msg_show(__func__, &msg);
981
982 if (msg.cmdType == AE_REQ) {
983 if (!ee_log_avail()) {
984 ee_gen_notavail_msg();
985 return count;
986 }
987 switch (msg.cmdId) {
988 case AE_REQ_CLASS:
989 ee_gen_class_msg();
990 break;
991 case AE_REQ_TYPE:
992 ee_gen_type_msg();
993 break;
994 case AE_REQ_DETAIL:
995 ee_gen_detail_msg();
996 break;
997 case AE_REQ_PROCESS:
998 ee_gen_process_msg();
999 break;
1000 case AE_REQ_BACKTRACE:
1001 ee_gen_notavail_msg();
1002 break;
1003 case AE_REQ_COREDUMP:
1004 ee_gen_coredump_msg();
1005 break;
1006 default:
1007 LOGD("Unknown command id %d\n", msg.cmdId);
1008 ee_gen_notavail_msg();
1009 break;
1010 }
1011 } else if (msg.cmdType == AE_IND) {
1012 switch (msg.cmdId) {
1013 case AE_IND_LOG_CLOSE:
1014 up(&aed_ee_sem);
1015 break;
1016 default:
1017 /* IGNORE */
1018 break;
1019 }
1020 } else if (msg.cmdType == AE_RSP) { /* IGNORE */
1021 }
1022
1023 return count;
1024 }
1025
1026 /******************************************************************************
1027 * AED KE File operations
1028 *****************************************************************************/
1029 static int aed_ke_open(struct inode *inode, struct file *filp)
1030 {
1031 struct aee_oops *oops_open = NULL;
1032 int major = MAJOR(inode->i_rdev);
1033 int minor = MINOR(inode->i_rdev);
1034 unsigned char *devname = filp->f_path.dentry->d_iname;
1035 LOGD("%s:(%s)%d:%d\n", __func__, devname, major, minor);
1036
1037 if (strstr(devname, "aed1")) { /* aed_ke_open is also used by other device */
1038 oops_open = ipanic_oops_copy();
1039 if (oops_open == NULL) {
1040 return 0;
1041 }
1042 /* The panic log only occur on system startup, so check it now */
1043 ke_queue_request(oops_open);
1044 }
1045 return 0;
1046 }
1047
1048 static int aed_ke_release(struct inode *inode, struct file *filp)
1049 {
1050 LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
1051 return 0;
1052 }
1053
1054 static unsigned int aed_ke_poll(struct file *file, struct poll_table_struct *ptable)
1055 {
1056 if (ke_log_avail()) {
1057 return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
1058 }
1059 poll_wait(file, &aed_dev.kewait, ptable);
1060 return 0;
1061 }
1062
1063
/* Cursor state for the current-ke-* seq_files: the oops buffer being
 * streamed and its size in bytes. */
struct current_ke_buffer {
	void *data;
	ssize_t size;
};
1068
1069 static void *current_ke_start(struct seq_file *m, loff_t *pos)
1070 {
1071 struct current_ke_buffer *ke_buffer;
1072 int index;
1073
1074 ke_buffer = m->private;
1075 if (ke_buffer == NULL)
1076 return NULL;
1077 index = *pos * (PAGE_SIZE - 1);
1078 if (index < ke_buffer->size)
1079 return ke_buffer->data + index;
1080 return NULL;
1081 }
1082
1083 static void *current_ke_next(struct seq_file *m, void *p, loff_t *pos)
1084 {
1085 struct current_ke_buffer *ke_buffer;
1086 int index;
1087 ke_buffer = m->private;
1088 if (ke_buffer == NULL)
1089 return NULL;
1090 ++*pos;
1091 index = *pos * (PAGE_SIZE - 1);
1092 if (index < ke_buffer->size)
1093 return ke_buffer->data + index;
1094 return NULL;
1095 }
1096
/* seq_file stop: nothing to release, the buffer belongs to the oops. */
static void current_ke_stop(struct seq_file *m, void *p)
{
}
1101
1102 static int current_ke_show(struct seq_file *m, void *p)
1103 {
1104 unsigned long len;
1105 struct current_ke_buffer *ke_buffer;
1106 ke_buffer = m->private;
1107 if (ke_buffer == NULL)
1108 return 0;
1109 if ((unsigned long)p >= (unsigned long)ke_buffer->data + ke_buffer->size)
1110 return 0;
1111 len = (unsigned long)ke_buffer->data + ke_buffer->size - (unsigned long)p;
1112 len = len < PAGE_SIZE ? len : (PAGE_SIZE - 1);
1113 if (seq_write(m, p, len)) {
1114 len = 0;
1115 return -1;
1116 }
1117 return 0;
1118 }
1119
/* Iterator over the current KE record buffers, page-granular records. */
static const struct seq_operations current_ke_op = {
	.start = current_ke_start,
	.next = current_ke_next,
	.stop = current_ke_stop,
	.show = current_ke_show
};
1126
/*
 * Generates current_ke_<ENTRY>_open(): opens a seq_file whose private
 * current_ke_buffer points at the <ENTRY> buffer (and <ENTRY>_len) of the
 * last KE oops record.  With no record pending the buffer stays zeroed
 * (seq_open_private zero-allocates it), so the file reads as empty.
 */
#define AED_CURRENT_KE_OPEN(ENTRY) \
static int current_ke_##ENTRY##_open(struct inode *inode, struct file *file) \
{ \
	int ret; \
	struct aee_oops *oops; \
	struct seq_file *m; \
	struct current_ke_buffer *ke_buffer; \
	ret = seq_open_private(file, &current_ke_op, sizeof(struct current_ke_buffer)); \
	if (ret == 0) { \
		oops = aed_dev.kerec.lastlog; \
		m = file->private_data; \
		if (!oops) \
			return ret; \
		ke_buffer = (struct current_ke_buffer *)m->private; \
		ke_buffer->data = oops->ENTRY; \
		ke_buffer->size = oops->ENTRY##_len;\
	} \
	return ret; \
}
1146
/*
 * File operations for the /proc/aed/current-ke-* entries.  The open hook
 * uses seq_open_private(), so release must be seq_release_private():
 * plain seq_release() leaked the struct current_ke_buffer on every open.
 */
#define AED_PROC_CURRENT_KE_FOPS(ENTRY) \
static const struct file_operations proc_current_ke_##ENTRY##_fops = { \
	.open = current_ke_##ENTRY##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = seq_release_private, \
}
1154
1155
/*
 * Read handler for /dev/aed1 (kernel-exception channel): copy out the
 * current response message built by aed_ke_write().  All buffer/offset
 * bookkeeping is delegated to msg_copy_to_user().
 */
static ssize_t aed_ke_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	return msg_copy_to_user(__func__, aed_dev.kerec.msg, buf, count, f_pos);
}
1160
1161 static ssize_t aed_ke_write(struct file *filp, const char __user *buf, size_t count,
1162 loff_t *f_pos)
1163 {
1164 AE_Msg msg;
1165 int rsize;
1166
1167 /* recevied a new request means the previous response is unavilable */
1168 /* 1. set position to be zero */
1169 /* 2. destroy the previous response message */
1170 *f_pos = 0;
1171 msg_destroy(&aed_dev.kerec.msg);
1172
1173 /* the request must be an *AE_Msg buffer */
1174 if (count != sizeof(AE_Msg)) {
1175 LOGD("ERR: aed_wirte count=%zx\n", count);
1176 return -1;
1177 }
1178
1179 rsize = copy_from_user(&msg, buf, count);
1180 if (rsize != 0) {
1181 LOGD("copy_from_user rsize=%d\n", rsize);
1182 return -1;
1183 }
1184
1185 msg_show(__func__, &msg);
1186
1187 if (msg.cmdType == AE_REQ) {
1188 if (!ke_log_avail()) {
1189 ke_gen_notavail_msg();
1190
1191 return count;
1192 }
1193
1194 switch (msg.cmdId) {
1195 case AE_REQ_CLASS:
1196 ke_gen_class_msg();
1197 break;
1198 case AE_REQ_TYPE:
1199 ke_gen_type_msg();
1200 break;
1201 case AE_REQ_MODULE:
1202 ke_gen_module_msg();
1203 break;
1204 case AE_REQ_DETAIL:
1205 ke_gen_detail_msg(&msg);
1206 break;
1207 case AE_REQ_PROCESS:
1208 ke_gen_process_msg();
1209 break;
1210 case AE_REQ_BACKTRACE:
1211 ke_gen_backtrace_msg();
1212 break;
1213 case AE_REQ_USERSPACEBACKTRACE:
1214 ke_gen_userbacktrace_msg();
1215 break;
1216 case AE_REQ_USER_REG:
1217 ke_gen_user_reg_msg();
1218 break;
1219 case AE_REQ_USER_MAPS:
1220 ke_gen_usermaps_msg();
1221 break;
1222 default:
1223 ke_gen_notavail_msg();
1224 break;
1225 }
1226 } else if (msg.cmdType == AE_IND) {
1227 switch (msg.cmdId) {
1228 case AE_IND_LOG_CLOSE:
1229 /* real release operation move to ke_worker(): ke_destroy_log(); */
1230 up(&aed_ke_sem);
1231 break;
1232 default:
1233 /* IGNORE */
1234 break;
1235 }
1236 } else if (msg.cmdType == AE_RSP) { /* IGNORE */
1237 }
1238
1239 return count;
1240 }
1241
1242 static long aed_ioctl_bt(unsigned long arg)
1243 {
1244 int ret = 0;
1245 struct aee_ioctl ioctl;
1246 struct aee_process_bt bt;
1247
1248 if (copy_from_user(&ioctl, (struct aee_ioctl __user *)arg, sizeof(struct aee_ioctl))) {
1249 ret = -EFAULT;
1250 return ret;
1251 }
1252 bt.pid = ioctl.pid;
1253 ret = aed_get_process_bt(&bt);
1254 if (ret == 0) {
1255 ioctl.detail = 0xAEE00001;
1256 ioctl.size = bt.nr_entries;
1257 if (copy_to_user((struct aee_ioctl __user *)arg, &ioctl, sizeof(struct aee_ioctl))) {
1258 ret = -EFAULT;
1259 return ret;
1260 }
1261 if (!ioctl.out) {
1262 ret = -EFAULT;
1263 } else
1264 if (copy_to_user
1265 ((struct aee_bt_frame __user *)(unsigned long)ioctl.out,
1266 (const void *)bt.entries, sizeof(struct aee_bt_frame) * AEE_NR_FRAME)) {
1267 ret = -EFAULT;
1268 }
1269 }
1270 return ret;
1271 }
1272
/*
 * The aed daemon process and other command-line tools may access this
 * device concurrently; ioctl handling is serialized with aed_dal_sem.
 */
DEFINE_SEMAPHORE(aed_dal_sem);
/*
 * Shared ioctl handler for /dev/aed0 and /dev/aed1.
 *
 * AEEIOCTL_GET_PROCESS_BT is dispatched before taking aed_dal_sem; every
 * other command runs under the semaphore and exits through the EXIT
 * label so the semaphore is always released.
 *
 * Returns 0 on success or a negative errno.
 */
static long aed_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	if (cmd == AEEIOCTL_GET_PROCESS_BT)
		return aed_ioctl_bt(arg);


	if (down_interruptible(&aed_dal_sem) < 0) {
		return -ERESTARTSYS;
	}

	switch (cmd) {
	case AEEIOCTL_SET_AEE_MODE:
		{
			/* copies straight into the global aee_mode */
			if (copy_from_user(&aee_mode, (void __user *)arg, sizeof(aee_mode))) {
				ret = -EFAULT;
				goto EXIT;
			}
			LOGD("set aee mode = %d\n", aee_mode);
			break;
		}
	case AEEIOCTL_DAL_SHOW:
		{
			/*It's troublesome to allocate more than 1KB size on stack */
			struct aee_dal_show *dal_show = kzalloc(sizeof(struct aee_dal_show),
								GFP_KERNEL);
			if (dal_show == NULL) {
				ret = -EFAULT;
				goto EXIT;
			}

			if (copy_from_user(dal_show, (struct aee_dal_show __user *)arg,
					   sizeof(struct aee_dal_show))) {
				ret = -EFAULT;
				goto OUT;
			}

			/* DAL output only on engineering builds */
			if (aee_mode >= AEE_MODE_CUSTOMER_ENG) {
				LOGD("DAL_SHOW not allowed (mode %d)\n", aee_mode);
				goto OUT;
			}

			/* Try to prevent overrun */
			dal_show->msg[sizeof(dal_show->msg) - 1] = 0;
#ifdef CONFIG_MTK_FB
			DAL_Printf("%s", dal_show->msg);
#endif

OUT:
			kfree(dal_show);
			dal_show = NULL;
			goto EXIT;
		}

	case AEEIOCTL_DAL_CLEAN:
		{
			/* set default bgcolor to red, it will be used in DAL_Clean */
			struct aee_dal_setcolor dal_setcolor;
			dal_setcolor.foreground = 0x00ff00;	/*green */
			dal_setcolor.background = 0xff0000;	/*red */

#ifdef CONFIG_MTK_FB
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			DAL_Clean();
#endif
			break;
		}

	case AEEIOCTL_SETCOLOR:
		{
			struct aee_dal_setcolor dal_setcolor;

			if (aee_mode >= AEE_MODE_CUSTOMER_ENG) {
				LOGD("SETCOLOR not allowed (mode %d)\n", aee_mode);
				goto EXIT;
			}

			if (copy_from_user(&dal_setcolor, (struct aee_dal_setcolor __user *)arg,
					   sizeof(struct aee_dal_setcolor))) {
				ret = -EFAULT;
				goto EXIT;
			}
#ifdef CONFIG_MTK_FB
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			DAL_SetScreenColor(dal_setcolor.screencolor);
#endif
			break;
		}

	case AEEIOCTL_GET_THREAD_REG:
		{
			struct aee_thread_reg *tmp;

			LOGD("%s: get thread registers ioctl\n", __func__);

			tmp = kzalloc(sizeof(struct aee_thread_reg), GFP_KERNEL);
			if (tmp == NULL) {
				ret = -ENOMEM;
				goto EXIT;
			}

			if (copy_from_user
			    (tmp, (struct aee_thread_reg __user *)arg,
			     sizeof(struct aee_thread_reg))) {
				kfree(tmp);
				ret = -EFAULT;
				goto EXIT;
			}

			if (tmp->tid > 0) {
				struct task_struct *task;
				struct pt_regs *user_ret = NULL;
				/* NOTE(review): find_task_by_vpid() is called without
				 * rcu_read_lock()/tasklist_lock, and the task is used
				 * without a reference — confirm against this kernel's
				 * locking rules. */
				task = find_task_by_vpid(tmp->tid);
				if (task == NULL) {
					kfree(tmp);
					ret = -EINVAL;
					goto EXIT;
				}
				user_ret = task_pt_regs(task);
				if (NULL == user_ret) {
					kfree(tmp);
					ret = -EINVAL;
					goto EXIT;
				}
				memcpy(&(tmp->regs), user_ret, sizeof(struct pt_regs));
				if (copy_to_user
				    ((struct aee_thread_reg __user *)arg, tmp,
				     sizeof(struct aee_thread_reg))) {
					kfree(tmp);
					ret = -EFAULT;
					goto EXIT;
				}

			} else {
				LOGD("%s: get thread registers ioctl tid invalid\n", __func__);
				kfree(tmp);
				ret = -EINVAL;
				goto EXIT;
			}

			kfree(tmp);

			break;
		}

	case AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING:	/* get current user space reg when call aee_kernel_warning_api */
		{
			LOGD("%s: AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING,call kthread create ,is ok\n", __func__);
			/* kthread_create(Dstate_test, NULL, "D-state"); */

			aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DEFAULT|DB_OPT_NATIVE_BACKTRACE, "AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING",
					       "Trigger Kernel warning");
			break;
		}

	case AEEIOCTL_CHECK_SUID_DUMPABLE:
		{
			int pid;

			LOGD("%s: check suid dumpable ioctl\n", __func__);

			if (copy_from_user(&pid, (void __user *)arg, sizeof(int))) {
				ret = -EFAULT;
				goto EXIT;
			}

			if (pid > 0) {
				struct task_struct *task;
				int dumpable = -1;
				task = find_task_by_vpid(pid);
				if (task == NULL) {
					LOGD("%s: process:%d task null\n", __func__, pid);
					ret = -EINVAL;
					goto EXIT;
				}
				if (task->mm == NULL) {
					LOGD("%s: process:%d task mm null\n", __func__, pid);
					ret = -EINVAL;
					goto EXIT;
				}
				/* force the target dumpable so a core/db can be taken */
				dumpable = get_dumpable(task->mm);
				if (dumpable == 0) {
					LOGD("%s: set process:%d dumpable\n", __func__, pid);
					set_dumpable(task->mm, 1);
				} else
					LOGD("%s: get process:%d dumpable:%d\n", __func__, pid,
					     dumpable);

			} else {
				LOGD("%s: check suid dumpable ioctl pid invalid\n", __func__);
				ret = -EINVAL;
			}

			break;
		}

	case AEEIOCTL_SET_FORECE_RED_SCREEN:
		{
			if (copy_from_user
			    (&force_red_screen, (void __user *)arg, sizeof(force_red_screen))) {
				ret = -EFAULT;
				goto EXIT;
			}
			LOGD("force aee red screen = %d\n", force_red_screen);
			break;
		}

	default:
		ret = -EINVAL;
	}

EXIT:
	up(&aed_dal_sem);
	return ret;
}
1493
1494 static void aed_get_traces(char *msg)
1495 {
1496 struct stack_trace trace;
1497 unsigned long stacks[32];
1498 int i;
1499 int offset;
1500 trace.entries = stacks;
1501 /*save backtraces */
1502 trace.nr_entries = 0;
1503 trace.max_entries = 32;
1504 trace.skip = 2;
1505 save_stack_trace_tsk(current, &trace);
1506 offset = strlen(msg);
1507 for (i = 0; i < trace.nr_entries; i++) {
1508 offset += snprintf(msg + offset, AEE_BACKTRACE_LENGTH - offset, "[<%p>] %pS\n",
1509 (void *)trace.entries[i], (void *)trace.entries[i]);
1510 }
1511 }
1512
1513 void Log2Buffer(struct aee_oops *oops,const char *fmt, ...)
1514 {
1515 char buf[256];
1516 int len = 0;
1517 va_list ap;
1518
1519 va_start(ap, fmt);
1520 len = strlen(oops->userthread_maps.Userthread_maps);
1521
1522 if ((len + sizeof(buf)) < MaxMapsSize)
1523 {
1524 vsnprintf(&oops->userthread_maps.Userthread_maps[len], sizeof(buf), fmt, ap);
1525 oops->userthread_maps.Userthread_mapsLength=len + sizeof(buf);
1526 }
1527 va_end(ap);
1528 }
1529
/*
 * Snapshot the current task's userspace context into @oops: registers
 * (userthread_reg), the memory map listing (userthread_maps, via
 * Log2Buffer) and a copy of the user stack around SP (userthread_stack,
 * at most MaxStackSize-1 bytes).
 *
 * Bails out early (returning 0) for kernel threads (!user_mode) or when
 * the task has no mm.  The 64-bit build handles both compat (U32) and
 * native (U64) userspace.  Always returns 0.
 *
 * NOTE(review): the VMA list is walked without taking mmap_sem — this
 * appears to rely on only ever dumping the *current* task; confirm.
 */
int DumpThreadNativeInfo(struct aee_oops *oops)
{
	struct task_struct *current_task;
	struct pt_regs *user_ret;
	struct vm_area_struct *vma;
	unsigned long userstack_start = 0;
	unsigned long userstack_end = 0, length = 0;
	int mapcount = 0;
	struct file *file;
	int flags;
	struct mm_struct *mm;
	int ret = 0;

	current_task = get_current();
	user_ret = task_pt_regs(current_task);
	/* CurrentUserPid=current_task->pid; // Thread id */
	oops->userthread_reg.tid = current_task->tgid;	/* process ID */
	oops->userthread_stack.tid = current_task->tgid;	/* process ID */
	oops->userthread_maps.tid = current_task->tgid;	/* process ID */

	memcpy(&oops->userthread_reg.regs, user_ret, sizeof(struct pt_regs));
	LOGE(" pid:%d /// tgid:%d, stack:0x%08lx\n", current_task->pid, current_task->tgid, (long)oops->userthread_stack.Userthread_Stack);
	/* kernel thread: no userspace context to dump */
	if (!user_mode(user_ret))
		return 0;

	if (current_task->mm == NULL)
		return 0;



#if 1
	/* Walk the VMA list and emit a /proc/<pid>/maps-style line for
	 * each mapping, both to the log and into the maps buffer. */
	vma = current_task->mm->mmap;
	while (vma && (mapcount < current_task->mm->map_count)) {
		file = vma->vm_file;
		flags = vma->vm_flags;
		if (file) {

			LOGE("%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
			     flags & VM_READ ? 'r' : '-',
			     flags & VM_WRITE ? 'w' : '-',
			     flags & VM_EXEC ? 'x' : '-',
			     flags & VM_MAYSHARE ? 's' : 'p', (unsigned char *)(file->f_path.dentry->d_iname));
			Log2Buffer(oops, "%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
			     flags & VM_READ ? 'r' : '-',
			     flags & VM_WRITE ? 'w' : '-',
			     flags & VM_EXEC ? 'x' : '-',
			     flags & VM_MAYSHARE ? 's' : 'p', (unsigned char *)(file->f_path.dentry->d_iname));
		} else {
			/* anonymous mapping: classify as heap/stack/vdso */
			const char *name = arch_vma_name(vma);
			mm = vma->vm_mm;
			if (!name) {
				if (mm) {
					if (vma->vm_start <= mm->start_brk &&
					    vma->vm_end >= mm->brk) {
						name = "[heap]";
					} else if (vma->vm_start <= mm->start_stack &&
						   vma->vm_end >= mm->start_stack) {
						name = "[stack]";
					}
				} else {
					name = "[vdso]";
				}
			}
			/* if (name) */
			{

				LOGE("%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
				     flags & VM_READ ? 'r' : '-',
				     flags & VM_WRITE ? 'w' : '-',
				     flags & VM_EXEC ? 'x' : '-',
				     flags & VM_MAYSHARE ? 's' : 'p', name);

				Log2Buffer(oops, "%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
				     flags & VM_READ ? 'r' : '-',
				     flags & VM_WRITE ? 'w' : '-',
				     flags & VM_EXEC ? 'x' : '-',
				     flags & VM_MAYSHARE ? 's' : 'p', name);


			}
		}
		vma = vma->vm_next;
		mapcount++;

	}
#endif

	LOGE("maps addr(0x%08lx), maps len:%d\n", (long)oops->userthread_maps.Userthread_maps, oops->userthread_maps.Userthread_mapsLength);

#ifndef __aarch64__		/* 32bit */
	LOGE(" pc/lr/sp 0x%08lx/0x%08lx/0x%08lx\n", user_ret->ARM_pc, user_ret->ARM_lr,
	     user_ret->ARM_sp);
	userstack_start = (unsigned long)user_ret->ARM_sp;

	/* find the VMA containing SP; its vm_end bounds the stack copy */
	vma = current_task->mm->mmap;
	while (vma != NULL) {
		if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
			userstack_end = vma->vm_end;
			break;
		}
		vma = vma->vm_next;
		if (vma == current_task->mm->mmap) {
			break;
		}
	}
	if (userstack_end == 0) {
		LOGE("Dump native stack failed:\n");
		return 0;
	}
	LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start, userstack_end);
	length = ((userstack_end - userstack_start) <
		  (MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
	oops->userthread_stack.StackLength = length;


	ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack), (const void __user *)(userstack_start), length);
	LOGE("u+k 32 copy_from_user ret(0x%08x),len:%lx\n", ret, length);
	LOGE("end dump native stack:\n");
#else				/* 64bit, First deal with K64+U64, the last time to deal with K64+U32 */

	if (is_compat_task())	/* K64_U32 */
	{

		/* compat ABI: r13 is SP, r14 is LR */
		LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(user_ret->user_regs.pc), (long)(user_ret->user_regs.regs[14]),
		     (long)(user_ret->user_regs.regs[13]) );
		userstack_start = (unsigned long)user_ret->user_regs.regs[13];
		vma = current_task->mm->mmap;
		while (vma != NULL) {
			if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
				userstack_end = vma->vm_end;
				break;
			}
			vma = vma->vm_next;
			if (vma == current_task->mm->mmap) {
				break;
			}
		}
		if (userstack_end == 0) {
			LOGE("Dump native stack failed:\n");
			return 0;
		}
		LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start, userstack_end);
		length = ((userstack_end - userstack_start) <
			  (MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
		oops->userthread_stack.StackLength = length;
		ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack), (const void __user *)(userstack_start), length);
		LOGE("copy_from_user ret(0x%16x),len:%lx\n", ret, length);
	}
	else			/* K64+U64 */
	{
		/* native AArch64 ABI: x30 is LR, sp is dedicated */
		LOGE(" K64+ U64 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n", (long)(user_ret->user_regs.pc), (long)(user_ret->user_regs.regs[30]),
		     (long)(user_ret->user_regs.sp) );
		userstack_start = (unsigned long)user_ret->user_regs.sp;
		vma = current_task->mm->mmap;
		while (vma != NULL)
		{
			if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
				userstack_end = vma->vm_end;
				break;
			}
			vma = vma->vm_next;
			if (vma == current_task->mm->mmap) {
				break;
			}
		}
		if (userstack_end == 0) {
			LOGE("Dump native stack failed:\n");
			return 0;
		}

		LOGE("Dump stack range (0x%16lx:0x%16lx)\n", userstack_start, userstack_end);
		length = ((userstack_end - userstack_start) <
			  (MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
		oops->userthread_stack.StackLength = length;
		ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack), (const void __user *)(userstack_start), length);
		LOGE("copy_from_user ret(0x%08x),len:%lx\n", ret, length);
	}

#endif
	return 0;
}
1711
1712 static void kernel_reportAPI(const AE_DEFECT_ATTR attr, const int db_opt, const char *module,
1713 const char *msg)
1714 {
1715 struct aee_oops *oops;
1716 int n = 0;
1717 if (aee_mode >= AEE_MODE_CUSTOMER_USER || (aee_mode == AEE_MODE_CUSTOMER_ENG && attr > AE_DEFECT_EXCEPTION))
1718 return;
1719 oops = aee_oops_create(attr, AE_KERNEL_PROBLEM_REPORT, module);
1720 if (NULL != oops) {
1721 n += snprintf(oops->backtrace, AEE_BACKTRACE_LENGTH, msg);
1722 snprintf(oops->backtrace + n, AEE_BACKTRACE_LENGTH - n, "\nBacktrace:\n");
1723 aed_get_traces(oops->backtrace);
1724 oops->detail = (char *)(oops->backtrace);
1725 oops->detail_len = strlen(oops->backtrace) + 1;
1726 oops->dump_option = db_opt;
1727 #ifdef __aarch64__
1728 if ((db_opt & DB_OPT_NATIVE_BACKTRACE) && !is_compat_task())
1729 oops->dump_option |= DB_OPT_AARCH64;
1730 #endif
1731 if(db_opt & DB_OPT_NATIVE_BACKTRACE)
1732 {
1733 oops->userthread_stack.Userthread_Stack= vzalloc(MaxStackSize);
1734 if (oops->userthread_stack.Userthread_Stack == NULL)
1735 {
1736 LOGE("%s: oops->userthread_stack.Userthread_Stack Vmalloc fail", __func__);
1737 return;
1738 }
1739 oops->userthread_maps.Userthread_maps= vzalloc(MaxMapsSize);
1740 if (oops->userthread_maps.Userthread_maps == NULL)
1741 {
1742 LOGE("%s: oops->userthread_maps.Userthread_maps Vmalloc fail", __func__);
1743 return;
1744 }
1745 LOGE("%s: oops->userthread_stack.Userthread_Stack :0x%08lx,maps:0x%08lx", __func__,(long)oops->userthread_stack.Userthread_Stack,(long)oops->userthread_maps.Userthread_maps);
1746 oops->userthread_stack.StackLength=MaxStackSize; //default 8k
1747 oops->userthread_maps.Userthread_mapsLength=MaxMapsSize; //default 8k
1748 DumpThreadNativeInfo(oops);
1749
1750 }
1751 LOGI("%s,%s,%s,0x%x\n", __func__, module, msg, db_opt);
1752 ke_queue_request(oops);
1753 }
1754 }
1755
#ifndef PARTIAL_BUILD
/*
 * Display @msg on the DAL (assert/red screen) when the AEE mode and the
 * force_red_screen setting allow it.  Safe to call with msg == NULL (a
 * no-op); refuses to run in interrupt context because it sleeps on
 * aed_dal_sem and allocates with GFP_KERNEL.
 *
 * Fix: the original kfree'd dal_show only inside the "allowed" branch,
 * leaking it whenever the mode check took the else branch.  The kfree
 * is now unconditional after the branch.
 */
void aee_kernel_dal_api(const char *file, const int line, const char *msg)
{
	LOGW("aee_kernel_dal_api : <%s:%d> %s ", file, line, msg);
	if (in_interrupt()) {
		LOGE("aee_kernel_dal_api: in interrupt context, skip");
		return;
	}

#if defined(CONFIG_MTK_AEE_AED) && defined(CONFIG_MTK_FB)
	if (down_interruptible(&aed_dal_sem) < 0) {
		LOGI("ERROR : aee_kernel_dal_api() get aed_dal_sem fail ");
		return;
	}
	if (msg != NULL) {
		struct aee_dal_setcolor dal_setcolor;
		struct aee_dal_show *dal_show = kzalloc(sizeof(struct aee_dal_show), GFP_KERNEL);
		if (dal_show == NULL) {
			LOGI("ERROR : aee_kernel_dal_api() kzalloc fail\n ");
			up(&aed_dal_sem);
			return;
		}
		if (((aee_mode == AEE_MODE_MTK_ENG) && (force_red_screen == AEE_FORCE_NOT_SET))
		    || ((aee_mode < AEE_MODE_CUSTOMER_ENG)
			&& (force_red_screen == AEE_FORCE_RED_SCREEN))) {
			dal_setcolor.foreground = 0xff00ff;	/* fg: purple */
			dal_setcolor.background = 0x00ff00;	/* bg: green */
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			dal_setcolor.screencolor = 0xff0000;	/* screen:red */
			DAL_SetScreenColor(dal_setcolor.screencolor);
			strncpy(dal_show->msg, msg, sizeof(dal_show->msg) - 1);
			dal_show->msg[sizeof(dal_show->msg) - 1] = 0;
			DAL_Printf("%s", dal_show->msg);
		} else {
			LOGD("DAL not allowed (mode %d)\n", aee_mode);
		}
		kfree(dal_show);	/* fix: was leaked on the else path */
	}
	up(&aed_dal_sem);
#endif
}
#else
/* PARTIAL_BUILD stub: log the call and do nothing. */
void aee_kernel_dal_api(const char *file, const int line, const char *msg)
{
	LOGW("aee_kernel_dal_api : <%s:%d> %s ", file, line, msg);
	return;
}
#endif
1804 EXPORT_SYMBOL(aee_kernel_dal_api);
1805
1806 static void external_exception(const char *assert_type, const int *log, int log_size,
1807 const int *phy, int phy_size, const char *detail, const int db_opt)
1808 {
1809 int *ee_log = NULL;
1810 struct aed_eerec *eerec;
1811
1812 LOGD("%s : [%s] log ptr %p size %d, phy ptr %p size %d\n", __func__,
1813 assert_type, log, log_size, phy, phy_size);
1814 if (aee_mode >= AEE_MODE_CUSTOMER_USER)
1815 return;
1816 eerec = kzalloc(sizeof(struct aed_eerec), GFP_ATOMIC);
1817 if (eerec == NULL) {
1818 LOGE("%s: kmalloc fail", __func__);
1819 return;
1820 }
1821
1822 if ((log_size > 0) && (log != NULL)) {
1823 eerec->ee_log_size = log_size;
1824 ee_log = (int *)kmalloc(log_size, GFP_ATOMIC);
1825 if (NULL != ee_log) {
1826 eerec->ee_log = ee_log;
1827 memcpy(ee_log, log, log_size);
1828 }
1829 } else {
1830 eerec->ee_log_size = 16;
1831 ee_log = (int *)kzalloc(eerec->ee_log_size, GFP_ATOMIC);
1832 eerec->ee_log = ee_log;
1833 }
1834
1835 if (NULL == ee_log) {
1836 LOGE("%s : memory alloc() fail\n", __func__);
1837 return;
1838 }
1839
1840
1841 memset(eerec->assert_type, 0, sizeof(eerec->assert_type));
1842 strncpy(eerec->assert_type, assert_type, sizeof(eerec->assert_type) - 1);
1843 memset(eerec->exp_filename, 0, sizeof(eerec->exp_filename));
1844 strncpy(eerec->exp_filename, detail, sizeof(eerec->exp_filename) - 1);
1845 LOGD("EE [%s]\n", eerec->assert_type);
1846
1847 eerec->exp_linenum = 0;
1848 eerec->fatal1 = 0;
1849 eerec->fatal2 = 0;
1850
1851 /* Check if we can dump memory */
1852 if (in_interrupt()) {
1853 /* kernel vamlloc cannot be used in interrupt context */
1854 LOGD("External exception occur in interrupt context, no coredump");
1855 phy_size = 0;
1856 } else if ((phy < 0) || (phy_size > MAX_EE_COREDUMP)) {
1857 LOGD("EE Physical memory size(%d) too large or invalid", phy_size);
1858 phy_size = 0;
1859 }
1860
1861 if (phy_size > 0) {
1862 eerec->ee_phy = (int *)vmalloc_user(phy_size);
1863 if (eerec->ee_phy != NULL) {
1864 memcpy(eerec->ee_phy, phy, phy_size);
1865 eerec->ee_phy_size = phy_size;
1866 } else {
1867 LOGD("Losing ee phy mem due to vmalloc return NULL\n");
1868 eerec->ee_phy_size = 0;
1869 }
1870 } else {
1871 eerec->ee_phy = NULL;
1872 eerec->ee_phy_size = 0;
1873 }
1874 eerec->db_opt = db_opt;
1875 ee_queue_request(eerec);
1876 LOGD("external_exception out\n");
1877 }
1878
/* Writable module parameter; not read anywhere in this file — presumably
 * toggled via /sys/module by reboot-reason tooling. TODO confirm. */
static bool rr_reported;
module_param(rr_reported, bool, S_IRUSR | S_IWUSR);
1881
/* AEE callback table registered with the AEE core in aed_init(); all
 * three external-exception hooks funnel into external_exception(). */
static struct aee_kernel_api kernel_api = {
	.kernel_reportAPI = kernel_reportAPI,
	.md_exception = external_exception,
	.md32_exception = external_exception,
	.combo_exception = external_exception
};
1888
1889 extern int ksysfs_bootinfo_init(void);
1890 extern void ksysfs_bootinfo_exit(void);
1891
/* Instantiate the seq_file open handler and file_operations for each
 * /proc/aed/current-ke-* entry (macros defined earlier in this file). */
AED_CURRENT_KE_OPEN(console);
AED_PROC_CURRENT_KE_FOPS(console);
AED_CURRENT_KE_OPEN(userspace_info);
AED_PROC_CURRENT_KE_FOPS(userspace_info);
AED_CURRENT_KE_OPEN(android_main);
AED_PROC_CURRENT_KE_FOPS(android_main);
AED_CURRENT_KE_OPEN(android_radio);
AED_PROC_CURRENT_KE_FOPS(android_radio);
AED_CURRENT_KE_OPEN(android_system);
AED_PROC_CURRENT_KE_FOPS(android_system);
AED_CURRENT_KE_OPEN(mmprofile);
AED_PROC_CURRENT_KE_FOPS(mmprofile);
AED_CURRENT_KE_OPEN(mini_rdump);
AED_PROC_CURRENT_KE_FOPS(mini_rdump);
1906
1907
1908 static int current_ke_ee_coredump_open(struct inode *inode, struct file *file)
1909 {
1910 int ret = seq_open_private(file, &current_ke_op, sizeof(struct current_ke_buffer));
1911 if (ret == 0) {
1912 struct aed_eerec *eerec = aed_dev.eerec;
1913 struct seq_file *m = file->private_data;
1914 struct current_ke_buffer *ee_buffer;
1915 if (!eerec)
1916 return ret;
1917 ee_buffer = (struct current_ke_buffer *)m->private;
1918 ee_buffer->data = eerec->ee_phy;
1919 ee_buffer->size = eerec->ee_phy_size;
1920 }
1921 return ret;
1922 }
1923
/* ee_coredump keeps a hand-written open handler (above) instead of
 * AED_CURRENT_KE_OPEN(ee_coredump): its data comes from aed_dev.eerec,
 * not the kernel-exception record. */
AED_PROC_CURRENT_KE_FOPS(ee_coredump);
1926
1927
/*
 * Create /proc/aed and its entries (one per current-ke-* snapshot plus
 * the EE coredump), then initialize the reboot-reason, debug and dram
 * console proc interfaces underneath it.
 *
 * Returns 0 on success, -ENOMEM when the directory cannot be created.
 * NOTE(review): AED_PROC_ENTRY's failure behavior is defined in aed.h
 * and not visible here — confirm whether a failed entry aborts init.
 */
static int aed_proc_init(void)
{
	aed_proc_dir = proc_mkdir("aed", NULL);
	if (aed_proc_dir == NULL) {
		LOGE("aed proc_mkdir failed\n");
		return -ENOMEM;
	}

	AED_PROC_ENTRY(current-ke-console, current_ke_console, S_IRUSR);
	AED_PROC_ENTRY(current-ke-userspace_info, current_ke_userspace_info, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_system, current_ke_android_system, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_radio, current_ke_android_radio, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_main, current_ke_android_main, S_IRUSR);
	AED_PROC_ENTRY(current-ke-mmprofile, current_ke_mmprofile, S_IRUSR);
	AED_PROC_ENTRY(current-ke-mini_rdump, current_ke_mini_rdump, S_IRUSR);
	AED_PROC_ENTRY(current-ee-coredump, current_ke_ee_coredump, S_IRUSR);

	aee_rr_proc_init(aed_proc_dir);

	aed_proc_debug_init(aed_proc_dir);

	dram_console_init(aed_proc_dir);

	return 0;
}
1953
1954 static int aed_proc_done(void)
1955 {
1956 remove_proc_entry(CURRENT_KE_CONSOLE, aed_proc_dir);
1957 remove_proc_entry(CURRENT_EE_COREDUMP, aed_proc_dir);
1958
1959 aed_proc_debug_done(aed_proc_dir);
1960
1961 dram_console_done(aed_proc_dir);
1962
1963 remove_proc_entry("aed", NULL);
1964 return 0;
1965 }
1966
1967 /******************************************************************************
1968 * Module related
1969 *****************************************************************************/
1970 static struct file_operations aed_ee_fops = {
1971 .owner = THIS_MODULE,
1972 .open = aed_ee_open,
1973 .release = aed_ee_release,
1974 .poll = aed_ee_poll,
1975 .read = aed_ee_read,
1976 .write = aed_ee_write,
1977 .unlocked_ioctl = aed_ioctl,
1978 #ifdef CONFIG_COMPAT
1979 .compat_ioctl = aed_ioctl,
1980 #endif
1981 };
1982
1983 static struct file_operations aed_ke_fops = {
1984 .owner = THIS_MODULE,
1985 .open = aed_ke_open,
1986 .release = aed_ke_release,
1987 .poll = aed_ke_poll,
1988 .read = aed_ke_read,
1989 .write = aed_ke_write,
1990 .unlocked_ioctl = aed_ioctl,
1991 #ifdef CONFIG_COMPAT
1992 .compat_ioctl = aed_ioctl,
1993 #endif
1994 };
1995
1996 /* QHQ RT Monitor end */
/* /dev/aed0 — external-exception (modem/MD32) device node. */
static struct miscdevice aed_ee_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aed0",
	.fops = &aed_ee_fops,
};
2002
2003
2004
/* /dev/aed1 — kernel-exception device node. */
static struct miscdevice aed_ke_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aed1",
	.fops = &aed_ke_fops,
};
2010
2011 static int __init aed_init(void)
2012 {
2013 int err = 0;
2014 err = aed_proc_init();
2015 if (err != 0)
2016 return err;
2017
2018 err = ksysfs_bootinfo_init();
2019 if (err != 0)
2020 return err;
2021
2022 spin_lock_init(&ke_queue.lock);
2023 spin_lock_init(&ee_queue.lock);
2024 INIT_LIST_HEAD(&ke_queue.list);
2025 INIT_LIST_HEAD(&ee_queue.list);
2026
2027 init_waitqueue_head(&aed_dev.eewait);
2028 memset(&aed_dev.kerec, 0, sizeof(struct aed_kerec));
2029 init_waitqueue_head(&aed_dev.kewait);
2030
2031 INIT_WORK(&ke_work, ke_worker);
2032 INIT_WORK(&ee_work, ee_worker);
2033
2034 aee_register_api(&kernel_api);
2035
2036 spin_lock_init(&aed_device_lock);
2037 err = misc_register(&aed_ee_dev);
2038 if (unlikely(err)) {
2039 LOGE("aee: failed to register aed0(ee) device!\n");
2040 return err;
2041 }
2042
2043 err = misc_register(&aed_ke_dev);
2044 if (unlikely(err)) {
2045 LOGE("aee: failed to register aed1(ke) device!\n");
2046 return err;
2047 }
2048
2049 return err;
2050 }
2051
2052 static void __exit aed_exit(void)
2053 {
2054 int err;
2055
2056 err = misc_deregister(&aed_ee_dev);
2057 if (unlikely(err))
2058 LOGE("xLog: failed to unregister aed(ee) device!\n");
2059 err = misc_deregister(&aed_ke_dev);
2060 if (unlikely(err))
2061 LOGE("xLog: failed to unregister aed(ke) device!\n");
2062
2063 ee_destroy_log();
2064 ke_destroy_log();
2065
2066 aed_proc_done();
2067 ksysfs_bootinfo_exit();
2068 }
2069 module_init(aed_init);
2070 module_exit(aed_exit);
2071
2072 MODULE_LICENSE("GPL");
2073 MODULE_DESCRIPTION("MediaTek AED Driver");
2074 MODULE_AUTHOR("MediaTek Inc.");